max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
challenges/ch-28/quick_sort/quick_sort/quick_sort/quick_sort.py
|
Talafhamohammad-cloud/data-structures-and-algorithms-python
| 0
|
6625651
|
# Sample inputs used to exercise the quick sort below.
arr1 = [8, 4, 23, 42, 16, 15]
arr2 = [20, 18, 12, 8, 5, -2]
arr3 = [5, 12, 7, 5, 5, 7]
arr4 = [2, 3, 5, 7, 13, 11]


def partition(array, low, high):
    """Lomuto partition of array[low:high+1] around pivot array[high].

    Moves every element <= pivot to the left of the pivot's final position
    and returns that position. Operates in place.
    """
    pivot = array[high]
    i = low - 1
    for j in range(low, high):
        if array[j] <= pivot:
            i = i + 1
            array[i], array[j] = array[j], array[i]
    array[i + 1], array[high] = array[high], array[i + 1]
    return i + 1


def quickSort(array, low, high):
    """Sort array[low:high+1] in place with recursive quick sort."""
    if low < high:
        pi = partition(array, low, high)
        quickSort(array, low, pi - 1)
        quickSort(array, pi + 1, high)


def _demo(arr):
    """Print an array before and after sorting it in place.

    Extracted to replace four near-identical copy-pasted print blocks;
    also fixes the 'orginal' typo in the banner text.
    """
    print("###############################################################")
    print("original array")
    print(arr)
    quickSort(arr, 0, len(arr) - 1)
    print('sorted array:')
    print(arr)


for _arr in (arr1, arr2, arr3, arr4):
    _demo(_arr)
print("###############################################################")
|
# Test data for the quick sort demo below.
arr1 = [8, 4, 23, 42, 16, 15]
arr2 = [20, 18, 12, 8, 5, -2]
arr3 = [5, 12, 7, 5, 5, 7]
arr4 = [2, 3, 5, 7, 13, 11]


def partition(array, low, high):
    """Partition array[low:high+1] in place around array[high] (Lomuto
    scheme) and return the pivot's final index."""
    pivot_value = array[high]
    boundary = low - 1
    for cursor in range(low, high):
        if array[cursor] <= pivot_value:
            boundary += 1
            array[boundary], array[cursor] = array[cursor], array[boundary]
    array[boundary + 1], array[high] = array[high], array[boundary + 1]
    return boundary + 1


def quickSort(array, low, high):
    """Recursively quick sort array[low:high+1] in place."""
    if low >= high:
        return
    split = partition(array, low, high)
    quickSort(array, low, split - 1)
    quickSort(array, split + 1, high)


print("###############################################################")
print("orginal array")
print(arr1)
size = len(arr1)
quickSort(arr1, 0, size - 1)
print('sorted array:')
print(arr1)
print("###############################################################")
print("orginal array")
print(arr2)
size = len(arr2)
quickSort(arr2, 0, size - 1)
print('sorted array:')
print(arr2)
print("###############################################################")
print("orginal array")
print(arr3)
size = len(arr3)
quickSort(arr3, 0, size - 1)
print('sorted array:')
print(arr3)
print("###############################################################")
print("orginal array")
print(arr4)
size = len(arr4)
quickSort(arr4, 0, size - 1)
print('sorted array:')
print(arr4)
print("###############################################################")
|
de
| 0.797399
|
##############################################################") ##############################################################") ##############################################################") ##############################################################") ##############################################################")
| 3.663706
| 4
|
utils/scripts/OOOlevelGen/src/levels/The_Grand_Final.py
|
fullscreennl/monkeyswipe
| 0
|
6625652
|
<reponame>fullscreennl/monkeyswipe<filename>utils/scripts/OOOlevelGen/src/levels/The_Grand_Final.py<gh_stars>0
import LevelBuilder
from sprites import *
def render(name, bg):
    """Build the 'The Grand Final' level and write it out as <name>.plist.

    name -- base name of the output plist file.
    bg   -- background identifier forwarded to the LevelBuilder.
    """
    builder = LevelBuilder.LevelBuilder(name + ".plist", background=bg)
    # Player character.
    builder.addObject(Hero.HeroSprite(x=160, y=40))
    builder.addObject(EnemyBucketWithStar.EnemyBucketWithStarSprite(
        width=150, height=100, x=240, y=160, num_enemies=30, enemy_size=20))
    # Central static beam, named so the joints below can reference it.
    builder.addObject(Beam.BeamSprite(
        x=240, y=160, width=10, height=10, static='true',
        angle=0).setName("platform"))
    builder.addObject(EnemyEquipedRotor.EnemyEquipedRotorSprite(
        x=240, y=250, scaling=1.0, speed=3, torque=10000))
    builder.addObject(CyclingEnemyObject.CyclingEnemyObjectSprite(
        name='num1', x=65, y=160, width=80, height=80, enemy_size=20))
    #lb.addObject(CyclingEnemyObject.CyclingEnemyObjectSprite(name='num2',x=420,y=160,width=80,height=80,enemy_size=20))
    # Tie the bomb to the central platform.
    builder.addObject(Joints.DistanceJoint(
        body1="Bomb", body2="platform", damping='10', freq='0'))
    # NOTE(review): 'denstity' is presumably a typo for 'density' — confirm
    # against FriendSprite's accepted keywords before renaming it.
    builder.addObject(Friend.FriendSprite(
        x=300, y=125, width=64, height=64, denstity=0.02))
    # Static angled beams forming the arena boundaries.
    builder.addObject(Beam.BeamSprite(
        x=480, y=-100, width=200, height=200, static='true', angle=45))
    builder.addObject(Beam.BeamSprite(
        x=0, y=-100, width=240, height=240, static='true', angle=60))
    builder.addObject(Wizard.WizardSprite(x=450, y=160))
    builder.addObject(Contacts.Contact(
        body1='Hero', body2=':hat_top', event_name='onReleaseStar'))
    builder.addObject(Enemy.EnemySprite(
        x=240, y=160, width=50, height=50, restitution=0.8,
        static='false').setName('StarHolder'))
    # Motorized joint spinning the star around its holder.
    builder.addObject(Joints.RevoluteJoint(
        body1='Star', body2='StarHolder', motor_speed='50.0',
        torque='1000.0', enable_motor='true', lower_angle='12',
        upper_angle='45', enable_limit='false',
        collide_connected='false', userData='star_joint'))
    builder.addObject(Contacts.Contact(
        body1='StarHolder', body2='Hero', event_name='onLose'))
    builder.addObject(SpikeyBuddy.SpikeyBuddySprite(
        x=20, y=40, width=40, height=40, density=6, restitution=0.6))
    builder.addObject(Launcher.LauncherSprite(
        name='__launcher__1', x=400, y=-15, trigger_x=100, trigger_y=300))
    builder.addObject(Rotor.RotorSprite(x=420, y=260, speed=5, torque=10000))
    #lb.addObject(Rotor.RotorSprite(x=180,y=160,speed=1,torque=10000))
    builder.addObject(Bomb.BombSprite(
        x=240, y=40, width=32, height=32, static='false'))
    #lb.addObject(Nut.NutSprite(x=32,y=272, eventName='onNutHitAll'))
    # Serialize the assembled level to disk.
    builder.render()
|
import LevelBuilder
from sprites import *
def render(name,bg):
# NOTE(review): indentation was lost in this dump; everything below is
# logically the body of render(). It assembles the "The Grand Final"
# level and serializes it to "<name>.plist" via LevelBuilder.
lb = LevelBuilder.LevelBuilder(name+".plist",background=bg)
# Player character.
lb.addObject(Hero.HeroSprite(x=160,y=40))
lb.addObject(EnemyBucketWithStar.EnemyBucketWithStarSprite(width=150,height=100, x=240, y=160, num_enemies=30, enemy_size=20))
# Central static beam; named so joints below can reference it as "platform".
lb.addObject(Beam.BeamSprite(x=240,y=160,width=10,height=10,static='true',angle=0).setName("platform"))
lb.addObject(EnemyEquipedRotor.EnemyEquipedRotorSprite(x=240,y=250,scaling=1.0,speed=3,torque=10000))
lb.addObject(CyclingEnemyObject.CyclingEnemyObjectSprite(name='num1',x=65,y=160,width=80,height=80,enemy_size=20))
#lb.addObject(CyclingEnemyObject.CyclingEnemyObjectSprite(name='num2',x=420,y=160,width=80,height=80,enemy_size=20))
# Tie the bomb to the central platform with a distance joint.
distJoint = Joints.DistanceJoint(body1="Bomb",body2="platform",damping='10',freq='0')
lb.addObject(distJoint)
# NOTE(review): 'denstity' is presumably a typo for 'density' — confirm
# against FriendSprite's accepted keyword arguments before changing it.
lb.addObject(Friend.FriendSprite(x=300,y=125,width=64,height=64,denstity=0.02))
# Static angled beams forming the arena boundaries.
lb.addObject(Beam.BeamSprite(x=480,y=-100,width=200,height=200,static='true',angle=45))
lb.addObject(Beam.BeamSprite(x=0,y=-100,width=240,height=240,static='true',angle=60))
lb.addObject(Wizard.WizardSprite(x=450,y=160));
# Fire 'onReleaseStar' when the hero touches the hat top.
lb.addObject(Contacts.Contact(body1='Hero',body2=':hat_top',event_name='onReleaseStar'))
lb.addObject(Enemy.EnemySprite(x=240,y=160,width=50,height=50,restitution=0.8,static='false').setName('StarHolder'))
# Motorized revolute joint spinning the star around its holder.
revJoint = Joints.RevoluteJoint(body1='Star',body2='StarHolder',motor_speed='50.0',torque='1000.0',
enable_motor='true',lower_angle='12',upper_angle='45',
enable_limit='false',collide_connected='false',userData='star_joint')
lb.addObject(revJoint)
# Touching the star holder directly triggers 'onLose'.
lb.addObject(Contacts.Contact(body1='StarHolder',body2='Hero',event_name='onLose'))
lb.addObject(SpikeyBuddy.SpikeyBuddySprite(x=20,y=40,width=40,height=40,density=6,restitution=0.6))
lb.addObject(Launcher.LauncherSprite(name='__launcher__1',x=400, y=-15, trigger_x=100, trigger_y=300))
lb.addObject(Rotor.RotorSprite(x=420,y=260,speed=5,torque=10000))
#lb.addObject(Rotor.RotorSprite(x=180,y=160,speed=1,torque=10000))
lb.addObject(Bomb.BombSprite(x=240,y=40,width=32,height=32,static='false'))
#lb.addObject(Nut.NutSprite(x=32,y=272, eventName='onNutHitAll'))
# Write the assembled level to disk.
lb.render()
|
en
| 0.205868
|
#lb.addObject(CyclingEnemyObject.CyclingEnemyObjectSprite(name='num2',x=420,y=160,width=80,height=80,enemy_size=20)) #lb.addObject(Rotor.RotorSprite(x=180,y=160,speed=1,torque=10000)) #lb.addObject(Nut.NutSprite(x=32,y=272, eventName='onNutHitAll'))
| 2.654472
| 3
|
routemaster/config/tests/test_next_states.py
|
thread/routemaster
| 13
|
6625653
|
<reponame>thread/routemaster
import pytest
from routemaster.config import (
NoNextStates,
ConstantNextState,
ContextNextStates,
ContextNextStatesOption,
)
def test_constant_next_state():
    """ConstantNextState always routes to its single configured state."""
    states = ConstantNextState(state='foo')
    assert states.all_destinations() == ['foo']
    assert states.next_state_for_label(None) == 'foo'


def test_no_next_states_must_not_be_called():
    """NoNextStates has no destinations and must never be asked for one."""
    states = NoNextStates()
    assert states.all_destinations() == []
    with pytest.raises(RuntimeError):
        states.next_state_for_label(None)


def _bool_switched_states():
    """Helper: states keyed on metadata.foo (True->'1', False->'2'),
    defaulting to '3'."""
    return ContextNextStates(
        path='metadata.foo',
        destinations=[
            ContextNextStatesOption(state='1', value=True),
            ContextNextStatesOption(state='2', value=False),
        ],
        default='3',
    )


def test_context_next_states(make_context):
    """A matching metadata value selects the corresponding destination."""
    states = _bool_switched_states()
    ctx = make_context(label='label1', metadata={'foo': True})
    assert states.all_destinations() == ['1', '2', '3']
    assert states.next_state_for_label(ctx) == '1'


def test_context_next_states_returns_default_if_no_match(make_context):
    """An unrecognised metadata value falls back to the default state."""
    states = _bool_switched_states()
    ctx = make_context(label='label1', metadata={'foo': 'bar'})
    assert states.next_state_for_label(ctx) == '3'
|
import pytest
from routemaster.config import (
NoNextStates,
ConstantNextState,
ContextNextStates,
ContextNextStatesOption,
)
# NOTE(review): indentation was lost in this dump; comments mark the
# logical structure of these pytest functions.
def test_constant_next_state():
# A ConstantNextState always routes to its one configured state.
next_states = ConstantNextState(state='foo')
assert next_states.all_destinations() == ['foo']
# The label is ignored by a constant transition, so None is acceptable.
assert next_states.next_state_for_label(None) == 'foo'
def test_no_next_states_must_not_be_called():
# Terminal state: no destinations, and asking for one raises.
next_states = NoNextStates()
assert next_states.all_destinations() == []
with pytest.raises(RuntimeError):
next_states.next_state_for_label(None)
def test_context_next_states(make_context):
# Destinations keyed on the value found at metadata.foo; default '3'.
next_states = ContextNextStates(
path='metadata.foo',
destinations=[
ContextNextStatesOption(state='1', value=True),
ContextNextStatesOption(state='2', value=False),
],
default='3',
)
# metadata.foo is True, so the first option ('1') must be selected.
context = make_context(label='label1', metadata={'foo': True})
assert next_states.all_destinations() == ['1', '2', '3']
assert next_states.next_state_for_label(context) == '1'
def test_context_next_states_returns_default_if_no_match(make_context):
next_states = ContextNextStates(
path='metadata.foo',
destinations=[
ContextNextStatesOption(state='1', value=True),
ContextNextStatesOption(state='2', value=False),
],
default='3',
)
# 'bar' matches neither True nor False, so the default '3' is returned.
context = make_context(label='label1', metadata={'foo': 'bar'})
assert next_states.next_state_for_label(context) == '3'
|
none
| 1
| 2.099654
| 2
|
|
bases.py
|
rhbvkleef/aoc-2018
| 0
|
6625654
|
# Copyright 2018 <NAME>
# This library is licensed under the BSD 3-clause license. This means that you
# are allowed to do almost anything with it. For exact terms, please refer to
# the attached license file.
import os
from abc import ABC, abstractmethod
from unittest import TestCase
from typing import Tuple, Any, List
class Day(ABC):
data: str
examples_1: List[Tuple[str, Any]]
examples_2: List[Tuple[str, Any]]
def __init__(self, load_data=True):
if not load_data:
return
path = os.path.join(*type(self).__module__.split('.')[:-1])
file = open("{}/input.txt".format(path), "r")
self.data = file.read().strip()
file.close()
@abstractmethod
def part1(self):
pass
@abstractmethod
def part2(self):
pass
class DayTest(TestCase):
solution: Day
puzzles: Tuple
# noinspection PyPep8Naming
def __init__(self, testName, solution, puzzles=(1, 2)):
super(DayTest, self).__init__(testName)
self.solution = solution
self.not_skip = puzzles
def test_part1(self):
if 1 not in self.not_skip:
self.skipTest('This test is not specified.')
if not hasattr(self.solution, 'examples_1'):
self.fail('There are no tests for part 1')
if len(self.solution.examples_1) <= 0:
self.skipTest('No tests are provided.')
for data, answer in self.solution.examples_1:
self.solution.data = data
self.assertEqual(answer, self.solution.part1(istest=True))
def test_part2(self):
if 2 not in self.not_skip:
self.skipTest('This test is not specified.')
if not hasattr(self.solution, 'examples_2'):
self.fail('There are no tests for part 2')
if len(self.solution.examples_2) <= 0:
self.skipTest('No tests are provided.')
for data, answer in self.solution.examples_2:
self.solution.data = data
self.assertEqual(answer, self.solution.part2(istest=True))
@staticmethod
def new(day, solution, puzzle, puzzles):
cls = type('{}'.format(day), (DayTest, ), {'__module__': 'days'})
return cls('test_part{}'.format(puzzle), solution=solution,
puzzles=puzzles)
|
# Copyright 2018 <NAME>
# This library is licensed under the BSD 3-clause license. This means that you
# are allowed to do almost anything with it. For exact terms, please refer to
# the attached license file.
import os
from abc import ABC, abstractmethod
from unittest import TestCase
from typing import Tuple, Any, List
# NOTE(review): indentation was lost in this dump; comments mark the
# logical structure of the two classes.
class Day(ABC):
# Base class for a single Advent of Code day's solution.
# data: raw puzzle input (stripped); examples_N: (input, answer) pairs.
data: str
examples_1: List[Tuple[str, Any]]
examples_2: List[Tuple[str, Any]]
def __init__(self, load_data=True):
# load_data=False skips reading input.txt.
if not load_data:
return
# Input file lives next to the subclass's module: <pkg dir>/input.txt.
path = os.path.join(*type(self).__module__.split('.')[:-1])
file = open("{}/input.txt".format(path), "r")
self.data = file.read().strip()
# NOTE(review): the handle leaks if read() raises; a `with` block would
# be safer.
file.close()
@abstractmethod
def part1(self):
pass
@abstractmethod
def part2(self):
pass
class DayTest(TestCase):
# unittest adapter that checks a Day solution against its examples.
solution: Day
puzzles: Tuple
# noinspection PyPep8Naming
def __init__(self, testName, solution, puzzles=(1, 2)):
super(DayTest, self).__init__(testName)
self.solution = solution
# Only parts listed in `puzzles` run; others are skipped.
self.not_skip = puzzles
def test_part1(self):
if 1 not in self.not_skip:
self.skipTest('This test is not specified.')
if not hasattr(self.solution, 'examples_1'):
self.fail('There are no tests for part 1')
if len(self.solution.examples_1) <= 0:
self.skipTest('No tests are provided.')
# Feed each example input into the solution and compare answers.
for data, answer in self.solution.examples_1:
self.solution.data = data
self.assertEqual(answer, self.solution.part1(istest=True))
def test_part2(self):
# Mirror of test_part1 for part 2.
if 2 not in self.not_skip:
self.skipTest('This test is not specified.')
if not hasattr(self.solution, 'examples_2'):
self.fail('There are no tests for part 2')
if len(self.solution.examples_2) <= 0:
self.skipTest('No tests are provided.')
for data, answer in self.solution.examples_2:
self.solution.data = data
self.assertEqual(answer, self.solution.part2(istest=True))
@staticmethod
def new(day, solution, puzzle, puzzles):
# Build a uniquely named DayTest subclass instance for one day/part.
cls = type('{}'.format(day), (DayTest, ), {'__module__': 'days'})
return cls('test_part{}'.format(puzzle), solution=solution,
puzzles=puzzles)
|
en
| 0.957602
|
# Copyright 2018 <NAME> # This library is licensed under the BSD 3-clause license. This means that you # are allowed to do almost anything with it. For exact terms, please refer to # the attached license file. # noinspection PyPep8Naming
| 3.436379
| 3
|
openmdao/util/array_util.py
|
colinxs/OpenMDAO
| 17
|
6625655
|
""" Some useful array utilities. """
import sys
from six.moves import range, zip
import numpy as np
from numpy import ndarray
from itertools import product
def array_idx_iter(shape):
    """
    Return an iterator over the indices into a n-dimensional array.
    Args
    ----
    shape : tuple
        shape of the array.
    """
    # Cartesian product of each axis's range yields every index tuple.
    yield from product(*(range(extent) for extent in shape))


def evenly_distrib_idxs(num_divisions, arr_size):
    """
    Split arr_size entries into num_divisions contiguous chunks whose
    sizes differ by at most one.
    Args
    ----
    num_divisions : int
        Number of parts to divide the array into.
    arr_size : int
        Number of entries in the array.
    Returns
    -------
    tuple
        a tuple of (sizes, offsets), where sizes and offsets contain
        values for all divisions.
    """
    quotient, remainder = divmod(arr_size, num_divisions)
    sizes = np.full(num_divisions, quotient, dtype="int")
    # Spread the remainder over the first `remainder` chunks instead of
    # dumping it all on a single one.
    sizes[:remainder] += 1
    # Offsets are the exclusive prefix sums of the sizes.
    offsets = np.zeros(num_divisions, dtype="int")
    offsets[1:] = np.cumsum(sizes)[:-1]
    return sizes, offsets


def to_slice(idxs):
    """Collapse a sorted index array into an equivalent slice when the
    indices are evenly spaced; otherwise return the array unchanged.
    """
    count = len(idxs)
    if count == 1:
        return slice(idxs[0], idxs[0] + 1)
    if count == 0:
        return idxs
    step = idxs[1] - idxs[0]
    if step <= 0:
        return idxs
    # Every consecutive gap must equal the first one.
    if any(idxs[1:] - idxs[:-1] != step):
        return idxs
    # Upper bound is idxs[-1]+step (not +1) so later slice-collapsing code
    # can compare upper and lower bounds directly.
    return slice(idxs[0], idxs[-1] + step, step)
|
""" Some useful array utilities. """
import sys
from six.moves import range, zip
import numpy as np
from numpy import ndarray
from itertools import product
# NOTE(review): indentation was lost in this dump; comments mark the
# logical structure of these three helpers.
def array_idx_iter(shape):
"""
Return an iterator over the indices into a n-dimensional array.
Args
----
shape : tuple
shape of the array.
"""
# Cartesian product of each axis's range yields every index tuple.
for p in product(*[range(s) for s in shape]):
yield p
def evenly_distrib_idxs(num_divisions, arr_size):
"""
Given a number of divisions and the size of an array, chop the array up
into pieces according to number of divisions, keeping the distribution
of entries as even as possible.
Args
----
num_divisions : int
Number of parts to divide the array into.
arr_size : int
Number of entries in the array.
Returns
-------
tuple
a tuple of (sizes, offsets), where sizes and offsets contain values for all
divisions.
"""
# Every division gets the integer quotient; the remainder is spread below.
base = arr_size // num_divisions
leftover = arr_size % num_divisions
sizes = np.ones(num_divisions, dtype="int") * base
# evenly distribute the remainder across size-leftover procs,
# instead of giving the whole remainder to one proc
sizes[:leftover] += 1
# Offsets are the exclusive prefix sums of the sizes.
offsets = np.zeros(num_divisions, dtype="int")
offsets[1:] = np.cumsum(sizes)[:-1]
return sizes, offsets
def to_slice(idxs):
"""Convert an index array to a slice if possible. Otherwise,
return the index array. Indices are assumed to be sorted in
ascending order.
"""
# A single index collapses to a one-element slice.
if len(idxs) == 1:
return slice(idxs[0], idxs[0]+1)
elif len(idxs) == 0:
return idxs
# Candidate stride from the first gap; must be positive and uniform.
stride = idxs[1]-idxs[0]
if stride <= 0:
return idxs
#make sure stride is consistent throughout the array
# NOTE(review): the elementwise comparison assumes idxs is a numpy
# array, not a plain list — confirm with callers.
if any(idxs[1:]-idxs[:-1] != stride):
return idxs
# set the upper bound to idxs[-1]+stride instead of idxs[-1]+1 because
# later, we compare upper and lower bounds when collapsing slices
return slice(idxs[0], idxs[-1]+stride, stride)
|
en
| 0.771655
|
Some useful array utilities. Return an iterator over the indices into a n-dimensional array. Args ---- shape : tuple shape of the array. Given a number of divisions and the size of an array, chop the array up into pieces according to number of divisions, keeping the distribution of entries as even as possible. Args ---- num_divisions : int Number of parts to divide the array into. arr_size : int Number of entries in the array. Returns ------- tuple a tuple of (sizes, offsets), where sizes and offsets contain values for all divisions. # evenly distribute the remainder across size-leftover procs, # instead of giving the whole remainder to one proc Convert an index array to a slice if possible. Otherwise, return the index array. Indices are assumed to be sorted in ascending order. #make sure stride is consistent throughout the array # set the upper bound to idxs[-1]+stride instead of idxs[-1]+1 because # later, we compare upper and lower bounds when collapsing slices
| 3.264871
| 3
|
data_process/vocab.py
|
maopademiao/mams
| 0
|
6625656
|
import operator
from src.module.utils.constants import PAD, UNK, ASPECT
class Vocab(object):
    """Word-frequency accumulator that builds word<->index mappings.

    PAD, UNK and ASPECT always receive the first indices; overflow or
    rare words are mapped onto UNK's index.
    """

    def __init__(self):
        # word -> occurrence count
        self._count_dict = dict()
        # Special tokens that always head the vocabulary.
        self._predefined_list = [PAD, UNK, ASPECT]

    def add(self, word):
        """Record one occurrence of *word*."""
        self._count_dict[word] = self._count_dict.get(word, 0) + 1

    def add_list(self, words):
        """Record one occurrence of every word in *words*."""
        for token in words:
            self.add(token)

    def get_vocab(self, max_size=None, min_freq=0):
        """Build (word2index, index2word) from the collected counts.

        Words past *max_size* or rarer than *min_freq* share UNK's index
        and are therefore absent from index2word.
        """
        by_freq = sorted(self._count_dict.items(),
                         key=operator.itemgetter(1), reverse=True)
        word2index = {}
        for token in self._predefined_list:
            word2index[token] = len(word2index)
        for token, freq in by_freq:
            if token in word2index:
                continue
            over_budget = max_size is not None and len(word2index) >= max_size
            if over_budget or freq < min_freq:
                word2index[token] = word2index[UNK]
            else:
                word2index[token] = len(word2index)
        # Invert the mapping; UNK is the sole owner of its index.
        index2word = {word2index[UNK]: UNK}
        for token, idx in word2index.items():
            if idx != word2index[UNK]:
                index2word[idx] = token
        return word2index, index2word
|
import operator
from src.module.utils.constants import PAD, UNK, ASPECT
# NOTE(review): indentation was lost in this dump; comments mark the
# logical structure.
class Vocab(object):
# Frequency-counting vocabulary builder; PAD/UNK/ASPECT always get the
# first indices.
def __init__(self):
# word -> occurrence count
self._count_dict = dict()
self._predefined_list = [PAD, UNK, ASPECT]
def add(self, word):
# Record one occurrence of `word`.
if word in self._count_dict:
self._count_dict[word] += 1
else:
self._count_dict[word] = 1
def add_list(self, words):
# Record one occurrence of every word in `words`.
for word in words:
self.add(word)
def get_vocab(self, max_size=None, min_freq=0):
# Build word->index and index->word maps, most frequent words first.
sorted_words = sorted(self._count_dict.items(), key=operator.itemgetter(1), reverse=True)
word2index = {}
# Special tokens claim the lowest indices.
for word in self._predefined_list:
word2index[word] = len(word2index)
for word, freq in sorted_words:
if word in word2index:
continue
# Overflow (beyond max_size) or rare (< min_freq) words collapse
# onto UNK's index.
if (max_size is not None and len(word2index) >= max_size) or freq < min_freq:
word2index[word] = word2index[UNK]
else:
word2index[word] = len(word2index)
index2word = {}
index2word[word2index[UNK]] = UNK
# Invert the mapping, keeping UNK as the sole owner of its index.
for word, index in word2index.items():
if index == word2index[UNK]:
continue
else:
index2word[index] = word
return word2index, index2word
|
none
| 1
| 3.037024
| 3
|
|
Day 04/MonkandNiceStrings.py
|
sandeep-krishna/100DaysOfCode
| 0
|
6625657
|
'''
*** Problem ***
Monk and Nice Strings
Monk's best friend Micro's birthday is coming up. Micro likes Nice Strings very much, so Monk decided to gift him one. Monk is having N nice strings, so he'll choose one from those. But before he selects one, he need to know the Niceness value of all of those. Strings are arranged in an array A, and the Niceness value of string at position i is defined as the number of strings having position less than i which are lexicographicaly smaller than . Since nowadays, Monk is very busy with the Code Monk Series, he asked for your help.
Note: Array's index starts from 1.
Input:
First line consists of a single integer denoting N.
N lines follow each containing a string made of lower case English alphabets.
Output:
Print N lines, each containing an integer, where the integer in line denotes Niceness value of string .
'''
# Solution
def niceness_values(strings):
    """Return each string's Niceness value: the number of strings at
    earlier positions that are lexicographically smaller than it."""
    return [
        sum(1 for earlier in strings[:pos] if earlier < current)
        for pos, current in enumerate(strings)
    ]


def main():
    """Read N strings from stdin and print one Niceness value per line.

    Fixes the original, whose counting loop ran only once after all input
    was read (comparing every string against the last one) and printed a
    single number instead of the required N lines.
    """
    n = int(input())
    strings = [input() for _ in range(n)]
    for value in niceness_values(strings):
        print(value)


if __name__ == "__main__":
    main()
|
'''
*** Problem ***
Monk and Nice Strings
Monk's best friend Micro's birthday is coming up. Micro likes Nice Strings very much, so Monk decided to gift him one. Monk is having N nice strings, so he'll choose one from those. But before he selects one, he need to know the Niceness value of all of those. Strings are arranged in an array A, and the Niceness value of string at position i is defined as the number of strings having position less than i which are lexicographicaly smaller than . Since nowadays, Monk is very busy with the Code Monk Series, he asked for your help.
Note: Array's index starts from 1.
Input:
First line consists of a single integer denoting N.
N lines follow each containing a string made of lower case English alphabets.
Output:
Print N lines, each containing an integer, where the integer in line denotes Niceness value of string .
'''
# Solution
# NOTE(review): indentation was lost in this dump. As written flat, the
# counting loop runs once after all input is read (using the stale loop
# variable i, i.e. the last index) and prints a single number, whereas the
# problem statement asks for one Niceness value per string — the counting
# block was presumably nested inside the reading loop; confirm against the
# original repository.
n = int(input())
a = []
for i in range(n):
a.append(input())
ans = 0
for j in a:
if j < a[i]:
ans += 1
print(ans)
|
en
| 0.936643
|
*** Problem *** Monk and Nice Strings Monk's best friend Micro's birthday is coming up. Micro likes Nice Strings very much, so Monk decided to gift him one. Monk is having N nice strings, so he'll choose one from those. But before he selects one, he need to know the Niceness value of all of those. Strings are arranged in an array A, and the Niceness value of string at position i is defined as the number of strings having position less than i which are lexicographicaly smaller than . Since nowadays, Monk is very busy with the Code Monk Series, he asked for your help. Note: Array's index starts from 1. Input: First line consists of a single integer denoting N. N lines follow each containing a string made of lower case English alphabets. Output: Print N lines, each containing an integer, where the integer in line denotes Niceness value of string . # Solution
| 3.88185
| 4
|
pagination_vm/merge.py
|
recs12/pagination_vm
| 0
|
6625658
|
from PyPDF4 import PdfFileReader, PdfFileWriter
from PyPDF4.pdf import PageObject
def paginate_pdf(pdf_name, number_page, pagination_template):
    """Create a new pdf with the pagination footer
    starting from the second page of the manual.

    pdf_name -- input manual PDF; the output keeps the same name under
    ./paginatedPDFs/.
    number_page -- number of pages to process.
    pagination_template -- PDF whose pages carry the footer to merge in.
    """
    writer = PdfFileWriter()
    # Context managers guarantee both source streams are closed even if a
    # merge step raises (the original leaked the handles on error). The
    # write happens inside the block because the readers pull page data
    # from the still-open streams.
    with open(pdf_name, "rb") as stream_manuals, \
            open(pagination_template, "rb") as stream_pagination:
        manuals = PdfFileReader(stream_manuals)
        pagination = PdfFileReader(stream_pagination)
        for i in range(number_page):
            manuals_page = manuals.getPage(i)  # pageNumber: 0
            pagination_page = pagination.getPage(i)
            # Blank landscape canvas: the manual page's height becomes
            # the new width.
            translated_page = PageObject.createBlankPage(
                None,
                width=manuals_page.mediaBox.getHeight(),  # width = 1224
                height=manuals_page.mediaBox.getWidth(),  # height = 792
            )
            # Footer first, then the rotated manual page on top of it.
            translated_page.mergePage(pagination_page)
            translated_page.mergeRotatedScaledTranslatedPage(
                manuals_page, rotation=-90, scale=1, tx=0, ty=792, expand=True
            )
            writer.addPage(translated_page)
        pdf_out = "./paginatedPDFs/"+pdf_name  # New name of the output pdf file.
        with open(pdf_out, "wb") as out_stream:
            writer.write(out_stream)
    print(f"{pdf_out} copied to paginatedPDFs\n")
|
from PyPDF4 import PdfFileReader, PdfFileWriter
from PyPDF4.pdf import PageObject
def paginate_pdf(pdf_name, number_page, pagination_template):
"""Create a new pdf with the pagination footer
starting from the second page of the manual.
"""
# NOTE(review): indentation was lost in this dump; comments mark the
# logical structure.
writer = PdfFileWriter()
# Open the manual and the footer template. The readers pull page data
# lazily, so both streams must stay open until writer.write() finishes.
stream_manuals = open(pdf_name, "rb")
manuals = PdfFileReader(stream_manuals)
stream_pagination = open(pagination_template, "rb")
pagination = PdfFileReader(stream_pagination)
for i in range(number_page):
manuals_page = manuals.getPage(i) # pageNumber: 0
pagination_page = pagination.getPage(i)
# Stack blank page
# Landscape canvas: the manual page's height becomes the new width.
translated_page = PageObject.createBlankPage(
None,
width=manuals_page.mediaBox.getHeight(), # width = 1224
height=manuals_page.mediaBox.getWidth(), # height = 792
)
# Stack pagination
translated_page.mergePage(pagination_page)
# Stack manual
# Rotate -90 degrees and translate so the page lands on the canvas.
translated_page.mergeRotatedScaledTranslatedPage(
manuals_page, rotation=-90, scale=1, tx=0, ty=792, expand=True
)
writer.addPage(translated_page)
pdf_out = "./paginatedPDFs/"+pdf_name # New name of the output pdf file.
with open(pdf_out, "wb") as _:
writer.write(_)
# NOTE(review): these handles leak if an exception occurs above; `with`
# blocks would be safer.
stream_manuals.close()
stream_pagination.close()
print(f"{pdf_out} copied to paginatedPDFs\n")
|
en
| 0.538737
|
Create a new pdf with the pagination footer starting from the second page of the manual. # pageNumber: 0 # Stack blank page # width = 1224 # height = 792 # Stack pagination # Stack manual # New name of the output pdf file.
| 3.359417
| 3
|
stupid/meta.py
|
masell/stupid
| 0
|
6625659
|
from abc import ABCMeta
from dataclasses import dataclass
# NOTE(review): indentation was lost in this dump; comments mark the
# logical structure of this deliberately tricky metaclass.
class StupidMeta(ABCMeta):
# Metaclass that builds each class twice: a first slot-less pass (kept
# under __stupid__) and a second pass whose __slots__ cover every
# annotated field.
def __new__(mcls, name, bases, namespace, **kwargs):
# Gather annotations from every base that declares any.
inherited_annotations = {}
for base in bases:
try:
inherited_annotations.update(base.__annotations__)
except AttributeError:
pass
# Merge the class's own annotations with the inherited ones.
try:
annotations = dict(**namespace['__annotations__'], **inherited_annotations)
except KeyError:
annotations = inherited_annotations
if annotations:
namespace['__annotations__'] = annotations
# First build: empty slots, used to probe which annotated names
# already resolve to a class-level value.
namespace['__slots__'] = ()
# Use each base's original (__stupid__) build so both passes see
# consistent bases.
bases = tuple(base.__stupid__ if hasattr(base, '__stupid__') else base for base in bases)
cls = super().__new__(mcls, name, bases, namespace, **kwargs)
# Partition fields: a_kw = names that resolve on the first build,
# a_list = names that don't; a_kw is ordered after a_list below.
a_list = {}
a_kw = {}
for fieldname,field in annotations.items():
try:
getattr(cls, fieldname)
a_kw[fieldname] = field
# NOTE(review): bare except also swallows non-AttributeError failures
# raised by descriptors — presumably only a missing attribute is
# intended here; confirm before narrowing to AttributeError.
except:
a_list[fieldname] = field
namespace['__annotations__'] = {**a_list, **a_kw}
# Second build: one slot per annotated field; class-level values are
# popped from the namespace so they don't clash with the slots.
slots = tuple(annotations.keys())
{slot: namespace.pop(slot, None) for slot in slots}
namespace['__slots__'] = slots
namespace['__stupid__'] = cls
ncls = super().__new__(mcls, name, bases, namespace, **kwargs)
return ncls
def __getattribute__(self, name):
# Class-level reads of slotted names are redirected to the first,
# slot-less build stored in __stupid__.
slots = super().__getattribute__('__slots__')
if name in slots:
stupid = super().__getattribute__('__stupid__')
return getattr(stupid, name)
return super().__getattribute__(name)
def __instancecheck__(cls, instance):
# isinstance() is delegated to the original slot-less class.
return super(StupidMeta, cls.__stupid__).__instancecheck__(instance)
def __subclasscheck__(cls, subclass):
# issubclass() likewise, falling back to cls when __stupid__ is absent.
try:
stupid = cls.__stupid__
except AttributeError:
stupid = cls
return super(StupidMeta, stupid).__subclasscheck__(subclass)
class Stupid(metaclass=StupidMeta):
# Convenience base class that applies StupidMeta.
__slots__ = ()
class StupidDataMeta(StupidMeta):
# Same two-pass build, with the final class wrapped as a dataclass.
def __new__(mcls, name, bases, namespace, **kwargs):
ncls = dataclass(super().__new__(mcls, name, bases, namespace, **kwargs))
return ncls
class StupidData(metaclass=StupidDataMeta):
# Convenience base class that applies StupidDataMeta.
__slots__ = ()
|
from abc import ABCMeta
from dataclasses import dataclass
class StupidMeta(ABCMeta):
def __new__(mcls, name, bases, namespace, **kwargs):
inherited_annotations = {}
for base in bases:
try:
inherited_annotations.update(base.__annotations__)
except AttributeError:
pass
try:
annotations = dict(**namespace['__annotations__'], **inherited_annotations)
except KeyError:
annotations = inherited_annotations
if annotations:
namespace['__annotations__'] = annotations
namespace['__slots__'] = ()
bases = tuple(base.__stupid__ if hasattr(base, '__stupid__') else base for base in bases)
cls = super().__new__(mcls, name, bases, namespace, **kwargs)
a_list = {}
a_kw = {}
for fieldname,field in annotations.items():
try:
getattr(cls, fieldname)
a_kw[fieldname] = field
except:
a_list[fieldname] = field
namespace['__annotations__'] = {**a_list, **a_kw}
slots = tuple(annotations.keys())
{slot: namespace.pop(slot, None) for slot in slots}
namespace['__slots__'] = slots
namespace['__stupid__'] = cls
ncls = super().__new__(mcls, name, bases, namespace, **kwargs)
return ncls
def __getattribute__(self, name):
slots = super().__getattribute__('__slots__')
if name in slots:
stupid = super().__getattribute__('__stupid__')
return getattr(stupid, name)
return super().__getattribute__(name)
def __instancecheck__(cls, instance):
return super(StupidMeta, cls.__stupid__).__instancecheck__(instance)
def __subclasscheck__(cls, subclass):
try:
stupid = cls.__stupid__
except AttributeError:
stupid = cls
return super(StupidMeta, stupid).__subclasscheck__(subclass)
class Stupid(metaclass=StupidMeta):
__slots__ = ()
class StupidDataMeta(StupidMeta):
def __new__(mcls, name, bases, namespace, **kwargs):
ncls = dataclass(super().__new__(mcls, name, bases, namespace, **kwargs))
return ncls
class StupidData(metaclass=StupidDataMeta):
__slots__ = ()
|
none
| 1
| 2.663021
| 3
|
|
compressible_examples/schur_complement_solver.py
|
thomasgibson/firedrake-hybridization
| 0
|
6625660
|
<reponame>thomasgibson/firedrake-hybridization
from firedrake import (split, LinearVariationalProblem, Constant,
LinearVariationalSolver, TestFunctions, TrialFunctions,
TestFunction, TrialFunction, lhs, rhs, DirichletBC, FacetNormal,
div, dx, jump, avg, dS_v, dS_h, ds_v, ds_t, ds_b, ds_tb, inner,
dot, grad, Function, VectorSpaceBasis, BrokenElement,
FunctionSpace, MixedFunctionSpace)
from firedrake.petsc import flatten_parameters, PETSc
from firedrake.parloops import par_loop, READ, INC
from pyop2.profiling import timed_function, timed_region
from gusto.linear_solvers import TimesteppingSolver
from gusto.configuration import logger, DEBUG
from gusto import thermodynamics
__all__ = ['OldCompressibleSolver']
class OldCompressibleSolver(TimesteppingSolver):
"""
Timestepping linear solver object for the compressible equations
in theta-pi formulation with prognostic variables u,rho,theta.
This solver follows the following strategy:
(1) Analytically eliminate theta (introduces error near topography)
(2) Solve resulting system for (u,rho) using a Schur preconditioner
(3) Reconstruct theta
:arg state: a :class:`.State` object containing everything else.
:arg quadrature degree: tuple (q_h, q_v) where q_h is the required
quadrature degree in the horizontal direction and q_v is that in
the vertical direction
:arg solver_parameters (optional): solver parameters
:arg overwrite_solver_parameters: boolean, if True use only the
solver_parameters that have been passed in, if False then update
the default solver parameters with the solver_parameters passed in.
:arg moisture (optional): list of names of moisture fields.
"""
solver_parameters = {
'pc_type': 'fieldsplit',
'pc_fieldsplit_type': 'schur',
'ksp_type': 'gcr',
'ksp_monitor_true_residual': None,
'ksp_max_it': 100,
'pc_fieldsplit_schur_fact_type': 'FULL',
'pc_fieldsplit_schur_precondition': 'selfp',
'fieldsplit_0': {'ksp_type': 'preonly',
'pc_type': 'bjacobi',
'sub_pc_type': 'ilu'},
'fieldsplit_1': {'ksp_type': 'fgmres',
'ksp_monitor_true_residual': None,
'ksp_rtol': 1.0e-8,
'ksp_atol': 1.0e-8,
'ksp_max_it': 100,
'pc_type': 'gamg',
'pc_gamg_sym_graph': None,
'mg_levels': {'ksp_type': 'gmres',
'ksp_max_it': 5,
'pc_type': 'bjacobi',
'sub_pc_type': 'ilu'}}
}
def __init__(self, state, quadrature_degree=None, solver_parameters=None,
overwrite_solver_parameters=False, moisture=None):
self.moisture = moisture
if quadrature_degree is not None:
self.quadrature_degree = quadrature_degree
else:
dgspace = state.spaces("DG")
if any(deg > 2 for deg in dgspace.ufl_element().degree()):
logger.warning("default quadrature degree most likely not sufficient for this degree element")
self.quadrature_degree = (5, 5)
super().__init__(state, solver_parameters, overwrite_solver_parameters)
@timed_function("Gusto:SolverSetup")
def _setup_solver(self):
state = self.state # just cutting down line length a bit
Dt = state.timestepping.dt
beta_ = Dt*state.timestepping.alpha
cp = state.parameters.cp
mu = state.mu
Vu = state.spaces("HDiv")
Vtheta = state.spaces("HDiv_v")
Vrho = state.spaces("DG")
# Store time-stepping coefficients as UFL Constants
dt = Constant(Dt)
beta = Constant(beta_)
beta_cp = Constant(beta_ * cp)
# Split up the rhs vector (symbolically)
u_in, rho_in, theta_in = split(state.xrhs)
# Build the reduced function space for u,rho
M = MixedFunctionSpace((Vu, Vrho))
w, phi = TestFunctions(M)
u, rho = TrialFunctions(M)
n = FacetNormal(state.mesh)
# Get background fields
thetabar = state.fields("thetabar")
rhobar = state.fields("rhobar")
pibar = thermodynamics.pi(state.parameters, rhobar, thetabar)
pibar_rho = thermodynamics.pi_rho(state.parameters, rhobar, thetabar)
pibar_theta = thermodynamics.pi_theta(state.parameters, rhobar, thetabar)
# Analytical (approximate) elimination of theta
k = state.k # Upward pointing unit vector
theta = -dot(k, u)*dot(k, grad(thetabar))*beta + theta_in
# Only include theta' (rather than pi') in the vertical
# component of the gradient
# the pi prime term (here, bars are for mean and no bars are
# for linear perturbations)
pi = pibar_theta*theta + pibar_rho*rho
# vertical projection
def V(u):
return k*inner(u, k)
# specify degree for some terms as estimated degree is too large
dxp = dx(degree=(self.quadrature_degree))
dS_vp = dS_v(degree=(self.quadrature_degree))
# add effect of density of water upon theta
if self.moisture is not None:
water_t = Function(Vtheta).assign(0.0)
for water in self.moisture:
water_t += self.state.fields(water)
theta_w = theta / (1 + water_t)
thetabar_w = thetabar / (1 + water_t)
else:
theta_w = theta
thetabar_w = thetabar
eqn = (
inner(w, (state.h_project(u) - u_in))*dx
- beta_cp*div(theta_w*V(w))*pibar*dxp
# following does nothing but is preserved in the comments
# to remind us why (because V(w) is purely vertical).
# + beta_cp*jump(theta*V(w), n)*avg(pibar)*dS_v
- beta_cp*div(thetabar_w*w)*pi*dxp
+ beta_cp*jump(thetabar_w*w, n)*avg(pi)*dS_vp
+ (phi*(rho - rho_in) - beta*inner(grad(phi), u)*rhobar)*dx
+ beta*jump(phi*u, n)*avg(rhobar)*(dS_v + dS_h)
)
if mu is not None:
eqn += dt*mu*inner(w, k)*inner(u, k)*dx
aeqn = lhs(eqn)
Leqn = rhs(eqn)
# Place to put result of u rho solver
self.urho = Function(M)
# Boundary conditions (assumes extruded mesh)
bcs = [DirichletBC(M.sub(0), 0.0, "bottom"),
DirichletBC(M.sub(0), 0.0, "top")]
# Solver for u, rho
urho_problem = LinearVariationalProblem(
aeqn, Leqn, self.urho, bcs=bcs)
self.urho_solver = LinearVariationalSolver(urho_problem,
solver_parameters=self.solver_parameters,
options_prefix='ImplicitSolver')
# Reconstruction of theta
theta = TrialFunction(Vtheta)
gamma = TestFunction(Vtheta)
u, rho = self.urho.split()
self.theta = Function(Vtheta)
theta_eqn = gamma*(theta - theta_in
+ dot(k, u)*dot(k, grad(thetabar))*beta)*dx
theta_problem = LinearVariationalProblem(lhs(theta_eqn),
rhs(theta_eqn),
self.theta)
self.theta_solver = LinearVariationalSolver(theta_problem,
options_prefix='thetabacksubstitution')
@timed_function("Gusto:SchurCompLinearSolve")
def solve(self):
"""
Apply the solver with rhs state.xrhs and result state.dy.
"""
with timed_region("Gusto:VelocityDensitySolve"):
self.urho_solver.solve()
u1, rho1 = self.urho.split()
u, rho, theta = self.state.dy.split()
u.assign(u1)
rho.assign(rho1)
with timed_region("Gusto:ThetaRecon"):
self.theta_solver.solve()
theta.assign(self.theta)
|
from firedrake import (split, LinearVariationalProblem, Constant,
LinearVariationalSolver, TestFunctions, TrialFunctions,
TestFunction, TrialFunction, lhs, rhs, DirichletBC, FacetNormal,
div, dx, jump, avg, dS_v, dS_h, ds_v, ds_t, ds_b, ds_tb, inner,
dot, grad, Function, VectorSpaceBasis, BrokenElement,
FunctionSpace, MixedFunctionSpace)
from firedrake.petsc import flatten_parameters, PETSc
from firedrake.parloops import par_loop, READ, INC
from pyop2.profiling import timed_function, timed_region
from gusto.linear_solvers import TimesteppingSolver
from gusto.configuration import logger, DEBUG
from gusto import thermodynamics
__all__ = ['OldCompressibleSolver']
class OldCompressibleSolver(TimesteppingSolver):
"""
Timestepping linear solver object for the compressible equations
in theta-pi formulation with prognostic variables u,rho,theta.
This solver follows the following strategy:
(1) Analytically eliminate theta (introduces error near topography)
(2) Solve resulting system for (u,rho) using a Schur preconditioner
(3) Reconstruct theta
:arg state: a :class:`.State` object containing everything else.
:arg quadrature degree: tuple (q_h, q_v) where q_h is the required
quadrature degree in the horizontal direction and q_v is that in
the vertical direction
:arg solver_parameters (optional): solver parameters
:arg overwrite_solver_parameters: boolean, if True use only the
solver_parameters that have been passed in, if False then update
the default solver parameters with the solver_parameters passed in.
:arg moisture (optional): list of names of moisture fields.
"""
solver_parameters = {
'pc_type': 'fieldsplit',
'pc_fieldsplit_type': 'schur',
'ksp_type': 'gcr',
'ksp_monitor_true_residual': None,
'ksp_max_it': 100,
'pc_fieldsplit_schur_fact_type': 'FULL',
'pc_fieldsplit_schur_precondition': 'selfp',
'fieldsplit_0': {'ksp_type': 'preonly',
'pc_type': 'bjacobi',
'sub_pc_type': 'ilu'},
'fieldsplit_1': {'ksp_type': 'fgmres',
'ksp_monitor_true_residual': None,
'ksp_rtol': 1.0e-8,
'ksp_atol': 1.0e-8,
'ksp_max_it': 100,
'pc_type': 'gamg',
'pc_gamg_sym_graph': None,
'mg_levels': {'ksp_type': 'gmres',
'ksp_max_it': 5,
'pc_type': 'bjacobi',
'sub_pc_type': 'ilu'}}
}
def __init__(self, state, quadrature_degree=None, solver_parameters=None,
overwrite_solver_parameters=False, moisture=None):
self.moisture = moisture
if quadrature_degree is not None:
self.quadrature_degree = quadrature_degree
else:
dgspace = state.spaces("DG")
if any(deg > 2 for deg in dgspace.ufl_element().degree()):
logger.warning("default quadrature degree most likely not sufficient for this degree element")
self.quadrature_degree = (5, 5)
super().__init__(state, solver_parameters, overwrite_solver_parameters)
@timed_function("Gusto:SolverSetup")
def _setup_solver(self):
state = self.state # just cutting down line length a bit
Dt = state.timestepping.dt
beta_ = Dt*state.timestepping.alpha
cp = state.parameters.cp
mu = state.mu
Vu = state.spaces("HDiv")
Vtheta = state.spaces("HDiv_v")
Vrho = state.spaces("DG")
# Store time-stepping coefficients as UFL Constants
dt = Constant(Dt)
beta = Constant(beta_)
beta_cp = Constant(beta_ * cp)
# Split up the rhs vector (symbolically)
u_in, rho_in, theta_in = split(state.xrhs)
# Build the reduced function space for u,rho
M = MixedFunctionSpace((Vu, Vrho))
w, phi = TestFunctions(M)
u, rho = TrialFunctions(M)
n = FacetNormal(state.mesh)
# Get background fields
thetabar = state.fields("thetabar")
rhobar = state.fields("rhobar")
pibar = thermodynamics.pi(state.parameters, rhobar, thetabar)
pibar_rho = thermodynamics.pi_rho(state.parameters, rhobar, thetabar)
pibar_theta = thermodynamics.pi_theta(state.parameters, rhobar, thetabar)
# Analytical (approximate) elimination of theta
k = state.k # Upward pointing unit vector
theta = -dot(k, u)*dot(k, grad(thetabar))*beta + theta_in
# Only include theta' (rather than pi') in the vertical
# component of the gradient
# the pi prime term (here, bars are for mean and no bars are
# for linear perturbations)
pi = pibar_theta*theta + pibar_rho*rho
# vertical projection
def V(u):
return k*inner(u, k)
# specify degree for some terms as estimated degree is too large
dxp = dx(degree=(self.quadrature_degree))
dS_vp = dS_v(degree=(self.quadrature_degree))
# add effect of density of water upon theta
if self.moisture is not None:
water_t = Function(Vtheta).assign(0.0)
for water in self.moisture:
water_t += self.state.fields(water)
theta_w = theta / (1 + water_t)
thetabar_w = thetabar / (1 + water_t)
else:
theta_w = theta
thetabar_w = thetabar
eqn = (
inner(w, (state.h_project(u) - u_in))*dx
- beta_cp*div(theta_w*V(w))*pibar*dxp
# following does nothing but is preserved in the comments
# to remind us why (because V(w) is purely vertical).
# + beta_cp*jump(theta*V(w), n)*avg(pibar)*dS_v
- beta_cp*div(thetabar_w*w)*pi*dxp
+ beta_cp*jump(thetabar_w*w, n)*avg(pi)*dS_vp
+ (phi*(rho - rho_in) - beta*inner(grad(phi), u)*rhobar)*dx
+ beta*jump(phi*u, n)*avg(rhobar)*(dS_v + dS_h)
)
if mu is not None:
eqn += dt*mu*inner(w, k)*inner(u, k)*dx
aeqn = lhs(eqn)
Leqn = rhs(eqn)
# Place to put result of u rho solver
self.urho = Function(M)
# Boundary conditions (assumes extruded mesh)
bcs = [DirichletBC(M.sub(0), 0.0, "bottom"),
DirichletBC(M.sub(0), 0.0, "top")]
# Solver for u, rho
urho_problem = LinearVariationalProblem(
aeqn, Leqn, self.urho, bcs=bcs)
self.urho_solver = LinearVariationalSolver(urho_problem,
solver_parameters=self.solver_parameters,
options_prefix='ImplicitSolver')
# Reconstruction of theta
theta = TrialFunction(Vtheta)
gamma = TestFunction(Vtheta)
u, rho = self.urho.split()
self.theta = Function(Vtheta)
theta_eqn = gamma*(theta - theta_in
+ dot(k, u)*dot(k, grad(thetabar))*beta)*dx
theta_problem = LinearVariationalProblem(lhs(theta_eqn),
rhs(theta_eqn),
self.theta)
self.theta_solver = LinearVariationalSolver(theta_problem,
options_prefix='thetabacksubstitution')
@timed_function("Gusto:SchurCompLinearSolve")
def solve(self):
"""
Apply the solver with rhs state.xrhs and result state.dy.
"""
with timed_region("Gusto:VelocityDensitySolve"):
self.urho_solver.solve()
u1, rho1 = self.urho.split()
u, rho, theta = self.state.dy.split()
u.assign(u1)
rho.assign(rho1)
with timed_region("Gusto:ThetaRecon"):
self.theta_solver.solve()
theta.assign(self.theta)
|
en
| 0.800861
|
Timestepping linear solver object for the compressible equations in theta-pi formulation with prognostic variables u,rho,theta. This solver follows the following strategy: (1) Analytically eliminate theta (introduces error near topography) (2) Solve resulting system for (u,rho) using a Schur preconditioner (3) Reconstruct theta :arg state: a :class:`.State` object containing everything else. :arg quadrature degree: tuple (q_h, q_v) where q_h is the required quadrature degree in the horizontal direction and q_v is that in the vertical direction :arg solver_parameters (optional): solver parameters :arg overwrite_solver_parameters: boolean, if True use only the solver_parameters that have been passed in, if False then update the default solver parameters with the solver_parameters passed in. :arg moisture (optional): list of names of moisture fields. # just cutting down line length a bit # Store time-stepping coefficients as UFL Constants # Split up the rhs vector (symbolically) # Build the reduced function space for u,rho # Get background fields # Analytical (approximate) elimination of theta # Upward pointing unit vector # Only include theta' (rather than pi') in the vertical # component of the gradient # the pi prime term (here, bars are for mean and no bars are # for linear perturbations) # vertical projection # specify degree for some terms as estimated degree is too large # add effect of density of water upon theta # following does nothing but is preserved in the comments # to remind us why (because V(w) is purely vertical). # + beta_cp*jump(theta*V(w), n)*avg(pibar)*dS_v # Place to put result of u rho solver # Boundary conditions (assumes extruded mesh) # Solver for u, rho # Reconstruction of theta Apply the solver with rhs state.xrhs and result state.dy.
| 2.15922
| 2
|
test/input_gen/genModelTests.py
|
dongju-chae/nntrainer
| 0
|
6625661
|
<gh_stars>0
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
##
# Copyright (C) 2020 <NAME> <<EMAIL>>
#
# @file getModelTests.py
# @date 13 October 2020
# @brief Generate tc using KerasRecorder
# @author <NAME> <<EMAIL>>
import warnings
from recorder import KerasRecorder
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import numpy as np
import tensorflow as tf
from tensorflow.python import keras as K
if __name__ == "__main__":
inp = K.Input(shape=(3, 3))
a = K.layers.Dense(5)(inp)
b = K.layers.Dense(5)(a)
c = K.layers.Dense(10)(b)
d = K.layers.Activation("softmax")(c)
KerasRecorder(
file_name="fc_softmax_mse.info",
inputs=inp,
outputs=[inp, a, b, c, d],
input_shape=(3, 3),
label_shape=(3, 10),
loss_fn=tf.keras.losses.MeanSquaredError(),
).run(10)
inp = K.Input(shape=(3, 3))
a = K.layers.Dense(10)(inp)
b = K.layers.Activation("relu")(a)
c = K.layers.Dense(10)(b)
d = K.layers.Activation("relu")(c)
e = K.layers.Dense(2)(d)
f = K.layers.Activation("relu")(e)
KerasRecorder(
file_name="fc_relu_mse.info",
inputs=inp,
outputs=[inp, a, b, c, d, e, f],
input_shape=(3, 3),
label_shape=(3, 2),
loss_fn=tf.keras.losses.MeanSquaredError(),
optimizer=tf.keras.optimizers.SGD(lr=0.001)
).run(10)
|
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
##
# Copyright (C) 2020 <NAME> <<EMAIL>>
#
# @file getModelTests.py
# @date 13 October 2020
# @brief Generate tc using KerasRecorder
# @author <NAME> <<EMAIL>>
import warnings
from recorder import KerasRecorder
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import numpy as np
import tensorflow as tf
from tensorflow.python import keras as K
if __name__ == "__main__":
inp = K.Input(shape=(3, 3))
a = K.layers.Dense(5)(inp)
b = K.layers.Dense(5)(a)
c = K.layers.Dense(10)(b)
d = K.layers.Activation("softmax")(c)
KerasRecorder(
file_name="fc_softmax_mse.info",
inputs=inp,
outputs=[inp, a, b, c, d],
input_shape=(3, 3),
label_shape=(3, 10),
loss_fn=tf.keras.losses.MeanSquaredError(),
).run(10)
inp = K.Input(shape=(3, 3))
a = K.layers.Dense(10)(inp)
b = K.layers.Activation("relu")(a)
c = K.layers.Dense(10)(b)
d = K.layers.Activation("relu")(c)
e = K.layers.Dense(2)(d)
f = K.layers.Activation("relu")(e)
KerasRecorder(
file_name="fc_relu_mse.info",
inputs=inp,
outputs=[inp, a, b, c, d, e, f],
input_shape=(3, 3),
label_shape=(3, 2),
loss_fn=tf.keras.losses.MeanSquaredError(),
optimizer=tf.keras.optimizers.SGD(lr=0.001)
).run(10)
|
en
| 0.221761
|
#!/usr/bin/env python3 # SPDX-License-Identifier: Apache-2.0 ## # Copyright (C) 2020 <NAME> <<EMAIL>> # # @file getModelTests.py # @date 13 October 2020 # @brief Generate tc using KerasRecorder # @author <NAME> <<EMAIL>>
| 2.196177
| 2
|
moto/route53/responses.py
|
orenmazor/moto
| 1
|
6625662
|
from __future__ import unicode_literals
from jinja2 import Template
from six.moves.urllib.parse import parse_qs, urlparse
from moto.core.responses import BaseResponse
from .models import route53_backend
import xmltodict
class Route53(BaseResponse):
def list_or_create_hostzone_response(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
if request.method == "POST":
elements = xmltodict.parse(self.body)
if "HostedZoneConfig" in elements["CreateHostedZoneRequest"]:
comment = elements["CreateHostedZoneRequest"]["HostedZoneConfig"][
"Comment"
]
try:
# in boto3, this field is set directly in the xml
private_zone = elements["CreateHostedZoneRequest"][
"HostedZoneConfig"
]["PrivateZone"]
except KeyError:
# if a VPC subsection is only included in xmls params when private_zone=True,
# see boto: boto/route53/connection.py
private_zone = "VPC" in elements["CreateHostedZoneRequest"]
else:
comment = None
private_zone = False
name = elements["CreateHostedZoneRequest"]["Name"]
if name[-1] != ".":
name += "."
new_zone = route53_backend.create_hosted_zone(
name, comment=comment, private_zone=private_zone
)
template = Template(CREATE_HOSTED_ZONE_RESPONSE)
return 201, headers, template.render(zone=new_zone)
elif request.method == "GET":
all_zones = route53_backend.get_all_hosted_zones()
template = Template(LIST_HOSTED_ZONES_RESPONSE)
return 200, headers, template.render(zones=all_zones)
def list_hosted_zones_by_name_response(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
parsed_url = urlparse(full_url)
query_params = parse_qs(parsed_url.query)
dnsname = query_params.get("dnsname")
if dnsname:
dnsname = dnsname[0]
if dnsname[-1] != ".":
dnsname += "."
zones = [
zone
for zone in route53_backend.get_all_hosted_zones()
if zone.name == dnsname
]
else:
# sort by names, but with domain components reversed
# see http://boto3.readthedocs.io/en/latest/reference/services/route53.html#Route53.Client.list_hosted_zones_by_name
def sort_key(zone):
domains = zone.name.split(".")
if domains[-1] == "":
domains = domains[-1:] + domains[:-1]
return ".".join(reversed(domains))
zones = route53_backend.get_all_hosted_zones()
zones = sorted(zones, key=sort_key)
template = Template(LIST_HOSTED_ZONES_BY_NAME_RESPONSE)
return 200, headers, template.render(zones=zones, dnsname=dnsname)
def get_or_delete_hostzone_response(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
parsed_url = urlparse(full_url)
zoneid = parsed_url.path.rstrip("/").rsplit("/", 1)[1]
the_zone = route53_backend.get_hosted_zone(zoneid)
if not the_zone:
return 404, headers, "Zone %s not Found" % zoneid
if request.method == "GET":
template = Template(GET_HOSTED_ZONE_RESPONSE)
return 200, headers, template.render(zone=the_zone)
elif request.method == "DELETE":
route53_backend.delete_hosted_zone(zoneid)
return 200, headers, DELETE_HOSTED_ZONE_RESPONSE
def rrset_response(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
parsed_url = urlparse(full_url)
method = request.method
zoneid = parsed_url.path.rstrip("/").rsplit("/", 2)[1]
the_zone = route53_backend.get_hosted_zone(zoneid)
if not the_zone:
return 404, headers, "Zone %s Not Found" % zoneid
if method == "POST":
elements = xmltodict.parse(self.body)
change_list = elements["ChangeResourceRecordSetsRequest"]["ChangeBatch"][
"Changes"
]["Change"]
if not isinstance(change_list, list):
change_list = [
elements["ChangeResourceRecordSetsRequest"]["ChangeBatch"][
"Changes"
]["Change"]
]
for value in change_list:
action = value["Action"]
record_set = value["ResourceRecordSet"]
cleaned_record_name = record_set["Name"].strip(".")
cleaned_hosted_zone_name = the_zone.name.strip(".")
if not cleaned_record_name.endswith(cleaned_hosted_zone_name):
error_msg = """
An error occurred (InvalidChangeBatch) when calling the ChangeResourceRecordSets operation:
RRSet with DNS name %s is not permitted in zone %s
""" % (
record_set["Name"],
the_zone.name,
)
return 400, headers, error_msg
if not record_set["Name"].endswith("."):
record_set["Name"] += "."
if action in ("CREATE", "UPSERT"):
if "ResourceRecords" in record_set:
resource_records = list(record_set["ResourceRecords"].values())[
0
]
if not isinstance(resource_records, list):
# Depending on how many records there are, this may
# or may not be a list
resource_records = [resource_records]
record_set["ResourceRecords"] = [
x["Value"] for x in resource_records
]
if action == "CREATE":
the_zone.add_rrset(record_set)
else:
the_zone.upsert_rrset(record_set)
elif action == "DELETE":
if "SetIdentifier" in record_set:
the_zone.delete_rrset_by_id(record_set["SetIdentifier"])
else:
the_zone.delete_rrset(record_set)
return 200, headers, CHANGE_RRSET_RESPONSE
elif method == "GET":
querystring = parse_qs(parsed_url.query)
template = Template(LIST_RRSET_RESPONSE)
start_type = querystring.get("type", [None])[0]
start_name = querystring.get("name", [None])[0]
if start_type and not start_name:
return 400, headers, "The input is not valid"
record_sets = the_zone.get_record_sets(start_type, start_name)
return 200, headers, template.render(record_sets=record_sets)
def health_check_response(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
parsed_url = urlparse(full_url)
method = request.method
if method == "POST":
properties = xmltodict.parse(self.body)["CreateHealthCheckRequest"][
"HealthCheckConfig"
]
health_check_args = {
"ip_address": properties.get("IPAddress"),
"port": properties.get("Port"),
"type": properties["Type"],
"resource_path": properties.get("ResourcePath"),
"fqdn": properties.get("FullyQualifiedDomainName"),
"search_string": properties.get("SearchString"),
"request_interval": properties.get("RequestInterval"),
"failure_threshold": properties.get("FailureThreshold"),
}
health_check = route53_backend.create_health_check(health_check_args)
template = Template(CREATE_HEALTH_CHECK_RESPONSE)
return 201, headers, template.render(health_check=health_check)
elif method == "DELETE":
health_check_id = parsed_url.path.split("/")[-1]
route53_backend.delete_health_check(health_check_id)
return 200, headers, DELETE_HEALTH_CHECK_RESPONSE
elif method == "GET":
template = Template(LIST_HEALTH_CHECKS_RESPONSE)
health_checks = route53_backend.get_health_checks()
return 200, headers, template.render(health_checks=health_checks)
def not_implemented_response(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
action = ""
if "tags" in full_url:
action = "tags"
elif "trafficpolicyinstances" in full_url:
action = "policies"
raise NotImplementedError(
"The action for {0} has not been implemented for route 53".format(action)
)
def list_or_change_tags_for_resource_request(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
parsed_url = urlparse(full_url)
id_ = parsed_url.path.split("/")[-1]
type_ = parsed_url.path.split("/")[-2]
if request.method == "GET":
tags = route53_backend.list_tags_for_resource(id_)
template = Template(LIST_TAGS_FOR_RESOURCE_RESPONSE)
return (
200,
headers,
template.render(resource_type=type_, resource_id=id_, tags=tags),
)
if request.method == "POST":
tags = xmltodict.parse(self.body)["ChangeTagsForResourceRequest"]
if "AddTags" in tags:
tags = tags["AddTags"]
elif "RemoveTagKeys" in tags:
tags = tags["RemoveTagKeys"]
route53_backend.change_tags_for_resource(id_, tags)
template = Template(CHANGE_TAGS_FOR_RESOURCE_RESPONSE)
return 200, headers, template.render()
def get_change(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
if request.method == "GET":
parsed_url = urlparse(full_url)
change_id = parsed_url.path.rstrip("/").rsplit("/", 1)[1]
template = Template(GET_CHANGE_RESPONSE)
return 200, headers, template.render(change_id=change_id)
LIST_TAGS_FOR_RESOURCE_RESPONSE = """
<ListTagsForResourceResponse xmlns="https://route53.amazonaws.com/doc/2015-01-01/">
<ResourceTagSet>
<ResourceType>{{resource_type}}</ResourceType>
<ResourceId>{{resource_id}}</ResourceId>
<Tags>
{% for key, value in tags.items() %}
<Tag>
<Key>{{key}}</Key>
<Value>{{value}}</Value>
</Tag>
{% endfor %}
</Tags>
</ResourceTagSet>
</ListTagsForResourceResponse>
"""
CHANGE_TAGS_FOR_RESOURCE_RESPONSE = """<ChangeTagsForResourceResponse xmlns="https://route53.amazonaws.com/doc/2015-01-01/">
</ChangeTagsForResourceResponse>
"""
LIST_RRSET_RESPONSE = """<ListResourceRecordSetsResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<ResourceRecordSets>
{% for record_set in record_sets %}
{{ record_set.to_xml() }}
{% endfor %}
</ResourceRecordSets>
<IsTruncated>false</IsTruncated>
</ListResourceRecordSetsResponse>"""
CHANGE_RRSET_RESPONSE = """<ChangeResourceRecordSetsResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<ChangeInfo>
<Status>INSYNC</Status>
<SubmittedAt>2010-09-10T01:36:41.958Z</SubmittedAt>
<Id>/change/C2682N5HXP0BZ4</Id>
</ChangeInfo>
</ChangeResourceRecordSetsResponse>"""
DELETE_HOSTED_ZONE_RESPONSE = """<DeleteHostedZoneResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<ChangeInfo>
</ChangeInfo>
</DeleteHostedZoneResponse>"""
GET_HOSTED_ZONE_RESPONSE = """<GetHostedZoneResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<HostedZone>
<Id>/hostedzone/{{ zone.id }}</Id>
<Name>{{ zone.name }}</Name>
<ResourceRecordSetCount>{{ zone.rrsets|count }}</ResourceRecordSetCount>
<Config>
{% if zone.comment %}
<Comment>{{ zone.comment }}</Comment>
{% endif %}
<PrivateZone>{{ zone.private_zone }}</PrivateZone>
</Config>
</HostedZone>
<DelegationSet>
<NameServers>
<NameServer>moto.test.com</NameServer>
</NameServers>
</DelegationSet>
</GetHostedZoneResponse>"""
CREATE_HOSTED_ZONE_RESPONSE = """<CreateHostedZoneResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<HostedZone>
<Id>/hostedzone/{{ zone.id }}</Id>
<Name>{{ zone.name }}</Name>
<ResourceRecordSetCount>0</ResourceRecordSetCount>
<Config>
{% if zone.comment %}
<Comment>{{ zone.comment }}</Comment>
{% endif %}
<PrivateZone>{{ zone.private_zone }}</PrivateZone>
</Config>
</HostedZone>
<DelegationSet>
<NameServers>
<NameServer>moto.test.com</NameServer>
</NameServers>
</DelegationSet>
</CreateHostedZoneResponse>"""
LIST_HOSTED_ZONES_RESPONSE = """<ListHostedZonesResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<HostedZones>
{% for zone in zones %}
<HostedZone>
<Id>/hostedzone/{{ zone.id }}</Id>
<Name>{{ zone.name }}</Name>
<Config>
{% if zone.comment %}
<Comment>{{ zone.comment }}</Comment>
{% endif %}
<PrivateZone>{{ zone.private_zone }}</PrivateZone>
</Config>
<ResourceRecordSetCount>{{ zone.rrsets|count }}</ResourceRecordSetCount>
</HostedZone>
{% endfor %}
</HostedZones>
<IsTruncated>false</IsTruncated>
</ListHostedZonesResponse>"""
LIST_HOSTED_ZONES_BY_NAME_RESPONSE = """<ListHostedZonesByNameResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
{% if dnsname %}
<DNSName>{{ dnsname }}</DNSName>
{% endif %}
<HostedZones>
{% for zone in zones %}
<HostedZone>
<Id>/hostedzone/{{ zone.id }}</Id>
<Name>{{ zone.name }}</Name>
<Config>
{% if zone.comment %}
<Comment>{{ zone.comment }}</Comment>
{% endif %}
<PrivateZone>{{ zone.private_zone }}</PrivateZone>
</Config>
<ResourceRecordSetCount>{{ zone.rrsets|count }}</ResourceRecordSetCount>
</HostedZone>
{% endfor %}
</HostedZones>
<IsTruncated>false</IsTruncated>
</ListHostedZonesByNameResponse>"""
CREATE_HEALTH_CHECK_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CreateHealthCheckResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
{{ health_check.to_xml() }}
</CreateHealthCheckResponse>"""
LIST_HEALTH_CHECKS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListHealthChecksResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
<HealthChecks>
{% for health_check in health_checks %}
{{ health_check.to_xml() }}
{% endfor %}
</HealthChecks>
<IsTruncated>false</IsTruncated>
<MaxItems>{{ health_checks|length }}</MaxItems>
</ListHealthChecksResponse>"""
DELETE_HEALTH_CHECK_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteHealthCheckResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
</DeleteHealthCheckResponse>"""
GET_CHANGE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<GetChangeResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
<ChangeInfo>
<Status>INSYNC</Status>
<SubmittedAt>2010-09-10T01:36:41.958Z</SubmittedAt>
<Id>{{ change_id }}</Id>
</ChangeInfo>
</GetChangeResponse>"""
|
from __future__ import unicode_literals
from jinja2 import Template
from six.moves.urllib.parse import parse_qs, urlparse
from moto.core.responses import BaseResponse
from .models import route53_backend
import xmltodict
class Route53(BaseResponse):
def list_or_create_hostzone_response(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
if request.method == "POST":
elements = xmltodict.parse(self.body)
if "HostedZoneConfig" in elements["CreateHostedZoneRequest"]:
comment = elements["CreateHostedZoneRequest"]["HostedZoneConfig"][
"Comment"
]
try:
# in boto3, this field is set directly in the xml
private_zone = elements["CreateHostedZoneRequest"][
"HostedZoneConfig"
]["PrivateZone"]
except KeyError:
# if a VPC subsection is only included in xmls params when private_zone=True,
# see boto: boto/route53/connection.py
private_zone = "VPC" in elements["CreateHostedZoneRequest"]
else:
comment = None
private_zone = False
name = elements["CreateHostedZoneRequest"]["Name"]
if name[-1] != ".":
name += "."
new_zone = route53_backend.create_hosted_zone(
name, comment=comment, private_zone=private_zone
)
template = Template(CREATE_HOSTED_ZONE_RESPONSE)
return 201, headers, template.render(zone=new_zone)
elif request.method == "GET":
all_zones = route53_backend.get_all_hosted_zones()
template = Template(LIST_HOSTED_ZONES_RESPONSE)
return 200, headers, template.render(zones=all_zones)
def list_hosted_zones_by_name_response(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
parsed_url = urlparse(full_url)
query_params = parse_qs(parsed_url.query)
dnsname = query_params.get("dnsname")
if dnsname:
dnsname = dnsname[0]
if dnsname[-1] != ".":
dnsname += "."
zones = [
zone
for zone in route53_backend.get_all_hosted_zones()
if zone.name == dnsname
]
else:
# sort by names, but with domain components reversed
# see http://boto3.readthedocs.io/en/latest/reference/services/route53.html#Route53.Client.list_hosted_zones_by_name
def sort_key(zone):
domains = zone.name.split(".")
if domains[-1] == "":
domains = domains[-1:] + domains[:-1]
return ".".join(reversed(domains))
zones = route53_backend.get_all_hosted_zones()
zones = sorted(zones, key=sort_key)
template = Template(LIST_HOSTED_ZONES_BY_NAME_RESPONSE)
return 200, headers, template.render(zones=zones, dnsname=dnsname)
def get_or_delete_hostzone_response(self, request, full_url, headers):
self.setup_class(request, full_url, headers)
parsed_url = urlparse(full_url)
zoneid = parsed_url.path.rstrip("/").rsplit("/", 1)[1]
the_zone = route53_backend.get_hosted_zone(zoneid)
if not the_zone:
return 404, headers, "Zone %s not Found" % zoneid
if request.method == "GET":
template = Template(GET_HOSTED_ZONE_RESPONSE)
return 200, headers, template.render(zone=the_zone)
elif request.method == "DELETE":
route53_backend.delete_hosted_zone(zoneid)
return 200, headers, DELETE_HOSTED_ZONE_RESPONSE
def rrset_response(self, request, full_url, headers):
"""Change (POST) or list (GET) the resource record sets of a hosted zone.

POST parses a ChangeResourceRecordSets XML body and applies each
CREATE/UPSERT/DELETE change to the zone; GET renders the zone's record
sets, optionally filtered by `type`/`name` query parameters.
Returns a (status, headers, body) tuple.
"""
self.setup_class(request, full_url, headers)
parsed_url = urlparse(full_url)
method = request.method
# Zone id is the second-to-last path component (.../hostedzone/<id>/rrset).
zoneid = parsed_url.path.rstrip("/").rsplit("/", 2)[1]
the_zone = route53_backend.get_hosted_zone(zoneid)
if not the_zone:
return 404, headers, "Zone %s Not Found" % zoneid
if method == "POST":
elements = xmltodict.parse(self.body)
# xmltodict collapses a single <Change> into a dict rather than a list.
change_list = elements["ChangeResourceRecordSetsRequest"]["ChangeBatch"][
"Changes"
]["Change"]
if not isinstance(change_list, list):
change_list = [
elements["ChangeResourceRecordSetsRequest"]["ChangeBatch"][
"Changes"
]["Change"]
]
for value in change_list:
action = value["Action"]
record_set = value["ResourceRecordSet"]
# A record must belong to the zone: compare names with dots stripped.
cleaned_record_name = record_set["Name"].strip(".")
cleaned_hosted_zone_name = the_zone.name.strip(".")
if not cleaned_record_name.endswith(cleaned_hosted_zone_name):
error_msg = """
An error occurred (InvalidChangeBatch) when calling the ChangeResourceRecordSets operation:
RRSet with DNS name %s is not permitted in zone %s
""" % (
record_set["Name"],
the_zone.name,
)
return 400, headers, error_msg
# Normalize to a fully qualified name with a trailing dot.
if not record_set["Name"].endswith("."):
record_set["Name"] += "."
if action in ("CREATE", "UPSERT"):
if "ResourceRecords" in record_set:
resource_records = list(record_set["ResourceRecords"].values())[
0
]
if not isinstance(resource_records, list):
# Depending on how many records there are, this may
# or may not be a list
resource_records = [resource_records]
# Flatten to the bare record values for the backend.
record_set["ResourceRecords"] = [
x["Value"] for x in resource_records
]
if action == "CREATE":
the_zone.add_rrset(record_set)
else:
the_zone.upsert_rrset(record_set)
elif action == "DELETE":
if "SetIdentifier" in record_set:
the_zone.delete_rrset_by_id(record_set["SetIdentifier"])
else:
the_zone.delete_rrset(record_set)
return 200, headers, CHANGE_RRSET_RESPONSE
elif method == "GET":
querystring = parse_qs(parsed_url.query)
template = Template(LIST_RRSET_RESPONSE)
# `type` without `name` is rejected, mirroring the real API validation.
start_type = querystring.get("type", [None])[0]
start_name = querystring.get("name", [None])[0]
if start_type and not start_name:
return 400, headers, "The input is not valid"
record_sets = the_zone.get_record_sets(start_type, start_name)
return 200, headers, template.render(record_sets=record_sets)
def health_check_response(self, request, full_url, headers):
    """Create (POST), delete (DELETE) or list (GET) Route53 health checks."""
    self.setup_class(request, full_url, headers)
    method = request.method
    if method == "POST":
        config = xmltodict.parse(self.body)["CreateHealthCheckRequest"][
            "HealthCheckConfig"
        ]
        # Only Type is mandatory in the request; the rest default to None.
        health_check = route53_backend.create_health_check({
            "ip_address": config.get("IPAddress"),
            "port": config.get("Port"),
            "type": config["Type"],
            "resource_path": config.get("ResourcePath"),
            "fqdn": config.get("FullyQualifiedDomainName"),
            "search_string": config.get("SearchString"),
            "request_interval": config.get("RequestInterval"),
            "failure_threshold": config.get("FailureThreshold"),
        })
        body = Template(CREATE_HEALTH_CHECK_RESPONSE).render(
            health_check=health_check)
        return 201, headers, body
    elif method == "DELETE":
        check_id = urlparse(full_url).path.split("/")[-1]
        route53_backend.delete_health_check(check_id)
        return 200, headers, DELETE_HEALTH_CHECK_RESPONSE
    elif method == "GET":
        body = Template(LIST_HEALTH_CHECKS_RESPONSE).render(
            health_checks=route53_backend.get_health_checks())
        return 200, headers, body
def not_implemented_response(self, request, full_url, headers):
    """Raise for Route53 endpoints moto does not implement.

    The URL is inspected only to make the error message name the feature
    (tags or traffic policies) the caller tried to use.
    """
    self.setup_class(request, full_url, headers)
    if "tags" in full_url:
        action = "tags"
    elif "trafficpolicyinstances" in full_url:
        action = "policies"
    else:
        action = ""
    raise NotImplementedError(
        "The action for {0} has not been implemented for route 53".format(action)
    )
def list_or_change_tags_for_resource_request(self, request, full_url, headers):
    """GET lists the tags of a resource; POST adds or removes tags.

    The resource id and type are the last two components of the URL path.
    """
    self.setup_class(request, full_url, headers)
    path_parts = urlparse(full_url).path.split("/")
    id_ = path_parts[-1]
    type_ = path_parts[-2]
    if request.method == "GET":
        tags = route53_backend.list_tags_for_resource(id_)
        body = Template(LIST_TAGS_FOR_RESOURCE_RESPONSE).render(
            resource_type=type_, resource_id=id_, tags=tags
        )
        return 200, headers, body
    if request.method == "POST":
        # Narrow to the AddTags/RemoveTagKeys payload when present; the
        # backend receives whichever structure was found.
        tags = xmltodict.parse(self.body)["ChangeTagsForResourceRequest"]
        if "AddTags" in tags:
            tags = tags["AddTags"]
        elif "RemoveTagKeys" in tags:
            tags = tags["RemoveTagKeys"]
        route53_backend.change_tags_for_resource(id_, tags)
        return 200, headers, Template(CHANGE_TAGS_FOR_RESOURCE_RESPONSE).render()
def get_change(self, request, full_url, headers):
    """Return a GetChange response echoing the change id from the URL.

    moto treats every change as already INSYNC, so the body is static
    apart from the id.
    """
    self.setup_class(request, full_url, headers)
    if request.method == "GET":
        change_id = urlparse(full_url).path.rstrip("/").rsplit("/", 1)[1]
        body = Template(GET_CHANGE_RESPONSE).render(change_id=change_id)
        return 200, headers, body
# ---------------------------------------------------------------------------
# Jinja2 XML templates rendered by the response handlers above.
# ---------------------------------------------------------------------------
# ListTagsForResource: `tags` renders as a key -> value mapping.
LIST_TAGS_FOR_RESOURCE_RESPONSE = """
<ListTagsForResourceResponse xmlns="https://route53.amazonaws.com/doc/2015-01-01/">
<ResourceTagSet>
<ResourceType>{{resource_type}}</ResourceType>
<ResourceId>{{resource_id}}</ResourceId>
<Tags>
{% for key, value in tags.items() %}
<Tag>
<Key>{{key}}</Key>
<Value>{{value}}</Value>
</Tag>
{% endfor %}
</Tags>
</ResourceTagSet>
</ListTagsForResourceResponse>
"""
# ChangeTagsForResource: static acknowledgement.
CHANGE_TAGS_FOR_RESOURCE_RESPONSE = """<ChangeTagsForResourceResponse xmlns="https://route53.amazonaws.com/doc/2015-01-01/">
</ChangeTagsForResourceResponse>
"""
# ListResourceRecordSets: each record set renders its own XML.
LIST_RRSET_RESPONSE = """<ListResourceRecordSetsResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<ResourceRecordSets>
{% for record_set in record_sets %}
{{ record_set.to_xml() }}
{% endfor %}
</ResourceRecordSets>
<IsTruncated>false</IsTruncated>
</ListResourceRecordSetsResponse>"""
# ChangeResourceRecordSets: moto reports every change as already INSYNC
# with a fixed change id.
CHANGE_RRSET_RESPONSE = """<ChangeResourceRecordSetsResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<ChangeInfo>
<Status>INSYNC</Status>
<SubmittedAt>2010-09-10T01:36:41.958Z</SubmittedAt>
<Id>/change/C2682N5HXP0BZ4</Id>
</ChangeInfo>
</ChangeResourceRecordSetsResponse>"""
# DeleteHostedZone: empty ChangeInfo acknowledgement.
DELETE_HOSTED_ZONE_RESPONSE = """<DeleteHostedZoneResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<ChangeInfo>
</ChangeInfo>
</DeleteHostedZoneResponse>"""
# GetHostedZone: zone details plus a fixed fake delegation set.
GET_HOSTED_ZONE_RESPONSE = """<GetHostedZoneResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<HostedZone>
<Id>/hostedzone/{{ zone.id }}</Id>
<Name>{{ zone.name }}</Name>
<ResourceRecordSetCount>{{ zone.rrsets|count }}</ResourceRecordSetCount>
<Config>
{% if zone.comment %}
<Comment>{{ zone.comment }}</Comment>
{% endif %}
<PrivateZone>{{ zone.private_zone }}</PrivateZone>
</Config>
</HostedZone>
<DelegationSet>
<NameServers>
<NameServer>moto.test.com</NameServer>
</NameServers>
</DelegationSet>
</GetHostedZoneResponse>"""
# CreateHostedZone: like GetHostedZone but a new zone has zero record sets.
CREATE_HOSTED_ZONE_RESPONSE = """<CreateHostedZoneResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<HostedZone>
<Id>/hostedzone/{{ zone.id }}</Id>
<Name>{{ zone.name }}</Name>
<ResourceRecordSetCount>0</ResourceRecordSetCount>
<Config>
{% if zone.comment %}
<Comment>{{ zone.comment }}</Comment>
{% endif %}
<PrivateZone>{{ zone.private_zone }}</PrivateZone>
</Config>
</HostedZone>
<DelegationSet>
<NameServers>
<NameServer>moto.test.com</NameServer>
</NameServers>
</DelegationSet>
</CreateHostedZoneResponse>"""
# ListHostedZones: one HostedZone element per zone, never truncated.
LIST_HOSTED_ZONES_RESPONSE = """<ListHostedZonesResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/">
<HostedZones>
{% for zone in zones %}
<HostedZone>
<Id>/hostedzone/{{ zone.id }}</Id>
<Name>{{ zone.name }}</Name>
<Config>
{% if zone.comment %}
<Comment>{{ zone.comment }}</Comment>
{% endif %}
<PrivateZone>{{ zone.private_zone }}</PrivateZone>
</Config>
<ResourceRecordSetCount>{{ zone.rrsets|count }}</ResourceRecordSetCount>
</HostedZone>
{% endfor %}
</HostedZones>
<IsTruncated>false</IsTruncated>
</ListHostedZonesResponse>"""
# ListHostedZonesByName: echoes the dnsname filter when one was given.
LIST_HOSTED_ZONES_BY_NAME_RESPONSE = """<ListHostedZonesByNameResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
{% if dnsname %}
<DNSName>{{ dnsname }}</DNSName>
{% endif %}
<HostedZones>
{% for zone in zones %}
<HostedZone>
<Id>/hostedzone/{{ zone.id }}</Id>
<Name>{{ zone.name }}</Name>
<Config>
{% if zone.comment %}
<Comment>{{ zone.comment }}</Comment>
{% endif %}
<PrivateZone>{{ zone.private_zone }}</PrivateZone>
</Config>
<ResourceRecordSetCount>{{ zone.rrsets|count }}</ResourceRecordSetCount>
</HostedZone>
{% endfor %}
</HostedZones>
<IsTruncated>false</IsTruncated>
</ListHostedZonesByNameResponse>"""
# CreateHealthCheck: the health check object renders its own XML.
CREATE_HEALTH_CHECK_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CreateHealthCheckResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
{{ health_check.to_xml() }}
</CreateHealthCheckResponse>"""
# ListHealthChecks: never truncated; MaxItems mirrors the result count.
LIST_HEALTH_CHECKS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListHealthChecksResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
<HealthChecks>
{% for health_check in health_checks %}
{{ health_check.to_xml() }}
{% endfor %}
</HealthChecks>
<IsTruncated>false</IsTruncated>
<MaxItems>{{ health_checks|length }}</MaxItems>
</ListHealthChecksResponse>"""
# DeleteHealthCheck: empty acknowledgement.
DELETE_HEALTH_CHECK_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteHealthCheckResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
</DeleteHealthCheckResponse>"""
# GetChange: always INSYNC with a fixed timestamp, echoing the change id.
GET_CHANGE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<GetChangeResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
<ChangeInfo>
<Status>INSYNC</Status>
<SubmittedAt>2010-09-10T01:36:41.958Z</SubmittedAt>
<Id>{{ change_id }}</Id>
</ChangeInfo>
</GetChangeResponse>"""
|
en
| 0.32114
|
# in boto3, this field is set directly in the xml # if a VPC subsection is only included in xmls params when private_zone=True, # see boto: boto/route53/connection.py # sort by names, but with domain components reversed # see http://boto3.readthedocs.io/en/latest/reference/services/route53.html#Route53.Client.list_hosted_zones_by_name An error occurred (InvalidChangeBatch) when calling the ChangeResourceRecordSets operation: RRSet with DNS name %s is not permitted in zone %s # Depending on how many records there are, this may # or may not be a list <ListTagsForResourceResponse xmlns="https://route53.amazonaws.com/doc/2015-01-01/"> <ResourceTagSet> <ResourceType>{{resource_type}}</ResourceType> <ResourceId>{{resource_id}}</ResourceId> <Tags> {% for key, value in tags.items() %} <Tag> <Key>{{key}}</Key> <Value>{{value}}</Value> </Tag> {% endfor %} </Tags> </ResourceTagSet> </ListTagsForResourceResponse> <ChangeTagsForResourceResponse xmlns="https://route53.amazonaws.com/doc/2015-01-01/"> </ChangeTagsForResourceResponse> <ListResourceRecordSetsResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/"> <ResourceRecordSets> {% for record_set in record_sets %} {{ record_set.to_xml() }} {% endfor %} </ResourceRecordSets> <IsTruncated>false</IsTruncated> </ListResourceRecordSetsResponse> <ChangeResourceRecordSetsResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/"> <ChangeInfo> <Status>INSYNC</Status> <SubmittedAt>2010-09-10T01:36:41.958Z</SubmittedAt> <Id>/change/C2682N5HXP0BZ4</Id> </ChangeInfo> </ChangeResourceRecordSetsResponse> <DeleteHostedZoneResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/"> <ChangeInfo> </ChangeInfo> </DeleteHostedZoneResponse> <GetHostedZoneResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/"> <HostedZone> <Id>/hostedzone/{{ zone.id }}</Id> <Name>{{ zone.name }}</Name> <ResourceRecordSetCount>{{ zone.rrsets|count }}</ResourceRecordSetCount> <Config> {% if zone.comment %} <Comment>{{ zone.comment }}</Comment> 
{% endif %} <PrivateZone>{{ zone.private_zone }}</PrivateZone> </Config> </HostedZone> <DelegationSet> <NameServers> <NameServer>moto.test.com</NameServer> </NameServers> </DelegationSet> </GetHostedZoneResponse> <CreateHostedZoneResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/"> <HostedZone> <Id>/hostedzone/{{ zone.id }}</Id> <Name>{{ zone.name }}</Name> <ResourceRecordSetCount>0</ResourceRecordSetCount> <Config> {% if zone.comment %} <Comment>{{ zone.comment }}</Comment> {% endif %} <PrivateZone>{{ zone.private_zone }}</PrivateZone> </Config> </HostedZone> <DelegationSet> <NameServers> <NameServer>moto.test.com</NameServer> </NameServers> </DelegationSet> </CreateHostedZoneResponse> <ListHostedZonesResponse xmlns="https://route53.amazonaws.com/doc/2012-12-12/"> <HostedZones> {% for zone in zones %} <HostedZone> <Id>/hostedzone/{{ zone.id }}</Id> <Name>{{ zone.name }}</Name> <Config> {% if zone.comment %} <Comment>{{ zone.comment }}</Comment> {% endif %} <PrivateZone>{{ zone.private_zone }}</PrivateZone> </Config> <ResourceRecordSetCount>{{ zone.rrsets|count }}</ResourceRecordSetCount> </HostedZone> {% endfor %} </HostedZones> <IsTruncated>false</IsTruncated> </ListHostedZonesResponse> <ListHostedZonesByNameResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/"> {% if dnsname %} <DNSName>{{ dnsname }}</DNSName> {% endif %} <HostedZones> {% for zone in zones %} <HostedZone> <Id>/hostedzone/{{ zone.id }}</Id> <Name>{{ zone.name }}</Name> <Config> {% if zone.comment %} <Comment>{{ zone.comment }}</Comment> {% endif %} <PrivateZone>{{ zone.private_zone }}</PrivateZone> </Config> <ResourceRecordSetCount>{{ zone.rrsets|count }}</ResourceRecordSetCount> </HostedZone> {% endfor %} </HostedZones> <IsTruncated>false</IsTruncated> </ListHostedZonesByNameResponse> <?xml version="1.0" encoding="UTF-8"?> <CreateHealthCheckResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/"> {{ health_check.to_xml() }} </CreateHealthCheckResponse> <?xml 
version="1.0" encoding="UTF-8"?> <ListHealthChecksResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/"> <HealthChecks> {% for health_check in health_checks %} {{ health_check.to_xml() }} {% endfor %} </HealthChecks> <IsTruncated>false</IsTruncated> <MaxItems>{{ health_checks|length }}</MaxItems> </ListHealthChecksResponse> <?xml version="1.0" encoding="UTF-8"?> <DeleteHealthCheckResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/"> </DeleteHealthCheckResponse> <?xml version="1.0" encoding="UTF-8"?> <GetChangeResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/"> <ChangeInfo> <Status>INSYNC</Status> <SubmittedAt>2010-09-10T01:36:41.958Z</SubmittedAt> <Id>{{ change_id }}</Id> </ChangeInfo> </GetChangeResponse>
| 1.951159
| 2
|
pyiid/tests/test_calc/test_calc_1d.py
|
ZhouHUB/pyIID
| 0
|
6625663
|
from pyiid.tests import *
from pyiid.experiments.elasticscatter import ElasticScatter
from pyiid.calc.calc_1d import Calc1D
__author__ = 'christopher'
def check_meta(value):
    """Run a packed test case.

    The first element of *value* is the checker callable; the remaining
    elements are forwarded to it as a single tuple.
    """
    checker = value[0]
    checker(value[1:])
def check_nrg(value):
    """
    Check two processor, algorithm pairs against each other for PDF energy

    Parameters
    ----------
    value: list or tuple
        The values to use in the tests:
        value[0] -> (target atoms, probe atoms)
        value[1] -> experiment dict
        value[2] -> (potential, threshold); the threshold is unused here
        value[3] -> ((proc1, alg1), (proc2, alg2)) processor/algorithm pairs
        value[4] -> experiment type, 'FQ' or 'PDF'

    Raises
    ------
    ValueError
        If value[4] is neither 'FQ' nor 'PDF'.
    """
    rtol = 4e-6
    atol = 9e-6
    # setup
    atoms1, atoms2 = value[0]
    exp_dict = value[1]
    p, thresh = value[2]
    proc1, alg1 = value[3][0]
    proc2, alg2 = value[3][1]
    scat = ElasticScatter(verbose=True)
    scat.update_experiment(exp_dict)
    scat.set_processor(proc1, alg1)
    if value[4] == 'FQ':
        exp_func = scat.get_fq
        exp_grad = scat.get_grad_fq
    elif value[4] == 'PDF':
        exp_func = scat.get_pdf
        exp_grad = scat.get_grad_pdf
    else:
        # Fail fast with a clear message instead of the previous silent
        # fall-through to exp_func = None, which surfaced later as a
        # confusing "'NoneType' object is not callable" TypeError.
        raise ValueError(
            "unknown experiment type %r, expected 'FQ' or 'PDF'" % (value[4],))
    target_data = exp_func(atoms1)
    # Energy with the first processor/algorithm pair.
    calc = Calc1D(target_data=target_data,
                  exp_function=exp_func, exp_grad_function=exp_grad,
                  potential=p)
    atoms2.set_calculator(calc)
    ans1 = atoms2.get_potential_energy()
    # Energy with the second pair; must statistically match the first.
    scat.set_processor(proc2, alg2)
    calc = Calc1D(target_data=target_data,
                  exp_function=exp_func, exp_grad_function=exp_grad,
                  potential=p)
    atoms2.set_calculator(calc)
    ans2 = atoms2.get_potential_energy()
    stats_check(ans2, ans1, rtol, atol)
def check_forces(value):
    """
    Check two processor, algorithm pairs against each other for PDF forces

    Parameters
    ----------
    value: list or tuple
        The values to use in the tests:
        value[0] -> (target atoms, probe atoms)
        value[1] -> experiment dict
        value[2] -> (potential, threshold); the threshold is unused here
        value[3] -> ((proc1, alg1), (proc2, alg2)) processor/algorithm pairs
        value[4] -> experiment type, 'FQ' or 'PDF'

    Raises
    ------
    ValueError
        If value[4] is neither 'FQ' nor 'PDF'.
    """
    # setup
    rtol = 1e-4
    atol = 6e-5
    atoms1, atoms2 = value[0]
    exp_dict = value[1]
    p, thresh = value[2]
    proc1, alg1 = value[3][0]
    proc2, alg2 = value[3][1]
    scat = ElasticScatter(verbose=True)
    scat.update_experiment(exp_dict)
    scat.set_processor(proc1, alg1)
    if value[4] == 'FQ':
        exp_func = scat.get_fq
        exp_grad = scat.get_grad_fq
    elif value[4] == 'PDF':
        exp_func = scat.get_pdf
        exp_grad = scat.get_grad_pdf
    else:
        # Fail fast with a clear message instead of the previous silent
        # fall-through to exp_func = None, which surfaced later as a
        # confusing "'NoneType' object is not callable" TypeError.
        raise ValueError(
            "unknown experiment type %r, expected 'FQ' or 'PDF'" % (value[4],))
    target_data = exp_func(atoms1)
    # Forces with the first processor/algorithm pair.
    calc = Calc1D(target_data=target_data,
                  exp_function=exp_func, exp_grad_function=exp_grad,
                  potential=p)
    atoms2.set_calculator(calc)
    ans1 = atoms2.get_forces()
    # Forces with the second pair; must statistically match the first.
    scat.set_processor(proc2, alg2)
    calc = Calc1D(target_data=target_data,
                  exp_function=exp_func, exp_grad_function=exp_grad,
                  potential=p)
    atoms2.set_calculator(calc)
    ans2 = atoms2.get_forces()
    stats_check(ans2, ans1,
                rtol=rtol,
                atol=atol
                )
# Checker functions to pair with each fixture combination.
tests = [
check_nrg,
check_forces
]
test_experiment_types = ['FQ', 'PDF']
# Cartesian product of checkers and fixtures (fixtures come from
# pyiid.tests via the star import); consumed by the test_meta generator.
test_data = tuple(product(tests,
test_double_atoms, test_exp, test_potentials,
comparison_pro_alg_pairs, test_experiment_types))
def test_meta():
    """Nose generator: yield one (checker, packed args) case per fixture."""
    for case in test_data:
        yield check_meta, case
if __name__ == '__main__':
    # Run this module's generator tests under nose when executed directly.
    import nose
    nose.runmodule(
        argv=[
            '--with-doctest',
            '-v',
            '-x',
        ],
        exit=False,
    )
|
from pyiid.tests import *
from pyiid.experiments.elasticscatter import ElasticScatter
from pyiid.calc.calc_1d import Calc1D
__author__ = 'christopher'
def check_meta(value):
value[0](value[1:])
def check_nrg(value):
"""
Check two processor, algorithm pairs against each other for PDF energy
Parameters
----------
value: list or tuple
The values to use in the tests
"""
rtol = 4e-6
atol = 9e-6
# setup
atoms1, atoms2 = value[0]
exp_dict = value[1]
p, thresh = value[2]
proc1, alg1 = value[3][0]
proc2, alg2 = value[3][1]
scat = ElasticScatter(verbose=True)
scat.update_experiment(exp_dict)
scat.set_processor(proc1, alg1)
if value[4] == 'FQ':
exp_func = scat.get_fq
exp_grad = scat.get_grad_fq
elif value[4] == 'PDF':
exp_func = scat.get_pdf
exp_grad = scat.get_grad_pdf
else:
exp_func = None
exp_grad = None
target_data = exp_func(atoms1)
calc = Calc1D(target_data=target_data,
exp_function=exp_func, exp_grad_function=exp_grad,
potential=p)
atoms2.set_calculator(calc)
ans1 = atoms2.get_potential_energy()
scat.set_processor(proc2, alg2)
calc = Calc1D(target_data=target_data,
exp_function=exp_func, exp_grad_function=exp_grad,
potential=p)
atoms2.set_calculator(calc)
ans2 = atoms2.get_potential_energy()
stats_check(ans2, ans1, rtol, atol)
def check_forces(value):
"""
Check two processor, algorithm pairs against each other for PDF forces
:param value:
:return:
"""
# setup
rtol = 1e-4
atol = 6e-5
atoms1, atoms2 = value[0]
exp_dict = value[1]
p, thresh = value[2]
proc1, alg1 = value[3][0]
proc2, alg2 = value[3][1]
scat = ElasticScatter(verbose=True)
scat.update_experiment(exp_dict)
scat.set_processor(proc1, alg1)
if value[4] == 'FQ':
exp_func = scat.get_fq
exp_grad = scat.get_grad_fq
elif value[4] == 'PDF':
exp_func = scat.get_pdf
exp_grad = scat.get_grad_pdf
else:
exp_func = None
exp_grad = None
target_data = exp_func(atoms1)
calc = Calc1D(target_data=target_data,
exp_function=exp_func, exp_grad_function=exp_grad,
potential=p)
atoms2.set_calculator(calc)
ans1 = atoms2.get_forces()
scat.set_processor(proc2, alg2)
calc = Calc1D(target_data=target_data,
exp_function=exp_func, exp_grad_function=exp_grad,
potential=p)
atoms2.set_calculator(calc)
ans2 = atoms2.get_forces()
stats_check(ans2, ans1,
rtol=rtol,
atol=atol
)
tests = [
check_nrg,
check_forces
]
test_experiment_types = ['FQ', 'PDF']
test_data = tuple(product(tests,
test_double_atoms, test_exp, test_potentials,
comparison_pro_alg_pairs, test_experiment_types))
def test_meta():
for v in test_data:
yield check_meta, v
if __name__ == '__main__':
import nose
nose.runmodule(argv=[
# '-s',
'--with-doctest',
# '--nocapture',
'-v',
'-x'
],
# env={"NOSE_PROCESSES": 1, "NOSE_PROCESS_TIMEOUT": 599},
exit=False)
|
en
| 0.567802
|
Check two processor, algorithm pairs against each other for PDF energy Parameters ---------- value: list or tuple The values to use in the tests # setup Check two processor, algorithm pairs against each other for PDF forces :param value: :return: # setup # '-s', # '--nocapture', # env={"NOSE_PROCESSES": 1, "NOSE_PROCESS_TIMEOUT": 599},
| 2.167449
| 2
|
django_blog/admin.py
|
Emiliemorais/blog_django_plugin
| 0
|
6625664
|
<gh_stars>0
from django.contrib import admin
from .models import Post
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
    """Default admin configuration for the Post model.

    Registered on the default admin site via the decorator, which is
    equivalent to calling admin.site.register(Post, PostAdmin).
    """
    pass
|
from django.contrib import admin
from .models import Post
class PostAdmin(admin.ModelAdmin):
pass
admin.site.register(Post, PostAdmin)
|
none
| 1
| 1.382541
| 1
|
|
mmcls/models/backbones/mobilenet_v2.py
|
agim-a/mmclassification
| 29
|
6625665
|
import logging
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule, constant_init, kaiming_init
from mmcv.runner import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from mmcls.models.utils import make_divisible
from ..builder import BACKBONES
from .base_backbone import BaseBackbone
class InvertedResidual(nn.Module):
    """InvertedResidual block for MobileNetV2.

    The block is an optional 1x1 expansion conv, a 3x3 depthwise conv and a
    1x1 linear projection conv. A residual shortcut is added when the stride
    is 1 and the input and output channel counts match.

    Args:
        in_channels (int): The input channels of the InvertedResidual block.
        out_channels (int): The output channels of the InvertedResidual block.
        stride (int): Stride of the middle (first) 3x3 convolution.
        expand_ratio (int): adjusts number of channels of the hidden layer
            in InvertedResidual by this amount.
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU6').
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed. Default: False.

    Returns:
        Tensor: The output tensor
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 expand_ratio,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU6'),
                 with_cp=False):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2], f'stride must in [1, 2]. ' \
                                 f'But received {stride}.'
        self.stride = stride
        self.with_cp = with_cp
        self.use_res_connect = stride == 1 and in_channels == out_channels
        mid_channels = int(round(in_channels * expand_ratio))

        blocks = []
        if expand_ratio != 1:
            # Pointwise expansion; skipped when the ratio is 1.
            blocks.append(
                ConvModule(
                    in_channels=in_channels,
                    out_channels=mid_channels,
                    kernel_size=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
        # Depthwise 3x3 conv (groups == channels).
        blocks.append(
            ConvModule(
                in_channels=mid_channels,
                out_channels=mid_channels,
                kernel_size=3,
                stride=stride,
                padding=1,
                groups=mid_channels,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg))
        # Linear pointwise projection (no activation).
        blocks.append(
            ConvModule(
                in_channels=mid_channels,
                out_channels=out_channels,
                kernel_size=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=None))
        self.conv = nn.Sequential(*blocks)

    def forward(self, x):
        def _inner_forward(inp):
            if self.use_res_connect:
                return inp + self.conv(inp)
            return self.conv(inp)

        # Gradient checkpointing trades compute for memory during training.
        if self.with_cp and x.requires_grad:
            return cp.checkpoint(_inner_forward, x)
        return _inner_forward(x)
@BACKBONES.register_module()
class MobileNetV2(BaseBackbone):
    """MobileNetV2 backbone.

    Args:
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Default: 1.0.
        out_indices (None or Sequence[int]): Output from which stages.
            Default: (7, ).
        frozen_stages (int): Stages to be frozen (all param fixed).
            Default: -1, which means not freezing any parameters.
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU6').
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Default: False.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed. Default: False.
    """

    # Parameters to build layers. 4 parameters are needed to construct a
    # layer, from left to right: expand_ratio, channel, num_blocks, stride.
    arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],
                     [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],
                     [6, 320, 1, 1]]

    def __init__(self,
                 widen_factor=1.,
                 out_indices=(7, ),
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU6'),
                 norm_eval=False,
                 with_cp=False):
        super(MobileNetV2, self).__init__()
        self.widen_factor = widen_factor
        # Validate the configuration before building any modules.
        for index in out_indices:
            if index not in range(0, 8):
                raise ValueError('the item in out_indices must in '
                                 f'range(0, 8). But received {index}')
        if frozen_stages not in range(-1, 8):
            raise ValueError('frozen_stages must be in range(-1, 8). '
                             f'But received {frozen_stages}')
        # Fixed a redundancy: out_indices was assigned twice in the
        # original (before and after validation); one assignment suffices.
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp

        # Stem: 3x3 stride-2 conv; width follows the widen_factor, rounded
        # to a multiple of 8.
        self.in_channels = make_divisible(32 * widen_factor, 8)
        self.conv1 = ConvModule(
            in_channels=3,
            out_channels=self.in_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

        # Seven stages of InvertedResidual blocks per arch_settings.
        self.layers = []
        for i, layer_cfg in enumerate(self.arch_settings):
            expand_ratio, channel, num_blocks, stride = layer_cfg
            out_channels = make_divisible(channel * widen_factor, 8)
            inverted_res_layer = self.make_layer(
                out_channels=out_channels,
                num_blocks=num_blocks,
                stride=stride,
                expand_ratio=expand_ratio)
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, inverted_res_layer)
            self.layers.append(layer_name)

        # Final 1x1 conv: scales with widen_factor but never below 1280.
        if widen_factor > 1.0:
            self.out_channel = int(1280 * widen_factor)
        else:
            self.out_channel = 1280
        layer = ConvModule(
            in_channels=self.in_channels,
            out_channels=self.out_channel,
            kernel_size=1,
            stride=1,
            padding=0,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        self.add_module('conv2', layer)
        self.layers.append('conv2')

    def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
        """Stack InvertedResidual blocks to build a layer for MobileNetV2.

        Args:
            out_channels (int): out_channels of block.
            num_blocks (int): number of blocks.
            stride (int): stride of the first block. Default: 1
            expand_ratio (int): Expand the number of channels of the
                hidden layer in InvertedResidual by this ratio. Default: 6.
        """
        layers = []
        for i in range(num_blocks):
            if i >= 1:
                # Only the first block of a stage may downsample.
                stride = 1
            layers.append(
                InvertedResidual(
                    self.in_channels,
                    out_channels,
                    stride,
                    expand_ratio=expand_ratio,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg,
                    with_cp=self.with_cp))
            self.in_channels = out_channels
        return nn.Sequential(*layers)

    def init_weights(self, pretrained=None):
        """Initialize weights.

        Loads a checkpoint when *pretrained* is a path; otherwise applies
        Kaiming init to convs and constant init to norm layers.

        Raises:
            TypeError: If *pretrained* is neither a str nor None.
        """
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Return the feature maps of the stages listed in out_indices.

        A single tensor when exactly one stage is requested, otherwise a
        tuple of tensors.
        """
        x = self.conv1(x)
        outs = []
        for i, layer_name in enumerate(self.layers):
            layer = getattr(self, layer_name)
            x = layer(x)
            if i in self.out_indices:
                outs.append(x)
        if len(outs) == 1:
            return outs[0]
        return tuple(outs)

    def _freeze_stages(self):
        """Freeze the stem and the first *frozen_stages* stages in place."""
        if self.frozen_stages >= 0:
            for param in self.conv1.parameters():
                param.requires_grad = False
            for i in range(1, self.frozen_stages + 1):
                layer = getattr(self, f'layer{i}')
                layer.eval()
                for param in layer.parameters():
                    param.requires_grad = False

    def train(self, mode=True):
        """Switch train/eval mode while keeping frozen stages frozen and,
        when norm_eval is set, BatchNorm layers in eval mode."""
        super(MobileNetV2, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
|
import logging
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule, constant_init, kaiming_init
from mmcv.runner import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from mmcls.models.utils import make_divisible
from ..builder import BACKBONES
from .base_backbone import BaseBackbone
class InvertedResidual(nn.Module):
"""InvertedResidual block for MobileNetV2.
Args:
in_channels (int): The input channels of the InvertedResidual block.
out_channels (int): The output channels of the InvertedResidual block.
stride (int): Stride of the middle (first) 3x3 convolution.
expand_ratio (int): adjusts number of channels of the hidden layer
in InvertedResidual by this amount.
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU6').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
Tensor: The output tensor
"""
def __init__(self,
in_channels,
out_channels,
stride,
expand_ratio,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU6'),
with_cp=False):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2], f'stride must in [1, 2]. ' \
f'But received {stride}.'
self.with_cp = with_cp
self.use_res_connect = self.stride == 1 and in_channels == out_channels
hidden_dim = int(round(in_channels * expand_ratio))
layers = []
if expand_ratio != 1:
layers.append(
ConvModule(
in_channels=in_channels,
out_channels=hidden_dim,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
layers.extend([
ConvModule(
in_channels=hidden_dim,
out_channels=hidden_dim,
kernel_size=3,
stride=stride,
padding=1,
groups=hidden_dim,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
ConvModule(
in_channels=hidden_dim,
out_channels=out_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
def _inner_forward(x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
@BACKBONES.register_module()
class MobileNetV2(BaseBackbone):
    """MobileNetV2 backbone.

    Args:
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Default: 1.0.
        out_indices (None or Sequence[int]): Output from which stages.
            Default: (7, ).
        frozen_stages (int): Stages to be frozen (all param fixed).
            Default: -1, which means not freezing any parameters.
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU6').
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Default: False.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed. Default: False.
    """

    # Parameters to build layers. 4 parameters are needed to construct a
    # layer, from left to right: expand_ratio, channel, num_blocks, stride.
    arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],
                     [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],
                     [6, 320, 1, 1]]

    def __init__(self,
                 widen_factor=1.,
                 out_indices=(7, ),
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU6'),
                 norm_eval=False,
                 with_cp=False):
        super(MobileNetV2, self).__init__()
        self.widen_factor = widen_factor
        # Validate before storing.  There are 8 feature levels in total:
        # 7 inverted-residual stages plus the final 1x1 conv ('conv2').
        for index in out_indices:
            if index not in range(0, 8):
                raise ValueError('the item in out_indices must in '
                                 f'range(0, 8). But received {index}')
        if frozen_stages not in range(-1, 8):
            raise ValueError('frozen_stages must be in range(-1, 8). '
                             f'But received {frozen_stages}')
        # Assigned exactly once (the original assigned it twice).
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.norm_eval = norm_eval
        self.with_cp = with_cp

        # Stem: 3x3 stride-2 conv; channels rounded to a multiple of 8.
        self.in_channels = make_divisible(32 * widen_factor, 8)
        self.conv1 = ConvModule(
            in_channels=3,
            out_channels=self.in_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

        # Build the 7 inverted-residual stages from ``arch_settings``.
        self.layers = []
        for i, layer_cfg in enumerate(self.arch_settings):
            expand_ratio, channel, num_blocks, stride = layer_cfg
            out_channels = make_divisible(channel * widen_factor, 8)
            inverted_res_layer = self.make_layer(
                out_channels=out_channels,
                num_blocks=num_blocks,
                stride=stride,
                expand_ratio=expand_ratio)
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, inverted_res_layer)
            self.layers.append(layer_name)

        # Head: final 1x1 conv to 1280 channels (scaled up for wide models,
        # never scaled below 1280).
        if widen_factor > 1.0:
            self.out_channel = int(1280 * widen_factor)
        else:
            self.out_channel = 1280

        layer = ConvModule(
            in_channels=self.in_channels,
            out_channels=self.out_channel,
            kernel_size=1,
            stride=1,
            padding=0,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        self.add_module('conv2', layer)
        self.layers.append('conv2')

    def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
        """Stack InvertedResidual blocks to build a layer for MobileNetV2.

        Args:
            out_channels (int): out_channels of block.
            num_blocks (int): number of blocks.
            stride (int): stride of the first block. Default: 1
            expand_ratio (int): Expand the number of channels of the
                hidden layer in InvertedResidual by this ratio. Default: 6.
        """
        layers = []
        for i in range(num_blocks):
            # Only the first block of a stage may downsample.
            if i >= 1:
                stride = 1
            layers.append(
                InvertedResidual(
                    self.in_channels,
                    out_channels,
                    stride,
                    expand_ratio=expand_ratio,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg,
                    with_cp=self.with_cp))
            self.in_channels = out_channels
        return nn.Sequential(*layers)

    def init_weights(self, pretrained=None):
        """Initialize from a checkpoint path or with Kaiming/constant init.

        Args:
            pretrained (str, optional): Path to a pretrained checkpoint.
                When None, conv weights get Kaiming init and norm layers
                are set to 1.

        Raises:
            TypeError: If ``pretrained`` is neither a str nor None.
        """
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Return the feature map(s) selected by ``out_indices``.

        A single tensor when one index is requested, otherwise a tuple.
        """
        x = self.conv1(x)
        outs = []
        for i, layer_name in enumerate(self.layers):
            layer = getattr(self, layer_name)
            x = layer(x)
            if i in self.out_indices:
                outs.append(x)
        if len(outs) == 1:
            return outs[0]
        else:
            return tuple(outs)

    def _freeze_stages(self):
        """Freeze the stem and the first ``frozen_stages`` stages."""
        if self.frozen_stages >= 0:
            for param in self.conv1.parameters():
                param.requires_grad = False
            for i in range(1, self.frozen_stages + 1):
                layer = getattr(self, f'layer{i}')
                layer.eval()
                for param in layer.parameters():
                    param.requires_grad = False

    def train(self, mode=True):
        """Set train mode, re-freeze frozen stages and (optionally) keep
        BatchNorm layers in eval mode so running stats stay fixed."""
        super(MobileNetV2, self).train(mode)
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
|
en
| 0.696937
|
InvertedResidual block for MobileNetV2. Args: in_channels (int): The input channels of the InvertedResidual block. out_channels (int): The output channels of the InvertedResidual block. stride (int): Stride of the middle (first) 3x3 convolution. expand_ratio (int): adjusts number of channels of the hidden layer in InvertedResidual by this amount. conv_cfg (dict, optional): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN'). act_cfg (dict): Config dict for activation layer. Default: dict(type='ReLU6'). with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. Returns: Tensor: The output tensor MobileNetV2 backbone. Args: widen_factor (float): Width multiplier, multiply number of channels in each layer by this amount. Default: 1.0. out_indices (None or Sequence[int]): Output from which stages. Default: (7, ). frozen_stages (int): Stages to be frozen (all param fixed). Default: -1, which means not freezing any parameters. conv_cfg (dict, optional): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN'). act_cfg (dict): Config dict for activation layer. Default: dict(type='ReLU6'). norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. Default: False. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. # Parameters to build layers. 4 parameters are needed to construct a # layer, from left to right: expand_ratio, channel, num_blocks, stride. Stack InvertedResidual blocks to build a layer for MobileNetV2. Args: out_channels (int): out_channels of block. num_blocks (int): number of blocks. 
stride (int): stride of the first block. Default: 1 expand_ratio (int): Expand the number of channels of the hidden layer in InvertedResidual by this ratio. Default: 6.
| 2.490853
| 2
|
src/mtensorflow/tf_linear_regression.py
|
mumupy/mmdeeplearning
| 9
|
6625666
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/10/23 10:43
# @Author : ganliang
# @File : tf_linear_regression.py
# @Desc : 单特征的线性回归
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import datasets
import numpy as np
from matplotlib import pyplot as plt
from src.config import logger
def normaize(X):
    """Standardize *X* to zero mean and unit variance (z-score).

    :param X: array-like of numbers.
    :return: numpy array ``(X - mean(X)) / std(X)``.
    """
    center, spread = np.mean(X), np.std(X)
    return (X - center) / spread
def loaddata():
    """Load the Boston housing dataset for single-feature regression.

    Only feature column 5 is used as the regression input.

    :return: tuple ``(X_train, Y_train, n_samples)``.
    """
    boston = datasets.load_boston()
    X_train = boston.data[:, 5]
    Y_train = boston.target
    return (X_train, Y_train, len(X_train))
def get_model():
    """Create graph nodes for the single-feature linear model y = w*x + b.

    :return: tuple ``(X, Y, w, b)`` — input/target placeholders and the
        trainable weight and bias variables (both initialized to 0.0).
    """
    # Placeholders for one input sample and its target.
    X = tf.placeholder(dtype=tf.float32, name="X")
    Y = tf.placeholder(dtype=tf.float32, name="Y")
    # Trainable parameters, zero-initialized.
    w = tf.Variable(0.0, dtype=tf.float32, name="w")
    b = tf.Variable(0.0, dtype=tf.float32, name="b")
    return (X, Y, w, b)
def get_loss(X, Y, w, b):
    """Build the squared-error loss node for the linear model.

    :param X: input tensor or placeholder.
    :param Y: target tensor or placeholder.
    :param w: weight.
    :param b: bias.
    :return: elementwise squared error ``(Y - (X*w + b))**2``.
    """
    prediction = X * w + b
    return tf.square(Y - prediction, name="loss")
def get_optimizer(loss):
    """Create a gradient-descent training op minimizing *loss*.

    :param loss: loss tensor to minimize.
    :return: the SGD minimize op (learning rate 0.01).
    """
    sgd = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    return sgd.minimize(loss)
def show_data(X_train, Y_train, w, b):
    """Plot ground-truth targets against the model's predictions.

    :param X_train: training inputs.
    :param Y_train: ground-truth targets.
    :param w: learned weight value.
    :param b: learned bias value.
    """
    predictions = X_train * w + b
    # Blue dots: real samples; red line: model output.
    plt.plot(X_train, Y_train, "bo", label="Real Data")
    plt.plot(X_train, predictions, "r", label="predict Data")
    plt.legend()
    plt.show()
def show_loss(total):
    """Plot the loss curve.

    :param total: sequence of loss values, one per epoch/step.
    """
    plt.plot(total)
    plt.show()
def linear_regression():
    """Train the single-feature linear model with per-sample SGD.

    Runs 100 epochs over the Boston room-count feature, logging the mean
    loss per epoch, then plots predictions vs. targets and the loss curve.
    """
    X, Y, w, b = get_model()
    loss = get_loss(X, Y, w, b)
    optimizer = get_optimizer(loss)
    init_op = tf.global_variables_initializer()
    X_train, Y_train, n_samples = loaddata()
    total = []
    with tf.Session() as sess:
        sess.run(init_op)
        writer = tf.summary.FileWriter("linear_regression", sess.graph)
        try:
            for epoch in range(100):
                total_loss = 0
                # One SGD step per training sample.
                for x, y in zip(X_train, Y_train):
                    _, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})
                    total_loss += l
                total.append(total_loss / n_samples)
                # Fixed log typo: the original wrote "epoll".
                logger.info("epoch {0} loss {1}".format(epoch, total_loss / n_samples))
        finally:
            # Close exactly once; the original called writer.close() twice.
            writer.close()
        w_val, b_val = sess.run([w, b])
        logger.info("w {0},b {1}".format(w_val, b_val))
    # Compare predictions with ground truth.
    show_data(X_train, Y_train, w_val, b_val)
    # Show how the loss evolved.
    show_loss(total)
def k_linear_regression():
    """Train the linear model with k-fold cross validation (k=5).

    For each of 10 epochs: split the data into 5 folds, run per-sample SGD
    on the training folds, and evaluate mean squared error on the held-out
    fold with the current (w, b).  Logs train/test loss per epoch, then
    plots predictions vs. targets and the training-loss curve.
    """
    from sklearn.model_selection import KFold
    X, Y, w, b = get_model()
    loss = get_loss(X, Y, w, b)
    optimizer = get_optimizer(loss)
    init_op = tf.global_variables_initializer()
    X_train, Y_train, n_samples = loaddata()
    total = []
    n_splits = 5
    with tf.Session() as sess:
        sess.run(init_op)
        writer = tf.summary.FileWriter("linear_regression", sess.graph)
        try:
            for epoch in range(10):
                total_train_loss = 0
                total_test_loss = 0
                k_fold = KFold(n_splits=n_splits, shuffle=False, random_state=None)
                for train_index, test_index in k_fold.split(X_train, Y_train):
                    # Train on the k-1 folds, one SGD step per sample.
                    for x, y in zip(X_train[train_index], Y_train[train_index]):
                        _, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})
                        total_train_loss += l / len(train_index)
                    # Evaluate MSE on the held-out fold in numpy.  The
                    # original built new graph nodes (get_loss + reduce_mean)
                    # every fold, growing the graph without bound.
                    w_val, b_val = sess.run([w, b])
                    fold_pred = X_train[test_index] * w_val + b_val
                    total_test_loss += float(
                        np.mean(np.square(Y_train[test_index] - fold_pred)))
                total.append(total_train_loss / n_splits)
                # Fixed log typo: the original wrote "epoll".
                logger.info("epoch {0} train loss {1} test loss {2}".format(
                    epoch, total_train_loss / n_splits,
                    total_test_loss / n_splits))
        finally:
            # Close exactly once; the original called writer.close() twice.
            writer.close()
        w_val, b_val = sess.run([w, b])
        logger.info("w {0},b {1}".format(w_val, b_val))
    # Compare predictions with ground truth.
    show_data(X_train, Y_train, w_val, b_val)
    # Show how the training loss evolved.
    show_loss(total)
def multiple_linear_regression():
    """Train a multi-feature linear regression on the Boston dataset.

    All 13 features plus one appended constant column are fed through a
    single matmul model trained with full-batch gradient descent for 1000
    steps; finally one sample's prediction is logged against its target.
    """
    boston_data = datasets.load_boston()
    X_train, Y_train = boston_data.data, boston_data.target
    n_samples = len(X_train)
    # Append a constant column so the bias is learned as a regular weight.
    # NOTE(review): it starts as zeros, but the global mean/std
    # normalization below shifts it to a nonzero constant, which is what
    # actually makes it act as a bias term — confirm this is intentional.
    X_train = np.c_[X_train, np.zeros(shape=(n_samples, 1))]
    n_features = X_train.shape[1]
    X_train = (X_train - np.mean(X_train)) / np.std(X_train)
    Y_train = np.reshape(Y_train, newshape=(n_samples, 1))
    X = tf.placeholder(dtype=tf.float32, shape=(n_samples, n_features), name="X")
    Y = tf.placeholder(dtype=tf.float32, shape=(n_samples, 1), name="Y")
    w = tf.Variable(tf.random_normal(shape=(n_features, 1)), name="w")
    Y_that = tf.matmul(X, w)
    loss = tf.reduce_mean(tf.square(Y - Y_that), name="loss")
    optimizer = tf.train.GradientDescentOptimizer(0.001, name="optimizer").minimize(loss)
    init_op = tf.global_variables_initializer()
    total = []
    with tf.Session() as sess:
        sess.run(init_op)
        writer = tf.summary.FileWriter("multiplinear_regression", sess.graph)
        try:
            for step in range(1000):
                _, total_loss = sess.run([optimizer, loss],
                                         feed_dict={X: X_train, Y: Y_train})
                total.append(total_loss)
                # Fixed log typo: the original wrote "epoll".
                logger.info("epoch {0} loss {1}".format(step, total_loss / n_samples))
        finally:
            # Close exactly once; the original called writer.close() twice.
            writer.close()
        w_value = sess.run(w)
    show_loss(total)
    # Predict one sample and compare with its ground truth.
    n = 500
    Y_pred = np.matmul(X_train[n, :], w_value)
    logger.info("pred {0} real {1}".format(Y_pred[0], Y_train[n][0]))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/10/23 10:43
# @Author : ganliang
# @File : tf_linear_regression.py
# @Desc : 单特征的线性回归
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import datasets
import numpy as np
from matplotlib import pyplot as plt
from src.config import logger
def normaize(X):
"""
数据归一化处理
:param X:
:return:
"""
return (X - np.mean(X)) / np.std(X)
def loaddata():
"""
获取到数据集
:return:
"""
boston_data = datasets.load_boston()
X_train, Y_train = boston_data.data[:, 5], boston_data.target
# X_train=normaize(X_train)
n_samples = len(X_train)
return (X_train, Y_train, n_samples)
def get_model():
"""
定义模型
:return:
"""
# 定义X,Y占位符
X = tf.placeholder(dtype=tf.float32, name="X")
Y = tf.placeholder(dtype=tf.float32, name="Y")
# 定义权重和偏差
w = tf.Variable(0.0, dtype=tf.float32, name="w")
b = tf.Variable(0.0, dtype=tf.float32, name="b")
return (X, Y, w, b)
def get_loss(X, Y, w, b):
"""
定义损失函数
:return:
"""
# 线性回归模型
Y_that = X * w + b
# 平方损失
loss = tf.square(Y - Y_that, name="loss")
return loss
def get_optimizer(loss):
"""
获取优化方法
:param loss:
:return:
"""
# 随机梯度下降优化算法
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(loss)
# optimizer = tf.train.AdagradOptimizer(learning_rate=0.01).minimize(loss)
return optimizer
def show_data(X_train, Y_train, w, b):
"""
显示真实数据和预测数据之间的差异
:param X_train: 训练数据
:param Y_train: 标签
:param w: 权重值
:param b: 偏差值
:return:
"""
y_pred = X_train * w + b
# 对比预估值和真实值
plt.plot(X_train, Y_train, "bo", label="Real Data")
plt.plot(X_train, y_pred, "r", label="predict Data")
plt.legend()
plt.show()
def show_loss(total):
"""
展示损失函数变化图
:return:
"""
plt.plot(total)
plt.show()
def linear_regression():
"""
简单的线性回归模型
:return:
"""
X, Y, w, b = get_model()
loss = get_loss(X, Y, w, b)
optimizer = get_optimizer(loss)
linear_ops = tf.global_variables_initializer()
X_train, Y_train, n_samples = loaddata()
total = []
with tf.Session() as sess:
sess.run(linear_ops)
writer = tf.summary.FileWriter("linear_regression", sess.graph)
for i in range(100):
total_loss = 0
for x, y in zip(X_train, Y_train):
_, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})
total_loss += l
total.append(total_loss / n_samples)
logger.info("epoll {0} loss {1}".format(i, total_loss / n_samples))
writer.close()
w_val, b_val = sess.run([w, b])
logger.info("w {0},b {1}".format(w_val, b_val))
writer.close()
show_data(X_train, Y_train, w_val, b_val)
# 查看损失变化图
show_loss(total)
def k_linear_regression():
"""
k折交叉验证发
:return:
"""
from sklearn.model_selection import KFold
X, Y, w, b = get_model()
loss = get_loss(X, Y, w, b)
optimizer = get_optimizer(loss)
linear_ops = tf.global_variables_initializer()
X_train, Y_train, n_samples = loaddata()
total = []
n_splits = 5
with tf.Session() as sess:
sess.run(linear_ops)
writer = tf.summary.FileWriter("linear_regression", sess.graph)
for i in range(10):
total_train_loss = 0
total_test_loss = 0
k_fold = KFold(n_splits=n_splits, shuffle=False, random_state=None)
for train_index, test_index in k_fold.split(X_train, Y_train):
# 训练数据
for x, y in zip(X_train[train_index], Y_train[train_index]):
_, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})
total_train_loss += l / len(train_index)
# 测试数据 计算评估误差
w_val, b_val = sess.run([w, b])
test_loss = get_loss(X_train[test_index], Y_train[test_index], w_val, b_val)
total_test_loss += tf.reduce_mean(test_loss).eval()
total.append(total_train_loss / n_splits)
logger.info("epoll {0} train loss {1} test loss {2}".format(i, total_train_loss / n_splits,
total_test_loss / n_splits))
writer.close()
w_val, b_val = sess.run([w, b])
logger.info("w {0},b {1}".format(w_val, b_val))
writer.close()
# 对比预估值和真实值
show_data(X_train, Y_train, w_val, b_val)
# 查看损失变化图
show_loss(total)
def multiple_linear_regression():
"""
多特征的线性回归模型
:return:
"""
boston_data = datasets.load_boston()
X_train, Y_train = boston_data.data, boston_data.target
n_samples = len(X_train)
# 将偏差作为一列特征添加进去
X_train = np.c_[X_train, np.zeros(shape=(n_samples, 1))]
n_features = X_train.shape[1]
X_train = (X_train - np.mean(X_train)) / np.std(X_train)
Y_train = np.reshape(Y_train, newshape=(n_samples, 1))
# TODO 暂时不考虑偏差,考虑偏差就是把偏差也当做一个特征值
X = tf.placeholder(dtype=tf.float32, shape=(n_samples, n_features), name="X")
Y = tf.placeholder(dtype=tf.float32, shape=(n_samples, 1), name="Y")
w = tf.Variable(tf.random_normal(shape=(n_features, 1)), name="w")
Y_that = tf.matmul(X, w)
loss = tf.reduce_mean(tf.square(Y - Y_that), name="loss")
optimizer = tf.train.GradientDescentOptimizer(0.001, name="optimizer").minimize(loss)
linear_ops = tf.global_variables_initializer()
total = []
with tf.Session() as sess:
sess.run(linear_ops)
writer = tf.summary.FileWriter("multiplinear_regression", sess.graph)
for i in range(1000):
_, total_loss = sess.run([optimizer, loss], feed_dict={X: X_train, Y: Y_train})
total.append(total_loss)
logger.info("epoll {0} loss {1}".format(i, total_loss / n_samples))
writer.close()
w_value = sess.run(w)
writer.close()
show_loss(total)
# 预测
n = 500
Y_pred = np.matmul(X_train[n, :], w_value)
logger.info("pred {0} real {1}".format(Y_pred[0], Y_train[n][0]))
|
zh
| 0.798739
|
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2019/10/23 10:43 # @Author : ganliang # @File : tf_linear_regression.py # @Desc : 单特征的线性回归 数据归一化处理 :param X: :return: 获取到数据集 :return: # X_train=normaize(X_train) 定义模型 :return: # 定义X,Y占位符 # 定义权重和偏差 定义损失函数 :return: # 线性回归模型 # 平方损失 获取优化方法 :param loss: :return: # 随机梯度下降优化算法 # optimizer = tf.train.AdagradOptimizer(learning_rate=0.01).minimize(loss) 显示真实数据和预测数据之间的差异 :param X_train: 训练数据 :param Y_train: 标签 :param w: 权重值 :param b: 偏差值 :return: # 对比预估值和真实值 展示损失函数变化图 :return: 简单的线性回归模型 :return: # 查看损失变化图 k折交叉验证发 :return: # 训练数据 # 测试数据 计算评估误差 # 对比预估值和真实值 # 查看损失变化图 多特征的线性回归模型 :return: # 将偏差作为一列特征添加进去 # TODO 暂时不考虑偏差,考虑偏差就是把偏差也当做一个特征值 # 预测
| 3.140103
| 3
|
Lib/lib2to3/fixes/fix_dict.py
|
orestis/python
| 1
|
6625667
|
<filename>Lib/lib2to3/fixes/fix_dict.py
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for dict methods.
d.keys() -> list(d.keys())
d.items() -> list(d.items())
d.values() -> list(d.values())
d.iterkeys() -> iter(d.keys())
d.iteritems() -> iter(d.items())
d.itervalues() -> iter(d.values())
Except in certain very specific contexts: the iter() can be dropped
when the context is list(), sorted(), iter() or for...in; the list()
can be dropped when the context is list() or sorted() (but not iter()
or for...in!). Special contexts that apply to both: list(), sorted(), tuple()
set(), any(), all(), sum().
Note: iter(d.keys()) could be written as iter(d) but since the
original d.iterkeys() was also redundant we don't fix this. And there
are (rare) contexts where it makes a difference (e.g. when passing it
as an argument to a function that introspects the argument).
"""
# Local imports
from .. import pytree
from .. import patcomp
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, LParen, RParen, ArgList, Dot
from .. import fixer_util
# Contexts in which the iter() wrapper may be dropped: every consuming
# call plus iter() itself (list() alone allows dropping only the list()).
iter_exempt = fixer_util.consuming_calls | set(["iter"])
class FixDict(fixer_base.BaseFix):
    # Matches a dict-method call like ``d.keys()`` with any trailing chain
    # captured in ``tail`` (e.g. ``d.keys()[0]``).
    PATTERN = """
    power< head=any+
         trailer< '.' method=('keys'|'items'|'values'|
                              'iterkeys'|'iteritems'|'itervalues') >
          parens=trailer< '(' ')' >
          tail=any*
    >
    """

    def transform(self, node, results):
        """Rewrite ``d.<method>()`` to the list()/iter()-wrapped form.

        The wrapper is omitted when the call already sits in a special
        context (see ``in_special_context``) where it would be redundant.
        """
        head = results["head"]
        method = results["method"][0] # Extract node for method name
        tail = results["tail"]
        syms = self.syms
        method_name = method.value
        isiter = method_name.startswith("iter")
        if isiter:
            # Strip the "iter" prefix: iterkeys -> keys, etc.
            method_name = method_name[4:]
        assert method_name in ("keys", "items", "values"), repr(method)
        head = [n.clone() for n in head]
        tail = [n.clone() for n in tail]
        # When there is no trailing chain and the context consumes the
        # result anyway, the wrapper call can be dropped entirely.
        special = not tail and self.in_special_context(node, isiter)
        args = head + [pytree.Node(syms.trailer,
                                   [Dot(),
                                    Name(method_name,
                                         prefix=method.prefix)]),
                       results["parens"].clone()]
        new = pytree.Node(syms.power, args)
        if not special:
            new.prefix = ""
            new = Call(Name("iter" if isiter else "list"), [new])
        if tail:
            new = pytree.Node(syms.power, [new] + tail)
        new.prefix = node.prefix
        return new

    # Pattern for a call whose argument is the node under inspection.
    P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
    p1 = patcomp.compile_pattern(P1)

    # Pattern for the iterable position of a for-statement/comprehension.
    P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
            | comp_for< 'for' any 'in' node=any any* >
           """
    p2 = patcomp.compile_pattern(P2)

    def in_special_context(self, node, isiter):
        """Return True when the surrounding context makes the wrapper
        (iter() or list()) redundant and safe to drop."""
        if node.parent is None:
            return False
        results = {}
        if (node.parent.parent is not None and
               self.p1.match(node.parent.parent, results) and
               results["node"] is node):
            if isiter:
                # iter(d.iterkeys()) -> iter(d.keys()), etc.
                return results["func"].value in iter_exempt
            else:
                # list(d.keys()) -> list(d.keys()), etc.
                return results["func"].value in fixer_util.consuming_calls
        if not isiter:
            return False
        # for ... in d.iterkeys() -> for ... in d.keys(), etc.
        return self.p2.match(node.parent, results) and results["node"] is node
|
<filename>Lib/lib2to3/fixes/fix_dict.py
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for dict methods.
d.keys() -> list(d.keys())
d.items() -> list(d.items())
d.values() -> list(d.values())
d.iterkeys() -> iter(d.keys())
d.iteritems() -> iter(d.items())
d.itervalues() -> iter(d.values())
Except in certain very specific contexts: the iter() can be dropped
when the context is list(), sorted(), iter() or for...in; the list()
can be dropped when the context is list() or sorted() (but not iter()
or for...in!). Special contexts that apply to both: list(), sorted(), tuple()
set(), any(), all(), sum().
Note: iter(d.keys()) could be written as iter(d) but since the
original d.iterkeys() was also redundant we don't fix this. And there
are (rare) contexts where it makes a difference (e.g. when passing it
as an argument to a function that introspects the argument).
"""
# Local imports
from .. import pytree
from .. import patcomp
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, LParen, RParen, ArgList, Dot
from .. import fixer_util
iter_exempt = fixer_util.consuming_calls | set(["iter"])
class FixDict(fixer_base.BaseFix):
PATTERN = """
power< head=any+
trailer< '.' method=('keys'|'items'|'values'|
'iterkeys'|'iteritems'|'itervalues') >
parens=trailer< '(' ')' >
tail=any*
>
"""
def transform(self, node, results):
head = results["head"]
method = results["method"][0] # Extract node for method name
tail = results["tail"]
syms = self.syms
method_name = method.value
isiter = method_name.startswith("iter")
if isiter:
method_name = method_name[4:]
assert method_name in ("keys", "items", "values"), repr(method)
head = [n.clone() for n in head]
tail = [n.clone() for n in tail]
special = not tail and self.in_special_context(node, isiter)
args = head + [pytree.Node(syms.trailer,
[Dot(),
Name(method_name,
prefix=method.prefix)]),
results["parens"].clone()]
new = pytree.Node(syms.power, args)
if not special:
new.prefix = ""
new = Call(Name("iter" if isiter else "list"), [new])
if tail:
new = pytree.Node(syms.power, [new] + tail)
new.prefix = node.prefix
return new
P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
p1 = patcomp.compile_pattern(P1)
P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
| comp_for< 'for' any 'in' node=any any* >
"""
p2 = patcomp.compile_pattern(P2)
def in_special_context(self, node, isiter):
if node.parent is None:
return False
results = {}
if (node.parent.parent is not None and
self.p1.match(node.parent.parent, results) and
results["node"] is node):
if isiter:
# iter(d.iterkeys()) -> iter(d.keys()), etc.
return results["func"].value in iter_exempt
else:
# list(d.keys()) -> list(d.keys()), etc.
return results["func"].value in fixer_util.consuming_calls
if not isiter:
return False
# for ... in d.iterkeys() -> for ... in d.keys(), etc.
return self.p2.match(node.parent, results) and results["node"] is node
|
en
| 0.55099
|
# Copyright 2007 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. Fixer for dict methods. d.keys() -> list(d.keys()) d.items() -> list(d.items()) d.values() -> list(d.values()) d.iterkeys() -> iter(d.keys()) d.iteritems() -> iter(d.items()) d.itervalues() -> iter(d.values()) Except in certain very specific contexts: the iter() can be dropped when the context is list(), sorted(), iter() or for...in; the list() can be dropped when the context is list() or sorted() (but not iter() or for...in!). Special contexts that apply to both: list(), sorted(), tuple() set(), any(), all(), sum(). Note: iter(d.keys()) could be written as iter(d) but since the original d.iterkeys() was also redundant we don't fix this. And there are (rare) contexts where it makes a difference (e.g. when passing it as an argument to a function that introspects the argument). # Local imports power< head=any+ trailer< '.' method=('keys'|'items'|'values'| 'iterkeys'|'iteritems'|'itervalues') > parens=trailer< '(' ')' > tail=any* > # Extract node for method name for_stmt< 'for' any 'in' node=any ':' any* > | comp_for< 'for' any 'in' node=any any* > # iter(d.iterkeys()) -> iter(d.keys()), etc. # list(d.keys()) -> list(d.keys()), etc. # for ... in d.iterkeys() -> for ... in d.keys(), etc.
| 2.289939
| 2
|
FWCore/Skeletons/python/cms.py
|
nistefan/cmssw
| 0
|
6625668
|
<gh_stars>0
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#pylint: disable-msg=
"""
File : cms.py
Author : <NAME> <<EMAIL>>
Description: CMS-related utils
"""
# system modules
import os
import sys
# package modules
from FWCore.Skeletons.utils import code_generator
def config(tmpl, pkg_help, tmpl_dir):
    """Parse input arguments to mk-script.

    Python 2 only (print statement, xrange, raw_input).  Reads the
    package name from sys.argv[1], then scans the remaining options
    (-author NAME, *example*, -h/--help/-help, -debug) and returns the
    keyword dict consumed by the code generator.
    """
    kwds = {'author': '', 'tmpl': tmpl,
            'args': {}, 'debug': False,
            'tmpl_dir': tmpl_dir, 'working_dir': ''}
    etags = []
    if len(sys.argv) >= 2: # user give us arguments
        if sys.argv[1] in ['-h', '--help', '-help']:
            print pkg_help
            sys.exit(0)
        kwds['pname'] = sys.argv[1]
        for idx in xrange(2, len(sys.argv)):
            opt = sys.argv[idx]
            if opt == '-author':
                # Consumes the following argv entry as the author name.
                # NOTE(review): no bounds check — '-author' as the last
                # argument raises IndexError; confirm acceptable.
                kwds['author'] = sys.argv[idx+1]
                continue
            if opt.find('example') != -1:
                etags.append('@%s' % opt)
                continue
            if opt in ['-h', '--help', '-help']:
                print pkg_help
                sys.exit(0)
            if opt == '-debug':
                kwds['debug'] = True
                continue
    elif len(sys.argv) == 1:
        # No arguments: prompt interactively for the name.
        msg = 'Please enter %s name: ' % tmpl.lower()
        kwds['pname'] = raw_input(msg)
    else:
        # NOTE(review): len(sys.argv) is always >= 1, so this branch
        # appears unreachable — confirm.
        print pkg_help
        sys.exit(0)
    kwds['tmpl_etags'] = etags
    return kwds
def config_with_parser(tmpl, args, tmpl_dir):
    """
    Inject arguments parsed upstream into mk-scripts.

    Builds the keyword dictionary consumed by the code generator from an
    argparse-style *args* object produced by the front-end binaries.
    """
    kwds = {
        'tmpl': tmpl,
        'tmpl_dir': tmpl_dir,
        'args': {},
        'pname': args.subpackage_name,
        'author': args.author if args.author else '',
        'debug': True if args.debug else False,
    }
    kwds['tmpl_etags'] = ['@%s' % args.example] if args.example else []
    return kwds
def cms_error():
    "Standard CMS error message"
    parts = (
        "\nPackages must be created in a 'subsystem'.",
        "\nPlease set your CMSSW environment and go to $CMSSW_BASE/src",
        "\nCreate or choose directory from there and then ",
        "\nrun the script from that directory",
    )
    return ''.join(parts)
def test_cms_environment(tmpl):
"""
Test CMS environment and requirements to run within CMSSW_BASE.
Return True if we fullfill requirements and False otherwise.
"""
base = os.environ.get('CMSSW_BASE', None)
if not base:
return False, []
cdir = os.getcwd()
ldir = cdir.replace(os.path.join(base, 'src'), '')
dirs = ldir.split('/')
# test if we're within CMSSW_BASE/src/SubSystem area
if ldir and ldir[0] == '/' and len(dirs) == 2:
return 'subsystem', ldir
# test if we're within CMSSW_BASE/src/SubSystem/Pkg area
if ldir and ldir[0] == '/' and len(dirs) == 3:
return 'package', ldir
# test if we're within CMSSW_BASE/src/SubSystem/Pkg/src area
# if ldir and ldir[0] == '/' and len(dirs) == 4 and dirs[-1] == 'src':
# return 'src', ldir
# test if we're within CMSSW_BASE/src/SubSystem/Pkg/plugin area
# if ldir and ldir[0] == '/' and len(dirs) == 4 and dirs[-1] == 'plugins':
# return 'plugins', ldir
# test if we're within CMSSW_BASE/src/SubSystem/Pkg/dir area
if ldir and ldir[0] == '/' and len(dirs) == 4:
return dirs[-1], ldir
return False, ldir
def generate(kwds):
    """Run generator code based on provided set of arguments.

    Python 2 only (print statement).  Derives 'subsystem'/'pkgname'/
    'working_dir'/'tmpl_files' from the current location under
    $CMSSW_BASE/src and hands the assembled config to code_generator.
    Stand-alone templates (Record, Skeleton) tolerate running outside a
    package; all others exit(1) with cms_error().
    """
    config = dict(kwds)
    tmpl = kwds.get('tmpl')
    # Templates that may be generated outside a package directory.
    stand_alone_group = ['Record', 'Skeleton']
    config.update({'not_in_dir': stand_alone_group})
    if tmpl in stand_alone_group:
        whereami, ldir = test_cms_environment(tmpl)
        dirs = ldir.split('/')
        # NOTE(review): this 'pkgname' is immediately overwritten by
        # 'Package' two lines below — confirm which value is intended.
        config.update({'pkgname': kwds.get('pname')})
        # Fallbacks used when the location cannot be determined.
        config.update({'subsystem': 'Subsystem'})
        config.update({'pkgname': 'Package'})
        if whereami:
            if len(dirs) >= 3:
                config.update({'subsystem': dirs[1]})
                config.update({'pkgname': dirs[2]})
            elif len(dirs) >= 2:
                config.update({'subsystem': dirs[1]})
                config.update({'pkgname': dirs[1]})
    else:
        whereami, ldir = test_cms_environment(tmpl)
        dirs = ldir.split('/')
        if not dirs or not whereami:
            print cms_error()
            sys.exit(1)
        config.update({'subsystem': dirs[1]})
        config.update({'pkgname': kwds.get('pname')})
        if whereami in ['src', 'plugins']:
            # Generate only the .cc file directly into src/ or plugins/.
            config.update({'working_dir': whereami})
            config.update({'tmpl_files': '.cc'})
            config.update({'pkgname': dirs[2]})
        elif whereami == 'test':
            config.update({'working_dir': whereami})
            config.update({'tmpl_files':'.cc'})
            config.update({'pkgname': dirs[2]})
        elif whereami == 'subsystem':
            # At subsystem level generate the full package skeleton.
            config.update({'tmpl_files': 'all'})
        else:
            print cms_error()
            sys.exit(1)
    obj = code_generator(config)
    obj.generate()
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#pylint: disable-msg=
"""
File : cms.py
Author : <NAME> <<EMAIL>>
Description: CMS-related utils
"""
# system modules
import os
import sys
# package modules
from FWCore.Skeletons.utils import code_generator
def config(tmpl, pkg_help, tmpl_dir):
"Parse input arguments to mk-script"
kwds = {'author': '', 'tmpl': tmpl,
'args': {}, 'debug': False,
'tmpl_dir': tmpl_dir, 'working_dir': ''}
etags = []
if len(sys.argv) >= 2: # user give us arguments
if sys.argv[1] in ['-h', '--help', '-help']:
print pkg_help
sys.exit(0)
kwds['pname'] = sys.argv[1]
for idx in xrange(2, len(sys.argv)):
opt = sys.argv[idx]
if opt == '-author':
kwds['author'] = sys.argv[idx+1]
continue
if opt.find('example') != -1:
etags.append('@%s' % opt)
continue
if opt in ['-h', '--help', '-help']:
print pkg_help
sys.exit(0)
if opt == '-debug':
kwds['debug'] = True
continue
elif len(sys.argv) == 1:
# need to walk
msg = 'Please enter %s name: ' % tmpl.lower()
kwds['pname'] = raw_input(msg)
else:
print pkg_help
sys.exit(0)
kwds['tmpl_etags'] = etags
return kwds
def config_with_parser(tmpl, args, tmpl_dir):
"""
Inject arguments parsed upstream into mk-scripts.
The arguments are parsed by the different front-ends(binaries)
and passed here via the args object.
"""
kwds = {'author': '', 'tmpl': tmpl,
'args': {}, 'debug': False, 'tmpl_dir': tmpl_dir}
etags = []
kwds['pname'] = args.subpackage_name
if args.author: kwds['author'] = args.author
if args.debug: kwds['debug'] = True
if args.example: etags.append('@%s' % args.example)
kwds['tmpl_etags'] = etags
return kwds
def cms_error():
"Standard CMS error message"
msg = "\nPackages must be created in a 'subsystem'."
msg += "\nPlease set your CMSSW environment and go to $CMSSW_BASE/src"
msg += "\nCreate or choose directory from there and then "
msg += "\nrun the script from that directory"
return msg
def test_cms_environment(tmpl):
"""
Test CMS environment and requirements to run within CMSSW_BASE.
Return True if we fullfill requirements and False otherwise.
"""
base = os.environ.get('CMSSW_BASE', None)
if not base:
return False, []
cdir = os.getcwd()
ldir = cdir.replace(os.path.join(base, 'src'), '')
dirs = ldir.split('/')
# test if we're within CMSSW_BASE/src/SubSystem area
if ldir and ldir[0] == '/' and len(dirs) == 2:
return 'subsystem', ldir
# test if we're within CMSSW_BASE/src/SubSystem/Pkg area
if ldir and ldir[0] == '/' and len(dirs) == 3:
return 'package', ldir
# test if we're within CMSSW_BASE/src/SubSystem/Pkg/src area
# if ldir and ldir[0] == '/' and len(dirs) == 4 and dirs[-1] == 'src':
# return 'src', ldir
# test if we're within CMSSW_BASE/src/SubSystem/Pkg/plugin area
# if ldir and ldir[0] == '/' and len(dirs) == 4 and dirs[-1] == 'plugins':
# return 'plugins', ldir
# test if we're within CMSSW_BASE/src/SubSystem/Pkg/dir area
if ldir and ldir[0] == '/' and len(dirs) == 4:
return dirs[-1], ldir
return False, ldir
def generate(kwds):
"Run generator code based on provided set of arguments"
config = dict(kwds)
tmpl = kwds.get('tmpl')
stand_alone_group = ['Record', 'Skeleton']
config.update({'not_in_dir': stand_alone_group})
if tmpl in stand_alone_group:
whereami, ldir = test_cms_environment(tmpl)
dirs = ldir.split('/')
config.update({'pkgname': kwds.get('pname')})
config.update({'subsystem': 'Subsystem'})
config.update({'pkgname': 'Package'})
if whereami:
if len(dirs) >= 3:
config.update({'subsystem': dirs[1]})
config.update({'pkgname': dirs[2]})
elif len(dirs) >= 2:
config.update({'subsystem': dirs[1]})
config.update({'pkgname': dirs[1]})
else:
whereami, ldir = test_cms_environment(tmpl)
dirs = ldir.split('/')
if not dirs or not whereami:
print cms_error()
sys.exit(1)
config.update({'subsystem': dirs[1]})
config.update({'pkgname': kwds.get('pname')})
if whereami in ['src', 'plugins']:
config.update({'working_dir': whereami})
config.update({'tmpl_files': '.cc'})
config.update({'pkgname': dirs[2]})
elif whereami == 'test':
config.update({'working_dir': whereami})
config.update({'tmpl_files':'.cc'})
config.update({'pkgname': dirs[2]})
elif whereami == 'subsystem':
config.update({'tmpl_files': 'all'})
else:
print cms_error()
sys.exit(1)
obj = code_generator(config)
obj.generate()
|
en
| 0.511637
|
#!/usr/bin/env python #-*- coding: utf-8 -*- #pylint: disable-msg= File : cms.py Author : <NAME> <<EMAIL>> Description: CMS-related utils # system modules # package modules # user give us arguments # need to walk Inject arguments parsed upstream into mk-scripts. The arguments are parsed by the different front-ends(binaries) and passed here via the args object. Test CMS environment and requirements to run within CMSSW_BASE. Return True if we fullfill requirements and False otherwise. # test if we're within CMSSW_BASE/src/SubSystem area # test if we're within CMSSW_BASE/src/SubSystem/Pkg area # test if we're within CMSSW_BASE/src/SubSystem/Pkg/src area # if ldir and ldir[0] == '/' and len(dirs) == 4 and dirs[-1] == 'src': # return 'src', ldir # test if we're within CMSSW_BASE/src/SubSystem/Pkg/plugin area # if ldir and ldir[0] == '/' and len(dirs) == 4 and dirs[-1] == 'plugins': # return 'plugins', ldir # test if we're within CMSSW_BASE/src/SubSystem/Pkg/dir area
| 2.467916
| 2
|
crawl_ccass(mysql+proxyip)/crawl_ccass/middlewares.py
|
easy00000000/spider
| 0
|
6625669
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy.conf import settings
import random
class RandomUserAgentMiddleware(object):
def process_request(self, request, spider):
ua = random.choice(settings.get('USER_AGENT_LIST'))
if ua:
request.headers.setdefault('User-Agent', ua)
class ProxyMiddleware(object):
def __init__(self):
self.proxy_list = settings.get('PROXY_FILE')
with open(self.proxy_list) as ip_file:
self.proxies = [ip.strip() for ip in ip_file]
def process_request(self, request, spider):
request.meta['proxy'] = 'http://{}'.format(random.choice(self.proxies))
print 'Crawling with IP ' + request.meta['proxy']
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy.conf import settings
import random
class RandomUserAgentMiddleware(object):
def process_request(self, request, spider):
ua = random.choice(settings.get('USER_AGENT_LIST'))
if ua:
request.headers.setdefault('User-Agent', ua)
class ProxyMiddleware(object):
def __init__(self):
self.proxy_list = settings.get('PROXY_FILE')
with open(self.proxy_list) as ip_file:
self.proxies = [ip.strip() for ip in ip_file]
def process_request(self, request, spider):
request.meta['proxy'] = 'http://{}'.format(random.choice(self.proxies))
print 'Crawling with IP ' + request.meta['proxy']
|
en
| 0.541728
|
# -*- coding: utf-8 -*- # Define here the models for your spider middleware # # See documentation in: # http://doc.scrapy.org/en/latest/topics/spider-middleware.html
| 2.575898
| 3
|
21/test/test_url_2_label_by_word_vector_controller_api.py
|
apitore/apitore-sdk-python
| 3
|
6625670
|
# coding: utf-8
"""
Url2Label by word vector APIs
Url to label by word2vec of contents.<BR />[Endpoint] https://api.apitore.com/api/21 # noqa: E501
OpenAPI spec version: 0.0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.url_2_label_by_word_vector_controller_api import Url2LabelByWordVectorControllerApi # noqa: E501
from swagger_client.rest import ApiException
class TestUrl2LabelByWordVectorControllerApi(unittest.TestCase):
"""Url2LabelByWordVectorControllerApi unit test stubs"""
def setUp(self):
self.api = swagger_client.api.url_2_label_by_word_vector_controller_api.Url2LabelByWordVectorControllerApi() # noqa: E501
def tearDown(self):
pass
def test_get_using_get4(self):
"""Test case for get_using_get4
Get labels from URL # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
# coding: utf-8
"""
Url2Label by word vector APIs
Url to label by word2vec of contents.<BR />[Endpoint] https://api.apitore.com/api/21 # noqa: E501
OpenAPI spec version: 0.0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.url_2_label_by_word_vector_controller_api import Url2LabelByWordVectorControllerApi # noqa: E501
from swagger_client.rest import ApiException
class TestUrl2LabelByWordVectorControllerApi(unittest.TestCase):
"""Url2LabelByWordVectorControllerApi unit test stubs"""
def setUp(self):
self.api = swagger_client.api.url_2_label_by_word_vector_controller_api.Url2LabelByWordVectorControllerApi() # noqa: E501
def tearDown(self):
pass
def test_get_using_get4(self):
"""Test case for get_using_get4
Get labels from URL # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
en
| 0.613116
|
# coding: utf-8 Url2Label by word vector APIs Url to label by word2vec of contents.<BR />[Endpoint] https://api.apitore.com/api/21 # noqa: E501 OpenAPI spec version: 0.0.1 Generated by: https://github.com/swagger-api/swagger-codegen.git # noqa: E501 Url2LabelByWordVectorControllerApi unit test stubs # noqa: E501 Test case for get_using_get4 Get labels from URL # noqa: E501
| 2.580001
| 3
|
tin2dem/plane_math.py
|
lekks/tin2dem
| 7
|
6625671
|
# https://www.geeksforgeeks.org/program-to-find-equation-of-a-plane-passing-through-3-points/
def equation_plane(p1, p2, p3):
(x1, y1, z1) = p1
(x2, y2, z2) = p2
(x3, y3, z3) = p3
a1 = x2 - x1
b1 = y2 - y1
c1 = z2 - z1
a2 = x3 - x1
b2 = y3 - y1
c2 = z3 - z1
a = b1 * c2 - b2 * c1
b = a2 * c1 - a1 * c2
c = a1 * b2 - b1 * a2
d = (- a * x1 - b * y1 - c * z1)
return a, b, c, d
def norm_z(abcd):
(a, b, c, d) = abcd
if not c:
raise ArithmeticError("Plane is orthogonal")
return -a / c, -b / c, -d / c
def solve_z(xy, z_abcd):
(x, y) = xy
(za, zb, zc) = z_abcd
return za * x + zb * y + zc
|
# https://www.geeksforgeeks.org/program-to-find-equation-of-a-plane-passing-through-3-points/
def equation_plane(p1, p2, p3):
(x1, y1, z1) = p1
(x2, y2, z2) = p2
(x3, y3, z3) = p3
a1 = x2 - x1
b1 = y2 - y1
c1 = z2 - z1
a2 = x3 - x1
b2 = y3 - y1
c2 = z3 - z1
a = b1 * c2 - b2 * c1
b = a2 * c1 - a1 * c2
c = a1 * b2 - b1 * a2
d = (- a * x1 - b * y1 - c * z1)
return a, b, c, d
def norm_z(abcd):
(a, b, c, d) = abcd
if not c:
raise ArithmeticError("Plane is orthogonal")
return -a / c, -b / c, -d / c
def solve_z(xy, z_abcd):
(x, y) = xy
(za, zb, zc) = z_abcd
return za * x + zb * y + zc
|
en
| 0.536825
|
# https://www.geeksforgeeks.org/program-to-find-equation-of-a-plane-passing-through-3-points/
| 3.82157
| 4
|
mahjong/hand_calculating/yaku_list/sanshoku_douko.py
|
Enerccio/mahjong
| 0
|
6625672
|
<filename>mahjong/hand_calculating/yaku_list/sanshoku_douko.py
# -*- coding: utf-8 -*-
from mahjong.hand_calculating.yaku import Yaku
from mahjong.utils import is_pon, is_sou, is_pin, is_man, simplify
class SanshokuDoukou(Yaku):
"""
Three pon sets consisting of the same numbers in all three suits
"""
def __init__(self, yaku_id):
super(SanshokuDoukou, self).__init__(yaku_id)
def set_attributes(self):
self.tenhou_id = 26
self.name = '<NAME>'
self.english = 'Three Colored Triplets'
self.han_open = 2
self.han_closed = 2
self.is_yakuman = False
def is_condition_met(self, hand, *args):
pon_sets = [i for i in hand if is_pon(i)]
if len(pon_sets) < 3:
return False
sou_pon = []
pin_pon = []
man_pon = []
for item in pon_sets:
if is_sou(item[0]):
sou_pon.append(item)
elif is_pin(item[0]):
pin_pon.append(item)
elif is_man(item[0]):
man_pon.append(item)
for sou_item in sou_pon:
for pin_item in pin_pon:
for man_item in man_pon:
# cast tile indices to 1..9 representation
sou_item = [simplify(x) for x in sou_item]
pin_item = [simplify(x) for x in pin_item]
man_item = [simplify(x) for x in man_item]
if sou_item == pin_item == man_item:
return True
return False
|
<filename>mahjong/hand_calculating/yaku_list/sanshoku_douko.py
# -*- coding: utf-8 -*-
from mahjong.hand_calculating.yaku import Yaku
from mahjong.utils import is_pon, is_sou, is_pin, is_man, simplify
class SanshokuDoukou(Yaku):
"""
Three pon sets consisting of the same numbers in all three suits
"""
def __init__(self, yaku_id):
super(SanshokuDoukou, self).__init__(yaku_id)
def set_attributes(self):
self.tenhou_id = 26
self.name = '<NAME>'
self.english = 'Three Colored Triplets'
self.han_open = 2
self.han_closed = 2
self.is_yakuman = False
def is_condition_met(self, hand, *args):
pon_sets = [i for i in hand if is_pon(i)]
if len(pon_sets) < 3:
return False
sou_pon = []
pin_pon = []
man_pon = []
for item in pon_sets:
if is_sou(item[0]):
sou_pon.append(item)
elif is_pin(item[0]):
pin_pon.append(item)
elif is_man(item[0]):
man_pon.append(item)
for sou_item in sou_pon:
for pin_item in pin_pon:
for man_item in man_pon:
# cast tile indices to 1..9 representation
sou_item = [simplify(x) for x in sou_item]
pin_item = [simplify(x) for x in pin_item]
man_item = [simplify(x) for x in man_item]
if sou_item == pin_item == man_item:
return True
return False
|
en
| 0.86291
|
# -*- coding: utf-8 -*- Three pon sets consisting of the same numbers in all three suits # cast tile indices to 1..9 representation
| 3.101743
| 3
|
openapi_spec_validator/handlers/file.py
|
sthagen/openapi-spec-validator
| 1
|
6625673
|
<filename>openapi_spec_validator/handlers/file.py<gh_stars>1-10
"""OpenAPI spec validator handlers file module."""
import io
import json
from yaml import load
from openapi_spec_validator.handlers.base import BaseHandler
from openapi_spec_validator.handlers.compat import SafeLoader
from openapi_spec_validator.handlers.utils import uri_to_path
class FileObjectHandler(BaseHandler):
"""OpenAPI spec validator file-like object handler."""
def __init__(self, loader=SafeLoader):
self.loader = loader
def __call__(self, f):
return json.loads(json.dumps(load(f, self.loader)))
class FileHandler(FileObjectHandler):
"""OpenAPI spec validator file path handler."""
def __call__(self, uri):
if isinstance(uri, io.StringIO):
return super(FileHandler, self).__call__(uri)
assert uri.startswith("file")
filepath = uri_to_path(uri)
with open(filepath) as fh:
return super(FileHandler, self).__call__(fh)
|
<filename>openapi_spec_validator/handlers/file.py<gh_stars>1-10
"""OpenAPI spec validator handlers file module."""
import io
import json
from yaml import load
from openapi_spec_validator.handlers.base import BaseHandler
from openapi_spec_validator.handlers.compat import SafeLoader
from openapi_spec_validator.handlers.utils import uri_to_path
class FileObjectHandler(BaseHandler):
"""OpenAPI spec validator file-like object handler."""
def __init__(self, loader=SafeLoader):
self.loader = loader
def __call__(self, f):
return json.loads(json.dumps(load(f, self.loader)))
class FileHandler(FileObjectHandler):
"""OpenAPI spec validator file path handler."""
def __call__(self, uri):
if isinstance(uri, io.StringIO):
return super(FileHandler, self).__call__(uri)
assert uri.startswith("file")
filepath = uri_to_path(uri)
with open(filepath) as fh:
return super(FileHandler, self).__call__(fh)
|
en
| 0.315364
|
OpenAPI spec validator handlers file module. OpenAPI spec validator file-like object handler. OpenAPI spec validator file path handler.
| 2.340891
| 2
|
packages/serpent-msbuild/__init__.py
|
phr34k/serpent
| 1
|
6625674
|
<reponame>phr34k/serpent
import sys, os;
base_dir = os.path.dirname(os.path.realpath(__file__))
if sys.maxsize > 2**32:
sys.path.append( os.path.join(base_dir, "x64"))
else:
sys.path.append( os.path.join(base_dir, "x86"))
|
import sys, os;
base_dir = os.path.dirname(os.path.realpath(__file__))
if sys.maxsize > 2**32:
sys.path.append( os.path.join(base_dir, "x64"))
else:
sys.path.append( os.path.join(base_dir, "x86"))
|
none
| 1
| 2.303981
| 2
|
|
tensealstat/statistic/student_t_repeated_measures.py
|
kozzion/tensealstat
| 6
|
6625675
|
import math
from tensealstat.algebra.abstract_algebra import AbstractAlgebra
from tensealstat.statistic.abstract_statistic import AbstractStatistic
from tensealstat.statistic.test_assertion import TestAssertion
class StudentTRepeatedMeasures(object):
def encrypt_statistic(self, algebra:AbstractAlgebra, list_sample):
if len(list_sample) != 2:
raise Exception("StudentTRepeatedMeasures is a 2 sample procedure")
sample_0 = list_sample[0]
sample_1 = list_sample[1]
if algebra.size_vector(sample_0) != algebra.size_vector(sample_1):
raise Exception('Samples should be of equal size')
difference = sample_0 - sample_1
mean_difference = algebra.mean(difference)
variance = algebra.variance(difference)
variance_rescale = algebra.size_vector(sample_0)
degrees_of_freedom = algebra.size_vector(sample_0) - 1
#TODO these all need to get pickled properly or jsonsed or something so they can be moved over http
statistic_encoded = {}
statistic_encoded['type_statistic'] = 'student_t_repeated_measures'
statistic_encoded['mean_difference'] = mean_difference
statistic_encoded['variance'] = variance
statistic_encoded['variance_rescale'] = variance_rescale
statistic_encoded['degrees_of_freedom'] = degrees_of_freedom
return statistic_encoded
def decrypt_statistic(self, algebra:AbstractAlgebra, statistic_encoded):
if not statistic_encoded['type_statistic'] == 'student_t_repeated_measures':
raise Exception('Incorrect statistic')
mean_difference = algebra.decrypt_scalar(statistic_encoded['mean_difference'])
variance = algebra.decrypt_scalar(statistic_encoded['variance'])
variance_rescale = statistic_encoded['variance_rescale']
degrees_of_freedom = statistic_encoded['degrees_of_freedom']
t_statistic = mean_difference / (math.sqrt(variance) / math.sqrt(variance_rescale))
return t_statistic, degrees_of_freedom
|
import math
from tensealstat.algebra.abstract_algebra import AbstractAlgebra
from tensealstat.statistic.abstract_statistic import AbstractStatistic
from tensealstat.statistic.test_assertion import TestAssertion
class StudentTRepeatedMeasures(object):
def encrypt_statistic(self, algebra:AbstractAlgebra, list_sample):
if len(list_sample) != 2:
raise Exception("StudentTRepeatedMeasures is a 2 sample procedure")
sample_0 = list_sample[0]
sample_1 = list_sample[1]
if algebra.size_vector(sample_0) != algebra.size_vector(sample_1):
raise Exception('Samples should be of equal size')
difference = sample_0 - sample_1
mean_difference = algebra.mean(difference)
variance = algebra.variance(difference)
variance_rescale = algebra.size_vector(sample_0)
degrees_of_freedom = algebra.size_vector(sample_0) - 1
#TODO these all need to get pickled properly or jsonsed or something so they can be moved over http
statistic_encoded = {}
statistic_encoded['type_statistic'] = 'student_t_repeated_measures'
statistic_encoded['mean_difference'] = mean_difference
statistic_encoded['variance'] = variance
statistic_encoded['variance_rescale'] = variance_rescale
statistic_encoded['degrees_of_freedom'] = degrees_of_freedom
return statistic_encoded
def decrypt_statistic(self, algebra:AbstractAlgebra, statistic_encoded):
if not statistic_encoded['type_statistic'] == 'student_t_repeated_measures':
raise Exception('Incorrect statistic')
mean_difference = algebra.decrypt_scalar(statistic_encoded['mean_difference'])
variance = algebra.decrypt_scalar(statistic_encoded['variance'])
variance_rescale = statistic_encoded['variance_rescale']
degrees_of_freedom = statistic_encoded['degrees_of_freedom']
t_statistic = mean_difference / (math.sqrt(variance) / math.sqrt(variance_rescale))
return t_statistic, degrees_of_freedom
|
en
| 0.923115
|
#TODO these all need to get pickled properly or jsonsed or something so they can be moved over http
| 2.823844
| 3
|
contrib/PyTorch/Official/cv/image_classification/SPNASNet_100_for_PyTorch/timm/models/layers/cond_conv2d.py
|
Ascend/modelzoo
| 12
|
6625676
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" PyTorch Conditionally Parameterized Convolution (CondConv)
Paper: CondConv: Conditionally Parameterized Convolutions for Efficient Inference
(https://arxiv.org/abs/1904.04971)
Hacked together by / Copyright 2020 <NAME>
"""
import math
from functools import partial
import numpy as np
import torch
from torch import nn as nn
from torch.nn import functional as F
from .helpers import to_2tuple
from .conv2d_same import conv2d_same
from .padding import get_padding_value
def get_condconv_initializer(initializer, num_experts, expert_shape):
def condconv_initializer(weight):
"""CondConv initializer function."""
num_params = np.prod(expert_shape)
if (len(weight.shape) != 2 or weight.shape[0] != num_experts or
weight.shape[1] != num_params):
raise (ValueError(
'CondConv variables must have shape [num_experts, num_params]'))
for i in range(num_experts):
initializer(weight[i].view(expert_shape))
return condconv_initializer
class CondConv2d(nn.Module):
""" Conditionally Parameterized Convolution
Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py
Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion:
https://github.com/pytorch/pytorch/issues/17983
"""
__constants__ = ['in_channels', 'out_channels', 'dynamic_padding']
def __init__(self, in_channels, out_channels, kernel_size=3,
stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4):
super(CondConv2d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = to_2tuple(kernel_size)
self.stride = to_2tuple(stride)
padding_val, is_padding_dynamic = get_padding_value(
padding, kernel_size, stride=stride, dilation=dilation)
self.dynamic_padding = is_padding_dynamic # if in forward to work with torchscript
self.padding = to_2tuple(padding_val)
self.dilation = to_2tuple(dilation)
self.groups = groups
self.num_experts = num_experts
self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size
weight_num_param = 1
for wd in self.weight_shape:
weight_num_param *= wd
self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param))
if bias:
self.bias_shape = (self.out_channels,)
self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
init_weight = get_condconv_initializer(
partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape)
init_weight(self.weight)
if self.bias is not None:
fan_in = np.prod(self.weight_shape[1:])
bound = 1 / math.sqrt(fan_in)
init_bias = get_condconv_initializer(
partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape)
init_bias(self.bias)
def forward(self, x, routing_weights):
B, C, H, W = x.shape
weight = torch.matmul(routing_weights, self.weight)
new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size
weight = weight.view(new_weight_shape)
bias = None
if self.bias is not None:
bias = torch.matmul(routing_weights, self.bias)
bias = bias.view(B * self.out_channels)
# move batch elements with channels so each batch element can be efficiently convolved with separate kernel
x = x.view(1, B * C, H, W)
if self.dynamic_padding:
out = conv2d_same(
x, weight, bias, stride=self.stride, padding=self.padding,
dilation=self.dilation, groups=self.groups * B)
else:
out = F.conv2d(
x, weight, bias, stride=self.stride, padding=self.padding,
dilation=self.dilation, groups=self.groups * B)
out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1])
# Literal port (from TF definition)
# x = torch.split(x, 1, 0)
# weight = torch.split(weight, 1, 0)
# if self.bias is not None:
# bias = torch.matmul(routing_weights, self.bias)
# bias = torch.split(bias, 1, 0)
# else:
# bias = [None] * B
# out = []
# for xi, wi, bi in zip(x, weight, bias):
# wi = wi.view(*self.weight_shape)
# if bi is not None:
# bi = bi.view(*self.bias_shape)
# out.append(self.conv_fn(
# xi, wi, bi, stride=self.stride, padding=self.padding,
# dilation=self.dilation, groups=self.groups))
# out = torch.cat(out, 0)
return out
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" PyTorch Conditionally Parameterized Convolution (CondConv)
Paper: CondConv: Conditionally Parameterized Convolutions for Efficient Inference
(https://arxiv.org/abs/1904.04971)
Hacked together by / Copyright 2020 <NAME>
"""
import math
from functools import partial
import numpy as np
import torch
from torch import nn as nn
from torch.nn import functional as F
from .helpers import to_2tuple
from .conv2d_same import conv2d_same
from .padding import get_padding_value
def get_condconv_initializer(initializer, num_experts, expert_shape):
def condconv_initializer(weight):
"""CondConv initializer function."""
num_params = np.prod(expert_shape)
if (len(weight.shape) != 2 or weight.shape[0] != num_experts or
weight.shape[1] != num_params):
raise (ValueError(
'CondConv variables must have shape [num_experts, num_params]'))
for i in range(num_experts):
initializer(weight[i].view(expert_shape))
return condconv_initializer
class CondConv2d(nn.Module):
""" Conditionally Parameterized Convolution
Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py
Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion:
https://github.com/pytorch/pytorch/issues/17983
"""
__constants__ = ['in_channels', 'out_channels', 'dynamic_padding']
def __init__(self, in_channels, out_channels, kernel_size=3,
stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4):
super(CondConv2d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = to_2tuple(kernel_size)
self.stride = to_2tuple(stride)
padding_val, is_padding_dynamic = get_padding_value(
padding, kernel_size, stride=stride, dilation=dilation)
self.dynamic_padding = is_padding_dynamic # if in forward to work with torchscript
self.padding = to_2tuple(padding_val)
self.dilation = to_2tuple(dilation)
self.groups = groups
self.num_experts = num_experts
self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size
weight_num_param = 1
for wd in self.weight_shape:
weight_num_param *= wd
self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param))
if bias:
self.bias_shape = (self.out_channels,)
self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
init_weight = get_condconv_initializer(
partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape)
init_weight(self.weight)
if self.bias is not None:
fan_in = np.prod(self.weight_shape[1:])
bound = 1 / math.sqrt(fan_in)
init_bias = get_condconv_initializer(
partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape)
init_bias(self.bias)
def forward(self, x, routing_weights):
B, C, H, W = x.shape
weight = torch.matmul(routing_weights, self.weight)
new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size
weight = weight.view(new_weight_shape)
bias = None
if self.bias is not None:
bias = torch.matmul(routing_weights, self.bias)
bias = bias.view(B * self.out_channels)
# move batch elements with channels so each batch element can be efficiently convolved with separate kernel
x = x.view(1, B * C, H, W)
if self.dynamic_padding:
out = conv2d_same(
x, weight, bias, stride=self.stride, padding=self.padding,
dilation=self.dilation, groups=self.groups * B)
else:
out = F.conv2d(
x, weight, bias, stride=self.stride, padding=self.padding,
dilation=self.dilation, groups=self.groups * B)
out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1])
# Literal port (from TF definition)
# x = torch.split(x, 1, 0)
# weight = torch.split(weight, 1, 0)
# if self.bias is not None:
# bias = torch.matmul(routing_weights, self.bias)
# bias = torch.split(bias, 1, 0)
# else:
# bias = [None] * B
# out = []
# for xi, wi, bi in zip(x, weight, bias):
# wi = wi.view(*self.weight_shape)
# if bi is not None:
# bi = bi.view(*self.bias_shape)
# out.append(self.conv_fn(
# xi, wi, bi, stride=self.stride, padding=self.padding,
# dilation=self.dilation, groups=self.groups))
# out = torch.cat(out, 0)
return out
|
en
| 0.739719
|
# Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ PyTorch Conditionally Parameterized Convolution (CondConv) Paper: CondConv: Conditionally Parameterized Convolutions for Efficient Inference (https://arxiv.org/abs/1904.04971) Hacked together by / Copyright 2020 <NAME> CondConv initializer function. Conditionally Parameterized Convolution Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion: https://github.com/pytorch/pytorch/issues/17983 # if in forward to work with torchscript # move batch elements with channels so each batch element can be efficiently convolved with separate kernel # Literal port (from TF definition) # x = torch.split(x, 1, 0) # weight = torch.split(weight, 1, 0) # if self.bias is not None: # bias = torch.matmul(routing_weights, self.bias) # bias = torch.split(bias, 1, 0) # else: # bias = [None] * B # out = [] # for xi, wi, bi in zip(x, weight, bias): # wi = wi.view(*self.weight_shape) # if bi is not None: # bi = bi.view(*self.bias_shape) # out.append(self.conv_fn( # xi, wi, bi, stride=self.stride, padding=self.padding, # dilation=self.dilation, groups=self.groups)) # out = torch.cat(out, 0)
| 2.205783
| 2
|
scripts/get-relpath.py
|
cr1901/libmodem
| 0
|
6625677
|
<filename>scripts/get-relpath.py
#!/usr/bin/env python3
import os.path
import sys
if __name__ == "__main__":
print(os.path.relpath(sys.argv[1], sys.argv[2]))
|
<filename>scripts/get-relpath.py
#!/usr/bin/env python3
import os.path
import sys
if __name__ == "__main__":
print(os.path.relpath(sys.argv[1], sys.argv[2]))
|
fr
| 0.221828
|
#!/usr/bin/env python3
| 2.55193
| 3
|
bwdist.py
|
KitwareMedical/TubeTK-pypbm
| 6
|
6625678
|
"""bwdist.py
"""
__license__ = "Apache License, Version 2.0"
__author__ = "<NAME>, Kitware Inc., 2013"
__email__ = "E-Mail: <EMAIL>"
__status__ = "Development"
from optparse import OptionParser
import SimpleITK as sitk
import numpy as np
import sys
import os
def invert(orgImg):
"""Compute image inverse.
Paramters
---------
orgImg : sitk.Image
Input image.
Returns
-------
invImg : sitk.Image
Inverse of input image.
"""
orgMat = sitk.GetArrayFromImage(orgImg)
minVal = np.amin(orgMat)
maxVal = np.amax(orgMat)
invMat = (maxVal + minVal) - orgMat
invImg = sitk.GetImageFromArray(invMat)
imgImg.CopyInformation(orgImg)
def usage():
"""Print usage information"""
print("""
Take a binary image, apply skeletonization and output an inverted Euclidean
distance transform image.
USAGE:
{0} [OPTIONS]
{0} -h
OPTIONS (Overview):
-i NUM
-o NUM
-s
OPTIONS (Detailed):
-i FILE
FILE is the filename of the input image file.
-o FILE
FILE is the filename of the output distance image.
-s
If -s is specified, use the squared Euclidean distance.
AUTHOR: <NAME>, Kitware Inc., 2013
<EMAIL>
""".format(sys.argv[0]))
def main(argv=None):
if argv is None:
argv=sys.argv
parser = OptionParser(add_help_option=False)
parser.add_option("-l", dest="binFile")
parser.add_option("-o", dest="denFile")
parser.add_option("-s", dest="square", action="store_true", default=False)
parser.add_option("-h", dest="doHelp", action="store_true", default=False)
options, _ = parser.parse_args()
if options.doHelp:
usage()
sys.exit(-1)
binFile = options.binFile
denFile = options.denFile
useSquaredDistance = options.square
if not os.path.exists(binFile):
raise Exception("File %s does not exist!" % imgFile)
# read the image from disk
img = sitk.ReadImage(imgFile)
# generate skeleton
thinFilter = sitk.BinaryThinningImageFilter()
thinnedImg = thinFilter.Execute(img)
# compute the distance map
dmapFilter = sitk.DanielssonDistanceMapImageFilter()
dmapFilter.SetSquaredDistance(useSquaredDistance)
dmap = dmapFilter.Execute(thinnedImg)
pmap = invert(dmap)
# ... and write the image back to disk
sitk.WriteImage(pmap, denFile)
if __name__ == '__main__':
sys.exit(main())
|
"""bwdist.py
"""
__license__ = "Apache License, Version 2.0"
__author__ = "<NAME>, Kitware Inc., 2013"
__email__ = "E-Mail: <EMAIL>"
__status__ = "Development"
from optparse import OptionParser
import SimpleITK as sitk
import numpy as np
import sys
import os
def invert(orgImg):
"""Compute image inverse.
Paramters
---------
orgImg : sitk.Image
Input image.
Returns
-------
invImg : sitk.Image
Inverse of input image.
"""
orgMat = sitk.GetArrayFromImage(orgImg)
minVal = np.amin(orgMat)
maxVal = np.amax(orgMat)
invMat = (maxVal + minVal) - orgMat
invImg = sitk.GetImageFromArray(invMat)
imgImg.CopyInformation(orgImg)
def usage():
    """Print usage information for this script to stdout."""
    # FIX: the overview previously listed '-i NUM' / '-o NUM' while the
    # detailed section documents FILE arguments; unified on FILE.
    print("""
Take a binary image, apply skeletonization and output an inverted Euclidean
distance transform image.
USAGE:
{0} [OPTIONS]
{0} -h
OPTIONS (Overview):
-i FILE
-o FILE
-s
OPTIONS (Detailed):
-i FILE
FILE is the filename of the input image file.
-o FILE
FILE is the filename of the output distance image.
-s
If -s is specified, use the squared Euclidean distance.
AUTHOR: <NAME>, Kitware Inc., 2013
<EMAIL>
""".format(sys.argv[0]))
def main(argv=None):
    """Entry point: skeletonize a binary input image and write the inverted
    Euclidean distance map of the skeleton to the output file.

    :param argv: argument vector; defaults to sys.argv
    :raises Exception: if the input file does not exist
    """
    if argv is None:
        argv = sys.argv
    parser = OptionParser(add_help_option=False)
    # BUG FIX: the input option was registered as '-l' although usage()
    # documents it as '-i'.
    parser.add_option("-i", dest="binFile")
    parser.add_option("-o", dest="denFile")
    parser.add_option("-s", dest="square", action="store_true", default=False)
    parser.add_option("-h", dest="doHelp", action="store_true", default=False)
    options, _ = parser.parse_args()
    if options.doHelp:
        usage()
        sys.exit(-1)
    binFile = options.binFile
    denFile = options.denFile
    useSquaredDistance = options.square
    # BUG FIX: the original referenced the undefined name 'imgFile' here and
    # in ReadImage below; the parsed input filename is 'binFile'.
    if not os.path.exists(binFile):
        raise Exception("File %s does not exist!" % binFile)
    # read the image from disk
    img = sitk.ReadImage(binFile)
    # generate skeleton
    thinFilter = sitk.BinaryThinningImageFilter()
    thinnedImg = thinFilter.Execute(img)
    # compute the distance map
    dmapFilter = sitk.DanielssonDistanceMapImageFilter()
    dmapFilter.SetSquaredDistance(useSquaredDistance)
    dmap = dmapFilter.Execute(thinnedImg)
    pmap = invert(dmap)
    # ... and write the image back to disk
    sitk.WriteImage(pmap, denFile)
if __name__ == '__main__':
    sys.exit(main())
|
en
| 0.542937
|
bwdist.py Compute image inverse. Paramters --------- orgImg : sitk.Image Input image. Returns ------- invImg : sitk.Image Inverse of input image. Print usage information Take a binary image, apply skeletonization and output an inverted Euclidean distance transform image. USAGE: {0} [OPTIONS] {0} -h OPTIONS (Overview): -i NUM -o NUM -s OPTIONS (Detailed): -i FILE FILE is the filename of the input image file. -o FILE FILE is the filename of the output distance image. -s If -s is specified, use the squared Euclidean distance. AUTHOR: <NAME>, Kitware Inc., 2013 <EMAIL> # read the image from disk # generate skeleton # compute the distance map # ... and write the image back to disk
| 2.666479
| 3
|
apps/reports/util.py
|
commtrack/commtrack-old-to-del
| 1
|
6625679
|
<filename>apps/reports/util.py
"""
Utility functions for report framework
"""
import inspect
from reports.custom.all import default
def is_mod_function(mod, func):
    '''Returns whether the object is a function defined in the module'''
    # Non-functions (classes, constants, imported names) are rejected first;
    # then the function must actually originate from this module.
    if not inspect.isfunction(func):
        return False
    return inspect.getmodule(func) == mod
def get_custom_report_module(domain):
    '''Get the reports module for a domain, if it exists.  Otherwise
    this returns nothing'''
    # Module path convention: reports.custom.<lowercased domain name>
    module_name = "reports.custom.%s" % domain.name.lower()
    return _safe_import(module_name)
def get_global_report_module(domain):
    '''Get the global reports module for a domain.'''
    candidate = _safe_import("reports.custom.all.%s" % domain.name.lower())
    # Fall back to the shared default module when the domain has no global
    # report module of its own.
    return candidate if candidate else default
def get_report_method(domain, report_name):
    """Gets a domained report by name, checking first the explicit
    custom reports and then the domain defaults.  If no such
    report is found, returns None"""
    # Search order matters: domain-specific module first, then the
    # global default module.
    for lookup in (get_custom_report_module, get_global_report_module):
        module = lookup(domain)
        if module and hasattr(module, report_name):
            return getattr(module, report_name)
    return None
def get_custom_reports(domain):
    """Gets all the custom reports for the domain (including any global
    default reports)"""
    reports = []
    domain_module = get_custom_report_module(domain)
    if domain_module:
        reports = extract_custom_reports(domain_module)
    # Global default reports are always appended after the explicit ones.
    reports.extend(extract_custom_reports(get_global_report_module(domain)))
    return reports
def extract_custom_reports(report_module):
    '''Given a reports module, get the list of custom reports defined
    in that class.  These are returned as dictionaries of the
    following format:
      { "name" : function_name, "display_name" : function_doc }
    see reports/custom.py for more information
    '''
    to_return = []
    for name in dir(report_module):
        obj = getattr(report_module, name)
        # using is_mod_function filters out the builtins and any
        # other fields defined in the custom class.
        # also use the python convention of keeping methods
        # that start with an "_" private.
        # BUG FIX: 'obj.func_name' is Python-2-only and raises
        # AttributeError on Python 3; '__name__' is valid on both.
        if is_mod_function(report_module, obj) and \
           not obj.__name__.startswith("_"):
            obj_rep = {"name": obj.__name__,
                       "display_name": obj.__doc__}
            to_return.append(obj_rep)
    return to_return
def get_whereclause(params):
    """Given a dictionary of params {key1: val1, key2: val2 }
    return a partial query like:
    WHERE key1 = val1
    AND key2 = val2
    ...
    """
    # SECURITY NOTE(review): keys and values are interpolated directly into
    # the SQL text; callers must not pass untrusted input here.
    clauses = []
    for position, (key, val) in enumerate(params.items()):
        keyword = "WHERE" if position == 0 else "AND"
        clauses.append("%s %s = '%s'" % (keyword, key, val))
    return " ".join(clauses)
def _safe_import(module_name):
try:
return __import__(module_name,
fromlist=[''])
except ImportError:
# this is ok, there just wasn't a module with custom reports
return None
|
<filename>apps/reports/util.py
"""
Utility functions for report framework
"""
import inspect
from reports.custom.all import default
def is_mod_function(mod, func):
'''Returns whether the object is a function in the module'''
return inspect.isfunction(func) and inspect.getmodule(func) == mod
def get_custom_report_module(domain):
'''Get the reports module for a domain, if it exists. Otherwise
this returns nothing'''
return _safe_import("reports.custom.%s" % domain.name.lower())
def get_global_report_module(domain):
'''Get the global reports module for a domain.'''
module = _safe_import("reports.custom.all.%s" % domain.name.lower())
if not module:
module = default
return module
def get_report_method(domain, report_name):
"""Gets a domained report by name, checking first the explicit
custom reports and then the domain defaults. If no such
report is found, returns None"""
report_module = get_custom_report_module(domain)
if report_module and hasattr(report_module, report_name):
return getattr(report_module, report_name)
default_module = get_global_report_module(domain)
if default_module and hasattr(default_module, report_name):
return getattr(default_module, report_name)
return None
def get_custom_reports(domain):
"""Gets all the custom reports for the domain (including any global
default reports)"""
custom_report_module = get_custom_report_module(domain)
if custom_report_module:
custom = extract_custom_reports(custom_report_module)
else:
custom = []
default_report_module = get_global_report_module(domain)
custom.extend(extract_custom_reports(default_report_module))
return custom
def extract_custom_reports(report_module):
'''Given a reports module , get the list of custom reports defined
in that class. These are returned as dictionaries of the
following format:
{ "name" : function_name, "display_name" : function_doc }
see reports/custom.py for more information
'''
to_return = []
for name in dir(report_module):
obj = getattr(report_module, name)
# using ismethod filters out the builtins and any
# other fields defined in the custom class.
# also use the python convention of keeping methods
# that start with an "_" private.
if is_mod_function(report_module, obj) and\
not obj.func_name.startswith("_"):
obj_rep = {"name" : obj.func_name,
"display_name" : obj.__doc__
}
to_return.append(obj_rep)
return to_return
def get_whereclause(params):
"""Given a dictionary of params {key1: val1, key2: val2 }
return a partial query like:
WHERE key1 = val1
AND key2 = val2
...
"""
query_parts = []
first = False
for key, val in params.items():
if not first:
first = True
query_parts.append("WHERE %s = '%s'" % (key, val))
else:
query_parts.append("AND %s = '%s'" % (key, val))
return " ".join(query_parts)
def _safe_import(module_name):
try:
return __import__(module_name,
fromlist=[''])
except ImportError:
# this is ok, there just wasn't a module with custom reports
return None
|
en
| 0.759609
|
Utility functions for report framework Returns whether the object is a function in the module Get the reports module for a domain, if it exists. Otherwise this returns nothing Get the global reports module for a domain. Gets a domained report by name, checking first the explicit custom reports and then the domain defaults. If no such report is found, returns None Gets all the custom reports for the domain (including any global default reports) Given a reports module , get the list of custom reports defined in that class. These are returned as dictionaries of the following format: { "name" : function_name, "display_name" : function_doc } see reports/custom.py for more information # using ismethod filters out the builtins and any # other fields defined in the custom class. # also use the python convention of keeping methods # that start with an "_" private. Given a dictionary of params {key1: val1, key2: val2 } return a partial query like: WHERE key1 = val1 AND key2 = val2 ... # this is ok, there just wasn't a module with custom reports
| 2.782321
| 3
|
setup.py
|
standanley/fault
| 0
|
6625680
|
<reponame>standanley/fault<filename>setup.py
"""
Setup file for fault
"""
from setuptools import setup
with open("README.md", "r") as fh:
LONG_DESCRIPTION = fh.read()
DESCRIPTION = """\
A Python package for testing hardware (part of the magma ecosystem)\
"""
setup(
name='fault',
version='2.0.19',
description=DESCRIPTION,
scripts=[],
packages=[
"fault",
],
install_requires=[
"astor",
"coreir",
"cosa",
"z3-solver",
"hwtypes",
"magma-lang",
"pyyaml",
"scipy",
"numpy"
],
license='BSD License',
url='https://github.com/leonardt/fault',
author='<NAME>',
author_email='<EMAIL>',
python_requires='>=3.6',
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown"
)
|
"""
Setup file for fault
"""
from setuptools import setup
with open("README.md", "r") as fh:
LONG_DESCRIPTION = fh.read()
DESCRIPTION = """\
A Python package for testing hardware (part of the magma ecosystem)\
"""
setup(
name='fault',
version='2.0.19',
description=DESCRIPTION,
scripts=[],
packages=[
"fault",
],
install_requires=[
"astor",
"coreir",
"cosa",
"z3-solver",
"hwtypes",
"magma-lang",
"pyyaml",
"scipy",
"numpy"
],
license='BSD License',
url='https://github.com/leonardt/fault',
author='<NAME>',
author_email='<EMAIL>',
python_requires='>=3.6',
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown"
)
|
en
| 0.692674
|
Setup file for fault \ A Python package for testing hardware (part of the magma ecosystem)\
| 1.653795
| 2
|
examples/stock/urls.py
|
soapteam/soapfish
| 20
|
6625681
|
<gh_stars>10-100
from django.conf.urls import url
# URL routing for the stock example app: two SOAP dispatch endpoints
# (SOAP 1.1 and 1.2) plus an operations endpoint.
# NOTE(review): string view references (e.g. 'stock.web.views.dispatch11')
# were removed in Django 1.10 -- confirm the project's Django version
# before upgrading.
urlpatterns = [
    url(r'^stock/soap11$', 'stock.web.views.dispatch11'),
    url(r'^stock/soap12$', 'stock.web.views.dispatch12'),
    url(r'^ws/ops$', 'stock.web.views.ops_dispatch'),
]
|
from django.conf.urls import url
urlpatterns = [
url(r'^stock/soap11$', 'stock.web.views.dispatch11'),
url(r'^stock/soap12$', 'stock.web.views.dispatch12'),
url(r'^ws/ops$', 'stock.web.views.ops_dispatch'),
]
|
none
| 1
| 1.281294
| 1
|
|
icsv/instantCsv.py
|
bponsler/icsv
| 1
|
6625682
|
<reponame>bponsler/icsv<gh_stars>1-10
from copy import deepcopy
from os.path import exists
from icsv.base import Row, Col, Cell
class icsv:
    '''The icsv class encapsulates the data contained in a single CSV
    file.

    A CSV consists of zero or more rows of information where each row
    has one or more named columns. The names of the columns are referred to
    as headers.

    The icsv class provides an interface for easily adding new data to a
    CSV, writing the data to a CSV file, and creating an icsv from a file.
    '''
    def __init__(self, headers, delimiter=','):
        '''
        :param headers: The list of column headers
        :type headers: list of strings
        :param delimiter: The CSV delimiter
        :type delimiter: string
        '''
        self.__headers = headers
        self.__delimiter = delimiter
        # List of dictionaries mapping headers to values for each
        # row in the CSV
        self.__data = []

    @classmethod
    def fromFile(cls, filename, headers=None, delimiter=',',
                 containsHeaders=True):
        '''Create an icsv from a given CSV file.

        :param filename: The path to the CSV file
        :type filename: string
        :param headers: The list of CSV headers. If this is None they will be
                        automatically read from the file
        :type headers: list of strings
        :param delimiter: The CSV delimiter
        :type delimiter: string
        :param containsHeaders: True if the file list the headers as the first
                                line, False if it does not
        :type containsHeaders: bool
        :raises Exception: If the file does not exist
        :raises Exception: If ``headers`` is None and ``containsHeaders``
                           is False
        '''
        # CSV file must actually exist
        if not exists(filename):
            raise Exception("File does not exist: %s" % filename)
        # Must be able to determine the headers
        if headers is None and not containsHeaders:
            raise Exception("Could not determine headers. If 'headers' is " \
                            "None, then 'containsHeaders' must be True")
        # Context manager guarantees the handle is closed even on error
        # (original used bare open/read/close).
        with open(filename, 'r') as fd:
            content = fd.read()
        # Split the content into separate rows
        rows = content.strip().split('\n')
        # Split each row into separate columns
        splitRows = [row.strip().split(delimiter) for row in rows]
        data = []
        # Grab the headers from the first line in the file.
        # BUG FIX: the first row was already split into columns above, so
        # the original 'splitRows[0].split(delimiter)' raised
        # AttributeError (list has no .split).
        if headers is None:
            headers = splitRows[0]
        # Create row dictionaries for all the other data
        startIndex = 1 if containsHeaders else 0
        for splitRow in splitRows[startIndex:]:
            dataMap = dict(zip(headers, splitRow))
            data.append(dataMap)
        # Create the CSV file
        csv = icsv(headers, delimiter)
        csv.__data = data
        return csv

    def write(self, filename, useHeaders=True, overwrite=True):
        '''Write the data to the given CSV file.

        This is the same as calling::
            import icsv
            aCsv = icsv.icsv()
            writer = icsv.Writer.fromCsv(filename, aCsv, useHeaders, overwrite)

        :param filename: The path to the CSV filename
        :type filename: string
        :param useHeaders: True to write the CSV headers as the first
                           line the file, False does not do this
        :type useHeaders: bool
        :param overwrite: True to overwrite existing files, False will
                          result in an Exception
        :type overwrite: bool
        :raises Exception: If ``overwrite`` is False, and the file already
                           exists
        '''
        # Import writer here, to avoid cyclical dependencies
        from icsv.writer import Writer
        return Writer.fromCsv(filename, self, useHeaders, overwrite)

    def numRows(self):
        '''Get the number of rows in the CSV.

        :rtype: int
        '''
        return len(self.__data)

    def numCols(self):
        '''Get the number of columns in the CSV.

        :rtype: int
        '''
        return len(self.__headers)

    def delimiter(self):
        '''Get the delimiter value for this CSV.

        :rtype: string
        '''
        return self.__delimiter

    def headers(self):
        '''Get the list of column headers for this CSV.

        :rtype: list of strings
        '''
        return self.__headers

    def data(self):
        '''Get the list of :class:`icsv.Row` objects pertaining to the rows
        of this CSV.

        :rtype: list of :class:`icsv.Row` objects
        '''
        # Convert all rows into actual Row objects
        return [Row(self.__headers, r, self.__delimiter) for r in self.__data]

    def setCell(self, header, value, row=-1):
        '''Set the value of a cell.

        :param header: The header for the cell
        :type header: string
        :param value: The cell value
        :param row: The row index (last row by default)
        :type row: int
        :raises Exception: If an invalid row index is given
        :raises Exception: If an invalid header is given
        '''
        self.__validateRow(row)
        self.__validateHeader(header)
        row = self.__data[row]
        row[header] = value

    def addRow(self, items):
        '''Add a row of data to the CSV.

        ``items`` can be a list of values, or a dictionary of values.

        * items -- The list, or dictionary of row items

        :raises Exception: If ``items`` is a list and is not equal to the
                           number of headers
        :raises Exception: If ``items`` is a dictionary and contains entries
                           for unknown headers
        :raises Exception: If ``items`` is not a list or a dictionary
        '''
        # isinstance (rather than exact type comparison) also accepts
        # list/dict subclasses such as OrderedDict.
        if isinstance(items, list):
            self.__addRowList(items)
        elif isinstance(items, dict):
            self.__addRowMap(items)
        else:
            raise Exception("Unknown row type. Expected list or dictionary.")

    def removeRow(self, row=-1):
        '''Remove the given row.

        :param row: The index of the row to remove (last row by default)
        :type row: int
        :raises Exception: If row is not a valid index
        '''
        self.__validateRow(row)
        del self.__data[row]

    def getRow(self, row=-1):
        '''Get the given row.

        :param row: The row index (last row be default)
        :type row: int
        :raises Exception: If row is not a valid index
        '''
        self.__validateRow(row)
        return Row(self.__headers, self.__data[row], self.__delimiter)

    def getCol(self, header):
        '''Get the :class:`icsv.Col` object for the given column header.

        :param header: The column header
        :type header: string
        :rtype: A :class:`icsv.Col` object
        :raises Exception: If an unknown header is given
        '''
        self.__validateHeader(header)
        data = [row.get(header, '') for row in self.__data]
        return Col(header, data)

    def getCell(self, row, header):
        '''Get the :class:`icsv.Cell` object contained at the given
        row index and column header.

        :param row: The row index
        :type row: int
        :param header: The column header
        :type header: string
        :rtype: A :class:`icsv.Cell` object
        :raises Exception: If an invalid row index is given
        :raises Exception: If an unknown header is given
        '''
        self.__validateRow(row)
        self.__validateHeader(header)
        value = self.__data[row].get(header, '')
        return Cell(row, header, value)

    def getHeaderIndex(self, header):
        '''Get the column index for the given column header.

        :param header: The column header
        :type header: string
        :rtype: int
        :raises Exception: If an unknown header is given
        '''
        self.__validateHeader(header)
        return self.__headers.index(header)

    def getHeader(self, index):
        '''Get the column header for the given column index.

        :param index: The column index
        :type index: int
        :rtype: string
        :raises Exception: If an invalid column index is given
        '''
        if index not in range(len(self.__headers)):
            raise Exception("Header index out of range: %s" % index)
        return self.__headers[index]

    def getHeaders(self):
        '''Get the list of headers for this CSV file as a CSV string.

        :rtype: list of strings
        '''
        return self.__delimiter.join(self.__headers)

    # TODO: ability to add new columns
    # TODO: ability to remove columns
    # TODO: ability to re-arrange rows
    # TODO: ability to re-arrange columns
    # TODO: ability to search for data

    def filter(self, fn):
        '''Apply a filter function to the CSV data.

        The filter function must have the following signature::
            filterFn(rowIndex, columnHeader, cellValue)

        :param fn: The filter function
        :type fn: function(rowIndex, colHeader, cellValue)
        :returns: A list of Cells for which ``fn`` returned True
        :rtype: List of :class:`icsv.Cell` objects
        '''
        allCells = []
        for rowIdx in range(len(self.__data)):
            row = self.__data[rowIdx]
            for header in self.__headers:
                cellValue = row.get(header, '')
                if fn(rowIdx, header, cellValue):
                    cell = Cell(rowIdx, header, cellValue)
                    allCells.append(cell)
        return allCells

    def map(self, fn, overwrite=False):
        '''Map all of the values of the CSV.

        The map function must have the following signature::
            mapFn(rowIndex, columnHeader, cellValue)

        :param fn: The map function
        :type fn: function(rowIndex, columnHeader, cellValue)
        :param overwrite: True to replace the current icsv data with
                          the mapped data
        :type overwrite: bool
        :returns: An :class:`icsv.icsv` object containing the mapped data
        :rtype: An :class:`icsv.icsv` object
        '''
        if overwrite:
            csv = self  # Update this CSV data
        else:
            # Create a copy of the current CSV file
            csv = icsv(self.__headers, self.__delimiter)
            csv.__data = deepcopy(self.__data)
        for rowIdx in range(len(self.__data)):
            row = self.__data[rowIdx]
            for header in self.__headers:
                cell = row.get(header, '')
                newValue = fn(rowIdx, header, cell)
                # Store the new value
                csv.__data[rowIdx][header] = newValue
        return csv

    # TODO: ability to merge two icsvs into one -- not sure how this works

    ##### Private headers

    def __addRowList(self, items):
        '''Add a new row consisting of a list of values.

        :param items: The list of values
        :type items: list of values
        :raises Exception: If the number of items given does not match the
                           number of headers in the CSV
        '''
        # Must provide values for each header
        if len(items) != self.numCols():
            raise Exception("Expected %s items, but got %s" % (self.numCols(),
                                                               len(items)))
        itemMap = dict(zip(self.__headers, items))
        self.__addRowMap(itemMap)

    def __addRowMap(self, itemMap):
        '''Add a new row consisting of a dictionary mapping column headers
        to values.

        :param itemMap: The dictionary mapping column headers to values
        :type itemMap: dict
        :raises Exception: If the dictionary contains an entry
                           for an unknown header
        '''
        # Do not allow invalid headers to be added
        invalid = [h for h in itemMap if h not in self.__headers]
        if len(invalid) > 0:
            raise Exception("Attempting to add unknown headers: %s" % invalid)
        self.__data.append(itemMap)

    def __validateRow(self, row):
        '''Validate the given row index.

        :param row: The row index
        :type row: int
        :raises Exception: If an invalid row index is given
        '''
        if (row != -1 or self.numRows() == 0) and \
           row not in range(self.numRows()):
            raise Exception("Invalid row: %s" % row)

    def __validateHeader(self, header):
        '''Validate the given header value.

        :param header: The header
        :type header: string
        :raises Exception: If an unknown header is given
        '''
        if header not in self.__headers:
            raise Exception("Invalid header: %s" % header)

    def __str__(self):
        '''Convert the CSV to a string.'''
        lines = [
            self.__delimiter.join(self.__headers),
        ]
        s = [self.getRow(i) for i in range(self.numRows())]
        lines.extend(s)
        return '\n'.join(map(str, lines))
|
from copy import deepcopy
from os.path import exists
from icsv.base import Row, Col, Cell
class icsv:
'''The icsv class encapsulates the data contained in a single CSV
file.
A CSV consists of zero or more rows of information where each row
has one or more named columns. The names of the columns are referred to
as headers.
The icsv class provides an interface for easily adding new data to a
CSV, writing the data to a CSV file, and creating an icsv from a file.
'''
def __init__(self, headers, delimiter=','):
'''
:param headers: The list of column headers
:type headers: list of strings
:param delimiter: The CSV delimiter
:type delimiter: string
'''
self.__headers = headers
self.__delimiter = delimiter
# List of dictionaries mapping headers to values for each
# row in the CSV
self.__data = []
@classmethod
def fromFile(cls, filename, headers=None, delimiter=',',
containsHeaders=True):
'''Create an icsv from a given CSV file.
:param filename: The path to the CSV file
:type filename: string
:param headers: The list of CSV headers. If this is None they will be
automatically read from the file
:type headers: list of strings
:param delimiter: The CSV delimiter
:type delimiter: string
:param containsHeaders: True if the file list the headers as the first
line, False if it does not
:type containsHeaders: bool
:raises Exception: If the file does not exist
:raises Exception: If ``headers`` is None and ``containsHeaders``
is False
'''
# CSV file must actually exist
if not exists(filename):
raise Exception("File does not exist: %s" % filename)
# Must be able to determine the headers
if headers is None and not containsHeaders:
raise Exception("Could not determine headers. If 'headers' is " \
"None, then 'containsHeaders' must be True")
fd = open(filename, 'r')
content = fd.read()
fd.close()
# Split the content into separate rows
rows = content.strip().split('\n')
# Split each row into separate columns
splitRows = [row.strip().split(delimiter) for row in rows]
data = []
# Grab the headers from the first line in the file
if headers is None:
headers = splitRows[0].split(delimiter)
# Create row dictionaries for all the other data
startIndex = 1 if containsHeaders else 0
for splitRow in splitRows[startIndex:]:
dataMap = dict(zip(headers, splitRow))
data.append(dataMap)
# Create the CSV file
csv = icsv(headers, delimiter)
csv.__data = data
return csv
def write(self, filename, useHeaders=True, overwrite=True):
'''Write the data to the given CSV file.
This is the same as calling::
import icsv
aCsv = icsv.icsv()
writer = icsv.Writer.fromCsv(filename, aCsv, useHeaders, overwrite)
:param filename: The path to the CSV filename
:type filename: string
:param useHeaders: True to write the CSV headers as the first
line the file, False does not do this
:type useHeaders: bool
:param overwrite: True to overwrite existing files, False will
result in an Exception
:type overwrite: bool
:raises Exception: If ``overwrite`` is False, and the file already
exists
'''
# Import writer here, to avoid cyclical dependencies
from icsv.writer import Writer
return Writer.fromCsv(filename, self, useHeaders, overwrite)
def numRows(self):
'''Get the number of rows in the CSV.
:rtype: int
'''
return len(self.__data)
def numCols(self):
'''Get the number of columns in the CSV.
:rtype: int
'''
return len(self.__headers)
def delimiter(self):
'''Get the delimiter value for this CSV.
:rtype: string
'''
return self.__delimiter
def headers(self):
'''Get the list of column headers for this CSV.
:rtype: list of strings
'''
return self.__headers
def data(self):
'''Get the list of :class:`icsv.Row` objects pertaining to the rows
of this CSV.
:rtype: list of :class:`icsv.Row` objects
'''
# Convert all rows into actual Row objects
return [Row(self.__headers, r, self.__delimiter) for r in self.__data]
def setCell(self, header, value, row=-1):
'''Set the value of a cell.
:param header: The header for the cell
:type header: string
:param value: The cell value
:param row: The row index (last row by default)
:type row: int
:raises Exception: If an invalid row index is given
:raises Exception: If an invalid header is given
'''
self.__validateRow(row)
self.__validateHeader(header)
row = self.__data[row]
row[header] = value
def addRow(self, items):
'''Add a row of data to the CSV.
``items`` can be a list of values, or a dictionary of values.
* items -- The list, or dictionary of row items
:raises Exception: If ``items`` is a list and is not equal to the
number of headers
:raises Exception: If ``items`` is a dictionary and contains entries
for unknown headers
:raises Exception: If ``items`` is not a list or a dictionary
'''
if type(items) == type(list()):
self.__addRowList(items)
elif type(items) == type(dict()):
self.__addRowMap(items)
else:
raise Exception("Unknown row type. Expected list or dictionary.")
def removeRow(self, row=-1):
'''Remove the given row.
:param row: The index of the row to remove (last row by default)
:type row: int
:raises Exception: If row is not a valid index
'''
self.__validateRow(row)
del self.__data[row]
def getRow(self, row=-1):
'''Get the given row.
:param row: The row index (last row be default)
:type row: int
:raises Exception: If row is not a valid index
'''
self.__validateRow(row)
return Row(self.__headers, self.__data[row], self.__delimiter)
def getCol(self, header):
'''Get the :class:`icsv.Col` object for the given column header.
:param header: The column header
:type header: string
:rtype: A :class:`icsv.Col` object
:raises Exception: If an unknown header is given
'''
self.__validateHeader(header)
data = [row.get(header, '') for row in self.__data]
return Col(header, data)
def getCell(self, row, header):
'''Get the :class:`icsv.Cell` object contained at the given
row index and column header.
:param row: The row index
:type row: int
:param header: The column header
:type header: string
:rtype: A :class:`icsv.Cell` object
:raises Exception: If an invalid row index is given
:raises Exception: If an unknown header is given
'''
self.__validateRow(row)
self.__validateHeader(header)
value = self.__data[row].get(header, '')
return Cell(row, header, value)
def getHeaderIndex(self, header):
'''Get the column index for the given column header.
:param header: The column header
:type header: string
:rtype: int
:raises Exception: If an unknown header is given
'''
self.__validateHeader(header)
return self.__headers.index(header)
def getHeader(self, index):
'''Get the column header for the given column index.
:param index: The column index
:type index: int
:rtype: string
:raises Exception: If an invalid column index is given
'''
if index not in range(len(self.__headers)):
raise Exception("Header index out of range: %s" % index)
return self.__headers[index]
def getHeaders(self):
'''Get the list of headers for this CSV file as a CSV string.
:rtype: list of strings
'''
return self.__delimiter.join(self.__headers)
# TODO: ability to add new columns
# TODO: ability to remove columns
# TODO: ability to re-arrange rows
# TODO: ability to re-arrange columns
# TODO: ability to search for data
def filter(self, fn):
'''Apply a filter function to the CSV data.
The filter function must have the following signature::
filterFn(rowIndex, columnHeader, cellValue)
:param fn: The filter function
:type fn: function(rowIndex, colHeader, cellValue)
:returns: A list of Cells for which ``fn`` returned True
:rtype: List of :class:`icsv.Cell` objects
'''
allCells = []
for rowIdx in range(len(self.__data)):
row = self.__data[rowIdx]
for header in self.__headers:
cellValue = row.get(header, '')
if fn(rowIdx, header, cellValue):
cell = Cell(rowIdx, header, cellValue)
allCells.append(cell)
return allCells
def map(self, fn, overwrite=False):
'''Map all of the values of the CSV.
The map function must have the following signature::
mapFn(rowIndex, columnHeader, cellValue)
:param fn: The map function
:type fn: function(rowIndex, columnHeader, cellValue)
:param overwrite: True to replace the current icsv data with
the mapped data
:type overwrite: bool
:returns: An :class:`icsv.icsv` object containing the mapped data
:rtype: An :class:`icsv.icsv` object
'''
if overwrite:
csv = self # Update this CSV data
else:
# Create a copy of the current CSV file
csv = icsv(self.__headers, self.__delimiter)
csv.__data = deepcopy(self.__data)
for rowIdx in range(len(self.__data)):
row = self.__data[rowIdx]
for header in self.__headers:
cell = row.get(header, '')
newValue = fn(rowIdx, header, cell)
# Store the new value
csv.__data[rowIdx][header] = newValue
return csv
# TODO: ability to merge two icsvs into one -- not sure how this works
##### Private headers
def __addRowList(self, items):
'''Add a new row consisting of a list of values.
:param items: The list of values
:type items: list of values
:raises Exception: If the number of items given does not match the
number of headers in the CSV
'''
# Must provide values for each header
if len(items) != self.numCols():
raise Exception("Expected %s items, but got %s" % (self.numCols(),
len(items)))
itemMap = dict(zip(self.__headers, items))
self.__addRowMap(itemMap)
def __addRowMap(self, itemMap):
'''Add a new row consisting of a dictionary mapping column headers
to values.
:param itemMap: The dictionary mapping column headers to values
:type itemMap: dict
:raises Exception: If the dictionary contains an entry
for an unknown header
'''
# Do not allow invalid headers to be added
invalid = [h for h in itemMap if h not in self.__headers]
if len(invalid) > 0:
raise Exception("Attempting to add unknown headers: %s" % invalid)
self.__data.append(itemMap)
def __validateRow(self, row):
'''Validate the given row index.
:param row: The row index
:type row: int
:raises Exception: If an invalid row index is given
'''
if (row != -1 or self.numRows() == 0) and \
row not in range(self.numRows()):
raise Exception("Invalid row: %s" % row)
def __validateHeader(self, header):
'''Validate the given header value.
:param header: The header
:type header: string
:raises Exception: If an unknown header is given
'''
if header not in self.__headers:
raise Exception("Invalid header: %s" % header)
def __str__(self):
'''Convert the CSV to a string.'''
lines = [
self.__delimiter.join(self.__headers),
]
s = [self.getRow(i) for i in range(self.numRows())]
lines.extend(s)
return '\n'.join(map(str, lines))
|
en
| 0.616738
|
The icsv class encapsulates the data contained in a single CSV file. A CSV consists of zero or more rows of information where each row has one or more named columns. The names of the columns are referred to as headers. The icsv class provides an interface for easily adding new data to a CSV, writing the data to a CSV file, and creating an icsv from a file. :param headers: The list of column headers :type headers: list of strings :param delimiter: The CSV delimiter :type delimiter: string # List of dictionaries mapping headers to values for each # row in the CSV Create an icsv from a given CSV file. :param filename: The path to the CSV file :type filename: string :param headers: The list of CSV headers. If this is None they will be automatically read from the file :type headers: list of strings :param delimiter: The CSV delimiter :type delimiter: string :param containsHeaders: True if the file list the headers as the first line, False if it does not :type containsHeaders: bool :raises Exception: If the file does not exist :raises Exception: If ``headers`` is None and ``containsHeaders`` is False # CSV file must actually exist # Must be able to determine the headers # Split the content into separate rows # Split each row into separate columns # Grab the headers from the first line in the file # Create row dictionaries for all the other data # Create the CSV file Write the data to the given CSV file. 
This is the same as calling:: import icsv aCsv = icsv.icsv() writer = icsv.Writer.fromCsv(filename, aCsv, useHeaders, overwrite) :param filename: The path to the CSV filename :type filename: string :param useHeaders: True to write the CSV headers as the first line the file, False does not do this :type useHeaders: bool :param overwrite: True to overwrite existing files, False will result in an Exception :type overwrite: bool :raises Exception: If ``overwrite`` is False, and the file already exists # Import writer here, to avoid cyclical dependencies Get the number of rows in the CSV. :rtype: int Get the number of columns in the CSV. :rtype: int Get the delimiter value for this CSV. :rtype: string Get the list of column headers for this CSV. :rtype: list of strings Get the list of :class:`icsv.Row` objects pertaining to the rows of this CSV. :rtype: list of :class:`icsv.Row` objects # Convert all rows into actual Row objects Set the value of a cell. :param header: The header for the cell :type header: string :param value: The cell value :param row: The row index (last row by default) :type row: int :raises Exception: If an invalid row index is given :raises Exception: If an invalid header is given Add a row of data to the CSV. ``items`` can be a list of values, or a dictionary of values. * items -- The list, or dictionary of row items :raises Exception: If ``items`` is a list and is not equal to the number of headers :raises Exception: If ``items`` is a dictionary and contains entries for unknown headers :raises Exception: If ``items`` is not a list or a dictionary Remove the given row. :param row: The index of the row to remove (last row by default) :type row: int :raises Exception: If row is not a valid index Get the given row. :param row: The row index (last row be default) :type row: int :raises Exception: If row is not a valid index Get the :class:`icsv.Col` object for the given column header. 
:param header: The column header :type header: string :rtype: A :class:`icsv.Col` object :raises Exception: If an unknown header is given Get the :class:`icsv.Cell` object contained at the given row index and column header. :param row: The row index :type row: int :param header: The column header :type header: string :rtype: A :class:`icsv.Cell` object :raises Exception: If an invalid row index is given :raises Exception: If an unknown header is given Get the column index for the given column header. :param header: The column header :type header: string :rtype: int :raises Exception: If an unknown header is given Get the column header for the given column index. :param index: The column index :type index: int :rtype: string :raises Exception: If an invalid column index is given Get the list of headers for this CSV file as a CSV string. :rtype: list of strings # TODO: ability to add new columns # TODO: ability to remove columns # TODO: ability to re-arrange rows # TODO: ability to re-arrange columns # TODO: ability to search for data Apply a filter function to the CSV data. The filter function must have the following signature:: filterFn(rowIndex, columnHeader, cellValue) :param fn: The filter function :type fn: function(rowIndex, colHeader, cellValue) :returns: A list of Cells for which ``fn`` returned True :rtype: List of :class:`icsv.Cell` objects Map all of the values of the CSV. 
The map function must have the following signature:: mapFn(rowIndex, columnHeader, cellValue) :param fn: The map function :type fn: function(rowIndex, columnHeader, cellValue) :param overwrite: True to replace the current icsv data with the mapped data :type overwrite: bool :returns: An :class:`icsv.icsv` object containing the mapped data :rtype: An :class:`icsv.icsv` object # Update this CSV data # Create a copy of the current CSV file # Store the new value # TODO: ability to merge two icsvs into one -- not sure how this works ##### Private headers Add a new row consisting of a list of values. :param items: The list of values :type items: list of values :raises Exception: If the number of items given does not match the number of headers in the CSV # Must provide values for each header Add a new row consisting of a dictionary mapping column headers to values. :param itemMap: The dictionary mapping column headers to values :type itemMap: dict :raises Exception: If the dictionary contains an entry for an unknown header # Do not allow invalid headers to be added Validate the given row index. :param row: The row index :type row: int :raises Exception: If an invalid row index is given Validate the given header value. :param header: The header :type header: string :raises Exception: If an unknown header is given Convert the CSV to a string.
| 3.996339
| 4
|
release/scripts/mgear/rigbits/sdk_io.py
|
tk-aria/mgear4
| 72
|
6625683
|
"""Rigbits, SDK i/o
exportSDKs(["drivenNodeA", "drivenNodeB"], "path/to/desired/output.json")
importSDKs("path/to/desired/output.json")
# MIRRORING -------
# copy from source, say left, to target, right
copySDKsToNode("jacketFlap_L1_fk0_sdk",
"neck_C0_0_jnt",
"jacketFlap_R1_fk0_sdk")
# invert/mirror the attributes necessary for the other side,
# in this case it is the following attributes
mirrorSDKkeys("jacketFlap_R1_fk0_sdk",
attributes=["rotateZ"],
invertDriver=True,
invertDriven=False)
mirrorSDKkeys("jacketFlap_R1_fk0_sdk",
attributes=["translateX", "translateY"],
invertDriver=True,
invertDriven=True)
# in this other instance, it was the same
copySDKsToNode("jacketFlap_L0_fk0_sdk",
"neck_C0_0_jnt",
"jacketFlap_R0_fk0_sdk")
Attributes:
SDK_ANIMCURVES_TYPE (list): sdk anim curves to support
"""
import json
import pprint
import pymel.core as pm
import mgear.core.utils as mUtils
from .six import string_types
SDK_UTILITY_TYPE = ("blendWeighted",)
SDK_ANIMCURVES_TYPE = ("animCurveUA", "animCurveUL", "animCurveUU")
# ==============================================================================
# Data export
# ==============================================================================
def _importData(filePath):
    """Read a json file and return its parsed contents.

    Expecting, but not limited to, a dictionary. On failure the error is
    printed and None is returned implicitly (best-effort behaviour).

    Args:
        filePath (string): path to file

    Returns:
        dict: contents of json file, expected dict
    """
    try:
        with open(filePath, "r") as handle:
            return json.load(handle)
    except Exception as err:
        print(err)
def _exportData(data, filePath):
    """Serialize ``data`` to the provided filepath as json.

    Errors are printed rather than raised (best-effort behaviour).

    Args:
        data (dict): expected dict, not limited to
        filePath (string): path to output json file
    """
    try:
        with open(filePath, "w") as handle:
            json.dump(data, handle, sort_keys=False, indent=4)
    except Exception as err:
        print(err)
# ==============================================================================
# pymel Convenience
# ==============================================================================
def getPynodes(nodes):
    """Convenience function allowing callers to pass in strings; each string
    is converted to a PyNode, anything else is passed through untouched.

    Args:
        nodes (list): string names (or existing PyNodes)

    Returns:
        list: of pynodes
    """
    return [pm.PyNode(item) if isinstance(item, string_types) else item
            for item in nodes]
# ==============================================================================
# sdk functions
# ==============================================================================
def getSDKDestination(animNodeOutputPlug):
    """Get the final destination of the sdk node, skipping any blendWeighted
    node in between, to reach the driven transform's plug.

    TODO: Open this up to provided type destination

    Args:
        animNodeOutputPlug (string): animationNode.output

    Returns:
        tuple: name of the driven node, and the driven attr
    """
    # Only follow connections into blendWeighted utilities or transforms
    connectionTypes = [SDK_UTILITY_TYPE[0], "transform"]
    targetDrivenAttr = pm.listConnections(animNodeOutputPlug,
                                          source=False,
                                          destination=True,
                                          plugs=True,
                                          type=connectionTypes,
                                          scn=True)
    # When the curve feeds a blendWeighted node, hop over it to the plug
    # that the blend node itself drives.
    if pm.nodeType(targetDrivenAttr[0].nodeName()) == "blendWeighted":
        blendNodeOutAttr = targetDrivenAttr[0].node().attr("output")
        targetDrivenAttr = pm.listConnections(blendNodeOutAttr,
                                              destination=True,
                                              plugs=True,
                                              scn=True)
    # "node.attr" -> (node, attr); assumes a single-dot plug name --
    # a nested/compound plug would make this unpack raise (TODO confirm)
    drivenNode, drivenAttr = targetDrivenAttr[0].split(".")
    return drivenNode, drivenAttr
def getMultiDriverSDKs(driven, sourceDriverFilter=None):
    """Get the sdk nodes that drive ``driven`` indirectly, through a
    blendWeighted utility node.

    Args:
        driven (string): name of the driven node
        sourceDriverFilter (list, pynode): Driver transforms to filter by;
            if the connected SDK is not driven by one of these nodes it will
            not be returned.

    Returns:
        list: of [animCurve output plug, driven input plug] pairs
    """
    sdkDrivers = []
    for sdkUtility in SDK_UTILITY_TYPE:
        # Pairs of (blend node source plug, driven destination plug)
        blend_NodePair = pm.listConnections(driven,
                                            source=True,
                                            type=sdkUtility,
                                            exactType=True,
                                            plugs=True,
                                            connections=True,
                                            sourceFirst=True,
                                            scn=True) or []
        if not blend_NodePair:
            continue
        for pairs in blend_NodePair:
            # Curves feeding the blend node; pair each curve plug with the
            # plug the blend node ultimately drives.
            sdkPairs = getConnectedSDKs(pairs[0].nodeName(), sourceDriverFilter=sourceDriverFilter)
            for sPair in sdkPairs:
                sdkDrivers.append([sPair[0], pairs[1]])
    return sdkDrivers
def getConnectedSDKs(driven, curvesOfType=None, sourceDriverFilter=None):
    """Get all the sdk (animCurve) nodes/plugs connected to the provided node.

    Args:
        driven (str, pynode): name of node, or pynode
        curvesOfType (list, optional): animCurve node types to search for;
            if none provided, falls back on the module-defined supported set.
        sourceDriverFilter (list, pynode): Driver transforms to filter by;
            if the connected SDK is not driven by one of these nodes it will
            not be returned.

    Returns:
        list: of (animCurve output plug, driven input plug) pairs
    """
    retrievedSDKNodes = []
    # Default resolved here rather than as a mutable default argument
    if not curvesOfType:
        curvesOfType = SDK_ANIMCURVES_TYPE
    for animCurve in curvesOfType:
        animCurveNodes = pm.listConnections(driven,
                                            source=True,
                                            type=animCurve,
                                            exactType=True,
                                            plugs=True,
                                            connections=True,
                                            sourceFirst=True,
                                            scn=True) or []
        # If the filter is given, keep only curves driven by transforms
        # inside sourceDriverFilter
        if sourceDriverFilter and animCurveNodes:
            filteredSDKNodes = []
            for driver_plug, anim_plug in animCurveNodes:
                # Transform nodes feeding the anim curve
                connectedDrivers = pm.listConnections(driver_plug.node(),
                                                      source=True,
                                                      type="transform",
                                                      exactType=True,
                                                      scn=True)
                if not connectedDrivers:
                    continue
                for conDriver in connectedDrivers:
                    # NOTE(review): listConnections without plugs=True returns
                    # nodes, so conDriver.node() looks suspect here -- confirm
                    # against pymel's return type before relying on this path.
                    if conDriver.node() in sourceDriverFilter:
                        filteredSDKNodes.append((driver_plug, anim_plug))
            # Replace the unfiltered list with the filtered one
            animCurveNodes = filteredSDKNodes
        retrievedSDKNodes.extend(animCurveNodes)
    return retrievedSDKNodes
def getSDKInfo(animNode):
    """Gather all the information from an sdk/animCurve node into a
    dictionary for exporting.

    Args:
        animNode (pynode): the animCurve node

    Returns:
        dict: dictionary of all the attrs to be exported
    """
    sdkInfo_dict = {}
    sdkKey_Info = []
    # ktv is a compound attr (listAttr returns three entries per key).
    # BUGFIX: use floor division -- under Python 3 "/" yields a float and
    # range() below would raise a TypeError.
    numberOfKeys = len(pm.listAttr("{0}.ktv".format(animNode), multi=True)) // 3
    itt_list = pm.keyTangent(animNode, itt=True, q=True)
    ott_list = pm.keyTangent(animNode, ott=True, q=True)
    # maya doesnt return a value if there is only one key frame set.
    if itt_list is None:
        itt_list = ["linear"]
    if ott_list is None:
        ott_list = ["linear"]
    for index in range(numberOfKeys):
        value = pm.getAttr("{0}.keyTimeValue[{1}]".format(animNode, index))
        absoluteValue = pm.keyframe(animNode,
                                    q=True,
                                    valueChange=True,
                                    index=index)[0]
        # [driver value, driven value, in tangent, out tangent]
        keyData = [value[0], absoluteValue, itt_list[index], ott_list[index]]
        sdkKey_Info.append(keyData)
    sdkInfo_dict["keys"] = sdkKey_Info
    sdkInfo_dict["type"] = animNode.type()
    sdkInfo_dict["preInfinity"] = animNode.getAttr("preInfinity")
    sdkInfo_dict["postInfinity"] = animNode.getAttr("postInfinity")
    sdkInfo_dict["weightedTangents"] = animNode.getAttr("weightedTangents")
    # Record the driver (input) plug
    animNodeInputPlug = "{0}.input".format(animNode.nodeName())
    sourceDriverAttr = pm.listConnections(animNodeInputPlug,
                                          source=True,
                                          plugs=True,
                                          scn=True)[0]
    driverNode, driverAttr = sourceDriverAttr.split(".")
    sdkInfo_dict["driverNode"] = driverNode
    sdkInfo_dict["driverAttr"] = driverAttr
    # Record the driven (output) destination, skipping blendWeighted nodes
    animNodeOutputPlug = "{0}.output".format(animNode.nodeName())
    drivenNode, drivenAttr = getSDKDestination(animNodeOutputPlug)
    sdkInfo_dict["drivenNode"] = drivenNode
    sdkInfo_dict["drivenAttr"] = drivenAttr
    return sdkInfo_dict
def getAllSDKInfoFromNode(node):
    """Collect info dicts for every sdk/animCurve connected to ``node``.

    Args:
        node (pynode): node to be searched

    Returns:
        dict: animCurve node name -> sdk info dict
    """
    # Direct connections plus curves routed through blendWeighted nodes
    connectedPairs = getConnectedSDKs(node) + getMultiDriverSDKs(node)
    return {animPlug.nodeName(): getSDKInfo(animPlug.node())
            for animPlug, _targetPlug in connectedPairs}
def removeSDKs(node, attributes=None, sourceDriverFilter=None):
    """Convenience function to remove (delete) all sdk nodes associated with
    the provided node.

    Args:
        node (pynode): name of the node
        attributes (list, optional): attributes to remove sdks from;
            all connectable attributes are assumed when omitted
        sourceDriverFilter (list, pynode): Driver transforms to filter by;
            if the connected SDK is not driven by one of these nodes it will
            not be removed.
    """
    toDelete = []
    # Default resolved here rather than as a mutable default argument;
    # if no attrs provided, assume all
    if not attributes:
        attributes = pm.listAttr(node, connectable=True)
    sourceSDKInfo = getConnectedSDKs(node, sourceDriverFilter=sourceDriverFilter)
    sourceSDKInfo.extend(getMultiDriverSDKs(node, sourceDriverFilter=sourceDriverFilter))
    for source, dest in sourceSDKInfo:
        # Only curves driving one of the requested attributes are deleted
        if dest.plugAttr(longName=True) not in attributes:
            continue
        toDelete.append(source.node())
    pm.delete(toDelete)
def copySDKsToNode(sourceDriven,
                   targetDriver,
                   targetDriven,
                   sourceAttributes=None,
                   sourceDriverFilter=None):
    """Duplicates sdk nodes from the source driven node onto the designated
    target driver/driven pair.

    Args:
        sourceDriven (pynode): source to copy from
        targetDriver (pynode): to drive the new sdk node
        targetDriven (pynode): node to be driven
        sourceAttributes (list, optional): attrs to copy; all connectable
            attributes are assumed when omitted
        sourceDriverFilter (list, pynode): Driver transforms to filter by;
            if the connected SDK is not driven by one of these nodes it will
            not be copied.

    Returns:
        TYPE: n/a
    """
    sourceDriven, targetDriver, targetDriven = getPynodes([sourceDriven,
                                                           targetDriver,
                                                           targetDriven])
    if sourceDriven == targetDriven:
        pm.warning("You cannot copy SDKs to the same name.")
        return
    # Default resolved here rather than as a mutable default argument;
    # if no attrs provided, assume all
    if not sourceAttributes:
        sourceAttributes = pm.listAttr(sourceDriven, connectable=True)
    # BUGFIX: the filter was previously hard-coded to None here (debug
    # leftover), so sourceDriverFilter was ignored for directly-connected SDKs
    # while still being applied to the blendWeighted-routed ones.
    sourceSDKInfo = getConnectedSDKs(sourceDriven,
                                     sourceDriverFilter=sourceDriverFilter)
    sourceSDKInfo.extend(getMultiDriverSDKs(sourceDriven, sourceDriverFilter))
    for source, dest in sourceSDKInfo:
        if dest.plugAttr(longName=True) not in sourceAttributes:
            continue
        # Driver plug currently feeding the original curve's input
        sourceDriverAttr = pm.listConnections("{0}.input".format(
                                              source.nodeName()),
                                              source=True,
                                              plugs=True,
                                              scn=True)[0]
        duplicateCurve = pm.duplicate(source, name="{0}_{1}".format(
                                      targetDriven.name(),
                                      dest.attrName(longName=True)))[0]
        # Drive the duplicated curve from the same attr on the target driver
        pm.connectAttr("{0}.{1}".format(
                       targetDriver,
                       sourceDriverAttr.attrName(longName=True)),
                       "{0}.input".format(duplicateCurve))
        drivenAttrPlug = "{0}.{1}".format(targetDriven,
                                          dest.name(includeNode=False))
        # Route through a blendWeighted node when the plug is already driven
        if pm.listConnections(drivenAttrPlug):
            targetAttrPlug = getBlendNodes(drivenAttrPlug)
        else:
            targetAttrPlug = drivenAttrPlug
        try:
            pm.connectAttr(duplicateCurve.output, targetAttrPlug)
        except RuntimeError:
            # error when trying to connect to a plug that is already
            # connected; retry on the previous available input index.
            targetAttrPlug = targetAttrPlug.replace("[1]", "[0]")
            pm.connectAttr(duplicateCurve.output, targetAttrPlug)
def stripKeys(animNode):
    """Remove all animation keys from the provided sdk node.

    Args:
        animNode (pynode): sdk/anim node
    """
    # ktv is a compound attr (listAttr returns three entries per key).
    # BUGFIX: use floor division -- under Python 3 "/" yields a float and
    # range() below would raise a TypeError.
    numKeys = len(pm.listAttr(animNode + ".ktv", multi=True)) // 3
    # Removing index 0 repeatedly shifts the remaining keys down each pass
    for _ in range(numKeys):
        animNode.remove(0)
def invertKeyValues(newKeyNode, invertDriver=True, invertDriven=True):
    """Mirror keyframe node procedure, in case you need to flip your SDK's.

    Args:
        newKeyNode (PyNode): sdk node to invert values on
        invertDriver (bool, optional): should the drivers values be inverted
        invertDriven (bool, optional): should the drivens values be inverted
    """
    # Snapshot the keys, clear the curve, then re-key with flipped values
    sdkInfo_dict = getSDKInfo(newKeyNode)
    stripKeys(newKeyNode)
    # -1 flips the corresponding axis, 1 leaves it untouched
    driverSign = -1 if invertDriver else 1
    drivenSign = -1 if invertDriven else 1
    for keyTime, keyValue, inTangent, outTangent in sdkInfo_dict["keys"]:
        pm.setKeyframe(newKeyNode,
                       float=keyTime * driverSign,
                       value=keyValue * drivenSign,
                       itt=inTangent,
                       ott=outTangent)
def mirrorSDKkeys(node, attributes=None, invertDriver=True, invertDriven=True):
    """Mirror/invert the values on the specified node and attrs: get the sdks
    and invert their values.

    Args:
        node (pynode): node being driven to have its sdk values inverted
        attributes (list, optional): attrs to be inverted (all connectable
            attributes are assumed when omitted)
        invertDriver (bool, optional): should the driver, "time" values
            be inverted
        invertDriven (bool, optional): should the driven, "value" values
            be inverted
    """
    sourceSDKInfo = getConnectedSDKs(node)
    sourceSDKInfo.extend(getMultiDriverSDKs(node))
    # Default resolved here rather than as a mutable default argument
    if not attributes:
        attributes = pm.listAttr(node, connectable=True)
    for source, dest in sourceSDKInfo:
        if dest.plugAttr(longName=True) not in attributes:
            continue
        animCurve = source.node()
        invertKeyValues(animCurve,
                        invertDriver=invertDriver,
                        invertDriven=invertDriven)
def getBlendNodes(attrPlug):
    """Check the attrPlug (node.attr) provided for any existing connections.

    If a blendWeighted node already drives the plug, return its next free
    input[#]; if an sdk animCurve is connected directly, create a
    blendWeighted node, rewire the existing curve through it, and return the
    next free input[#].

    Args:
        attrPlug (string): node.attr

    Returns:
        string: node.attr of the blendweighted node that was just created or
        existing
    """
    # check what the connection type is
    blendNode = pm.listConnections(attrPlug, scn=True)
    if pm.nodeType(blendNode[0]) in SDK_ANIMCURVES_TYPE:
        # An sdk curve is wired straight in: splice a blendWeighted node
        # between it and the driven plug so more curves can be layered.
        existingAnimNode = blendNode[0]
        blendNodeName = "{0}_bwn".format(attrPlug.replace(".", "_"))
        blendNode = [pm.createNode("blendWeighted", n=blendNodeName)]
        pm.connectAttr(blendNode[0].attr("output"), attrPlug, f=True)
        destPlug = "{0}.input[0]".format(blendNode[0].name())
        pm.connectAttr(existingAnimNode.attr("output"), destPlug, f=True)
    if pm.nodeType(blendNode[0]) in SDK_UTILITY_TYPE:
        blendNode = blendNode[0]
    # listConnections returns a list; unwrap to the node itself
    if type(blendNode) == list:
        blendNode = blendNode[0]
    # Next available index on the blend node's "input" multi-attr
    numOfInputs = len(blendNode.getAttr("input"))
    destPlug = "{0}.input[{1}]".format(blendNode.name(), numOfInputs)
    return destPlug
def createSDKFromDict(sdkInfo_dict):
    """Create an sdk (animCurve) node from the provided info dict and wire it
    between its driver and driven plugs.

    Args:
        sdkInfo_dict (dict): node information, as produced by getSDKInfo()

    Returns:
        PyNode: created sdk node
    """
    sdkName = "{0}_{1}".format(sdkInfo_dict["drivenNode"],
                               sdkInfo_dict["drivenAttr"])
    sdkNode = pm.createNode(sdkInfo_dict["type"], name=sdkName, ss=True)
    # Driver plug -> curve input
    pm.connectAttr("{0}.{1}".format(sdkInfo_dict["driverNode"],
                                    sdkInfo_dict["driverAttr"]),
                   "{0}.input".format(sdkNode), f=True)
    drivenAttrPlug = "{0}.{1}".format(sdkInfo_dict["drivenNode"],
                                      sdkInfo_dict["drivenAttr"])
    # Route through a blendWeighted node when the plug is already driven
    if pm.listConnections(drivenAttrPlug):
        targetAttrPlug = getBlendNodes(drivenAttrPlug)
    else:
        targetAttrPlug = drivenAttrPlug
    pm.connectAttr(sdkNode.output, targetAttrPlug, f=True)
    # Rebuild each key: [driver value, driven value, in tangent, out tangent]
    animKeys = sdkInfo_dict["keys"]
    for index in range(0, len(animKeys)):
        frameValue = animKeys[index]
        pm.setKeyframe(sdkNode,
                       float=frameValue[0],
                       value=frameValue[1],
                       itt=frameValue[2],
                       ott=frameValue[3])
    sdkNode.setAttr("preInfinity", sdkInfo_dict["preInfinity"])
    sdkNode.setAttr("postInfinity", sdkInfo_dict["postInfinity"])
    pm.keyTangent(sdkNode)
    sdkNode.setWeighted(sdkInfo_dict["weightedTangents"])
    return sdkNode
def exportSDKs(nodes, filePath):
    """Export the sdk information for the provided nodes to a json file.

    Args:
        nodes (list): of nodes to export
        filePath (string): full filepath to export jsons to

    Returns:
        dict: the exported sdk info, keyed by animCurve node name
    """
    sdksToExport_dict = {}
    # Merge the sdk info gathered from every requested node
    for node in getPynodes(nodes):
        sdksToExport_dict.update(getAllSDKInfoFromNode(node))
    _exportData(sdksToExport_dict, filePath)
    return sdksToExport_dict
@mUtils.one_undo
def importSDKs(filePath):
    """Create sdk nodes from a json file, connecting drivers and driven nodes.

    Args:
        filePath (string): path to json file
    """
    createdNodes = []
    failedNodes = []
    # Best-effort: keep going when a single sdk fails to rebuild
    for sdkName, sdkInfo_dict in _importData(filePath).items():
        try:
            createdNodes.append(createSDKFromDict(sdkInfo_dict))
        except Exception as e:
            failedNodes.append(sdkName)
            print("{0}:{1}".format(sdkName, e))
    print("Nodes created ---------------------------------")
    pprint.pprint(createdNodes)
    print("Nodes failed ---------------------------------")
    pprint.pprint(failedNodes)
|
"""Rigbits, SDK i/o
exportSDKs(["drivenNodeA", "drivenNodeB"], "path/to/desired/output.json")
importSDKs(path/to/desired/output.json)
# MIRRORING -------
# copy from source, say left, to target, right
copySDKsToNode("jacketFlap_L1_fk0_sdk",
"neck_C0_0_jnt",
"jacketFlap_R1_fk0_sdk")
# invert/mirror the attributes necessary for the other side,
# in this case it is the following attributes
mirrorSDKkeys("jacketFlap_R1_fk0_sdk",
attributes=["rotateZ"],
invertDriver=True,
invertDriven=False)
mirrorSDKkeys("jacketFlap_R1_fk0_sdk",
attributes=["translateX", "translateY"],
invertDriver=True,
invertDriven=True)
# in this other instance, it was the same
copySDKsToNode("jacketFlap_L0_fk0_sdk",
"neck_C0_0_jnt",
"jacketFlap_R0_fk0_sdk")
Attributes:
SDK_ANIMCURVES_TYPE (list): sdk anim curves to support
"""
import json
import pprint
import pymel.core as pm
import mgear.core.utils as mUtils
from .six import string_types
SDK_UTILITY_TYPE = ("blendWeighted",)
SDK_ANIMCURVES_TYPE = ("animCurveUA", "animCurveUL", "animCurveUU")
# ==============================================================================
# Data export
# ==============================================================================
def _importData(filePath):
"""Return the contents of a json file. Expecting, but not limited to,
a dictionary.
Args:
filePath (string): path to file
Returns:
dict: contents of json file, expected dict
"""
try:
with open(filePath, "r") as f:
data = json.load(f)
return data
except Exception as e:
print(e)
def _exportData(data, filePath):
"""export data, dict, to filepath provided
Args:
data (dict): expected dict, not limited to
filePath (string): path to output json file
"""
try:
with open(filePath, "w") as f:
json.dump(data, f, sort_keys=False, indent=4)
except Exception as e:
print(e)
# ==============================================================================
# pymel Convenience
# ==============================================================================
def getPynodes(nodes):
"""Conevenience function to allow uses to pass in strings, but convert to
pynodes if not already.
Args:
nodes (list): string names
Returns:
list: of pynodes
"""
pynodes = []
for node in nodes:
if isinstance(node, string_types):
node = pm.PyNode(node)
pynodes.append(node)
return pynodes
# ==============================================================================
# sdk functions
# ==============================================================================
def getSDKDestination(animNodeOutputPlug):
"""Get the final destination of the sdk node, skips blendweighted
and conversion node to get the transform node.
TODO: Open this up to provided type destination
Args:
animNodeOutputPlug (string): animationNode.output
Returns:
list: name of the node, and attr
"""
connectionTypes = [SDK_UTILITY_TYPE[0], "transform"]
targetDrivenAttr = pm.listConnections(animNodeOutputPlug,
source=False,
destination=True,
plugs=True,
type=connectionTypes,
scn=True)
if pm.nodeType(targetDrivenAttr[0].nodeName()) == "blendWeighted":
blendNodeOutAttr = targetDrivenAttr[0].node().attr("output")
targetDrivenAttr = pm.listConnections(blendNodeOutAttr,
destination=True,
plugs=True,
scn=True)
drivenNode, drivenAttr = targetDrivenAttr[0].split(".")
return drivenNode, drivenAttr
def getMultiDriverSDKs(driven, sourceDriverFilter=None):
"""get the sdk nodes that are added through a blendweighted node
Args:
driven (string): name of the driven node
sourceDriverFilter (list, pynode): Driver transforms to filter by,
if the connected SDK is not driven by this node it will not be returned.
Returns:
list: of sdk nodes
"""
sdkDrivers = []
for sdkUtility in SDK_UTILITY_TYPE:
blend_NodePair = pm.listConnections(driven,
source=True,
type=sdkUtility,
exactType=True,
plugs=True,
connections=True,
sourceFirst=True,
scn=True) or []
if not blend_NodePair:
continue
for pairs in blend_NodePair:
sdkPairs = getConnectedSDKs(pairs[0].nodeName(), sourceDriverFilter=sourceDriverFilter)
for sPair in sdkPairs:
sdkDrivers.append([sPair[0], pairs[1]])
return sdkDrivers
def getConnectedSDKs(driven, curvesOfType=None, sourceDriverFilter=None):
    """Get all the sdk (animCurve) nodes/plugs connected to the provided node.

    Args:
        driven (str, pynode): name of node, or pynode
        curvesOfType (list, optional): animCurve node types to search for;
            if none provided, falls back on the module-defined supported set.
        sourceDriverFilter (list, pynode): Driver transforms to filter by;
            if the connected SDK is not driven by one of these nodes it will
            not be returned.

    Returns:
        list: of (animCurve output plug, driven input plug) pairs
    """
    retrievedSDKNodes = []
    # Default resolved here rather than as a mutable default argument
    if not curvesOfType:
        curvesOfType = SDK_ANIMCURVES_TYPE
    for animCurve in curvesOfType:
        animCurveNodes = pm.listConnections(driven,
                                            source=True,
                                            type=animCurve,
                                            exactType=True,
                                            plugs=True,
                                            connections=True,
                                            sourceFirst=True,
                                            scn=True) or []
        # If the filter is given, keep only curves driven by transforms
        # inside sourceDriverFilter
        if sourceDriverFilter and animCurveNodes:
            filteredSDKNodes = []
            for driver_plug, anim_plug in animCurveNodes:
                # Transform nodes feeding the anim curve
                connectedDrivers = pm.listConnections(driver_plug.node(),
                                                      source=True,
                                                      type="transform",
                                                      exactType=True,
                                                      scn=True)
                if not connectedDrivers:
                    continue
                for conDriver in connectedDrivers:
                    # NOTE(review): listConnections without plugs=True returns
                    # nodes, so conDriver.node() looks suspect here -- confirm
                    # against pymel's return type before relying on this path.
                    if conDriver.node() in sourceDriverFilter:
                        filteredSDKNodes.append((driver_plug, anim_plug))
            # Replace the unfiltered list with the filtered one
            animCurveNodes = filteredSDKNodes
        retrievedSDKNodes.extend(animCurveNodes)
    return retrievedSDKNodes
def getSDKInfo(animNode):
    """Gather all the information from an sdk/animCurve node into a
    dictionary for exporting.

    Args:
        animNode (pynode): the animCurve node

    Returns:
        dict: dictionary of all the attrs to be exported
    """
    sdkInfo_dict = {}
    sdkKey_Info = []
    # ktv is a compound attr (listAttr returns three entries per key).
    # BUGFIX: use floor division -- under Python 3 "/" yields a float and
    # range() below would raise a TypeError.
    numberOfKeys = len(pm.listAttr("{0}.ktv".format(animNode), multi=True)) // 3
    itt_list = pm.keyTangent(animNode, itt=True, q=True)
    ott_list = pm.keyTangent(animNode, ott=True, q=True)
    # maya doesnt return a value if there is only one key frame set.
    if itt_list is None:
        itt_list = ["linear"]
    if ott_list is None:
        ott_list = ["linear"]
    for index in range(numberOfKeys):
        value = pm.getAttr("{0}.keyTimeValue[{1}]".format(animNode, index))
        absoluteValue = pm.keyframe(animNode,
                                    q=True,
                                    valueChange=True,
                                    index=index)[0]
        # [driver value, driven value, in tangent, out tangent]
        keyData = [value[0], absoluteValue, itt_list[index], ott_list[index]]
        sdkKey_Info.append(keyData)
    sdkInfo_dict["keys"] = sdkKey_Info
    sdkInfo_dict["type"] = animNode.type()
    sdkInfo_dict["preInfinity"] = animNode.getAttr("preInfinity")
    sdkInfo_dict["postInfinity"] = animNode.getAttr("postInfinity")
    sdkInfo_dict["weightedTangents"] = animNode.getAttr("weightedTangents")
    # Record the driver (input) plug
    animNodeInputPlug = "{0}.input".format(animNode.nodeName())
    sourceDriverAttr = pm.listConnections(animNodeInputPlug,
                                          source=True,
                                          plugs=True,
                                          scn=True)[0]
    driverNode, driverAttr = sourceDriverAttr.split(".")
    sdkInfo_dict["driverNode"] = driverNode
    sdkInfo_dict["driverAttr"] = driverAttr
    # Record the driven (output) destination, skipping blendWeighted nodes
    animNodeOutputPlug = "{0}.output".format(animNode.nodeName())
    drivenNode, drivenAttr = getSDKDestination(animNodeOutputPlug)
    sdkInfo_dict["drivenNode"] = drivenNode
    sdkInfo_dict["drivenAttr"] = drivenAttr
    return sdkInfo_dict
def getAllSDKInfoFromNode(node):
"""returns a dict for all of the connected sdk/animCurve on
the provided node
Args:
node (pynode): name of node to the be searched
Returns:
dict: of all of the sdk nodes
"""
allSDKInfo_dict = {}
retrievedSDKNodes = getConnectedSDKs(node)
retrievedSDKNodes.extend(getMultiDriverSDKs(node))
for animPlug, targetPlug in retrievedSDKNodes:
allSDKInfo_dict[animPlug.nodeName()] = getSDKInfo(animPlug.node())
return allSDKInfo_dict
def removeSDKs(node, attributes=[], sourceDriverFilter=None):
"""Convenience function to remove, delete, all sdk nodes associated with
the provided node
Args:
node (pynode): name of the node
attributes (list, optional): list of attributes to remove sdks from
if none provided, assume all
sourceDriverFilter (list, pynode): Driver transforms to filter by,
if the connected SDK is not driven by this node it will not be returned.
"""
toDelete = []
# if no attrs provided, assume all
if not attributes:
attributes = pm.listAttr(node, connectable=True)
sourceSDKInfo = getConnectedSDKs(node, sourceDriverFilter=sourceDriverFilter)
sourceSDKInfo.extend(getMultiDriverSDKs(node, sourceDriverFilter=sourceDriverFilter))
for source, dest in sourceSDKInfo:
if dest.plugAttr(longName=True) not in attributes:
continue
toDelete.append(source.node())
pm.delete(toDelete)
def copySDKsToNode(sourceDriven,
                   targetDriver,
                   targetDriven,
                   sourceAttributes=None,
                   sourceDriverFilter=None):
    """Duplicates sdk nodes from the source driven node onto the designated
    target driver/driven pair.

    Args:
        sourceDriven (pynode): source to copy from
        targetDriver (pynode): to drive the new sdk node
        targetDriven (pynode): node to be driven
        sourceAttributes (list, optional): attrs to copy; all connectable
            attributes are assumed when omitted
        sourceDriverFilter (list, pynode): Driver transforms to filter by;
            if the connected SDK is not driven by one of these nodes it will
            not be copied.

    Returns:
        TYPE: n/a
    """
    sourceDriven, targetDriver, targetDriven = getPynodes([sourceDriven,
                                                           targetDriver,
                                                           targetDriven])
    if sourceDriven == targetDriven:
        pm.warning("You cannot copy SDKs to the same name.")
        return
    # Default resolved here rather than as a mutable default argument;
    # if no attrs provided, assume all
    if not sourceAttributes:
        sourceAttributes = pm.listAttr(sourceDriven, connectable=True)
    # BUGFIX: the filter was previously hard-coded to None here (debug
    # leftover), so sourceDriverFilter was ignored for directly-connected SDKs
    # while still being applied to the blendWeighted-routed ones.
    sourceSDKInfo = getConnectedSDKs(sourceDriven,
                                     sourceDriverFilter=sourceDriverFilter)
    sourceSDKInfo.extend(getMultiDriverSDKs(sourceDriven, sourceDriverFilter))
    for source, dest in sourceSDKInfo:
        if dest.plugAttr(longName=True) not in sourceAttributes:
            continue
        # Driver plug currently feeding the original curve's input
        sourceDriverAttr = pm.listConnections("{0}.input".format(
                                              source.nodeName()),
                                              source=True,
                                              plugs=True,
                                              scn=True)[0]
        duplicateCurve = pm.duplicate(source, name="{0}_{1}".format(
                                      targetDriven.name(),
                                      dest.attrName(longName=True)))[0]
        # Drive the duplicated curve from the same attr on the target driver
        pm.connectAttr("{0}.{1}".format(
                       targetDriver,
                       sourceDriverAttr.attrName(longName=True)),
                       "{0}.input".format(duplicateCurve))
        drivenAttrPlug = "{0}.{1}".format(targetDriven,
                                          dest.name(includeNode=False))
        # Route through a blendWeighted node when the plug is already driven
        if pm.listConnections(drivenAttrPlug):
            targetAttrPlug = getBlendNodes(drivenAttrPlug)
        else:
            targetAttrPlug = drivenAttrPlug
        try:
            pm.connectAttr(duplicateCurve.output, targetAttrPlug)
        except RuntimeError:
            # error when trying to connect to a plug that is already
            # connected; retry on the previous available input index.
            targetAttrPlug = targetAttrPlug.replace("[1]", "[0]")
            pm.connectAttr(duplicateCurve.output, targetAttrPlug)
def stripKeys(animNode):
    """Remove all animation keys from the provided sdk/anim curve node.

    Args:
        animNode (pynode): sdk/anim curve node to strip
    """
    # ".ktv" is a multi attr of (time, value) compound entries; listAttr
    # returns three entries per key (compound + two children), hence // 3.
    # Integer division is required: "/" yields a float in Python 3 and
    # range() would raise a TypeError. Guard against listAttr returning
    # None when there are no keys.
    keyAttrs = pm.listAttr(animNode + ".ktv", multi=True) or []
    numKeys = len(keyAttrs) // 3
    # keys re-index after each removal, so always remove index 0
    for _ in range(numKeys):
        animNode.remove(0)
def invertKeyValues(newKeyNode, invertDriver=True, invertDriven=True):
    """Mirror keyframe node procedure, in case you need to flip your SDK's.

    Args:
        newKeyNode (PyNode): sdk node to invert values on
        invertDriver (bool, optional): should the drivers values be inverted
        invertDriven (bool, optional): should the drivens values be inverted
    """
    sdkInfo_dict = getSDKInfo(newKeyNode)
    stripKeys(newKeyNode)
    # sign multipliers encode the four invert combinations in one place
    driverSign = -1 if invertDriver else 1
    drivenSign = -1 if invertDriven else 1
    for frameValue in sdkInfo_dict["keys"]:
        pm.setKeyframe(newKeyNode,
                       float=frameValue[0] * driverSign,
                       value=frameValue[1] * drivenSign,
                       itt=frameValue[2],
                       ott=frameValue[3])
def mirrorSDKkeys(node, attributes=None, invertDriver=True, invertDriven=True):
    """mirror/invert the values on the specified node and attrs, get the sdks
    and invert those values

    Args:
        node (pynode): node being driven to have its sdk values inverted
        attributes (list, optional): attrs to be inverted; if None or empty,
            all connectable attrs on the node are used
        invertDriver (bool, optional): should the driver, "time" values
            be inverted
        invertDriven (bool, optional): should the driven, "value" values
            be inverted
    """
    sourceSDKInfo = getConnectedSDKs(node)
    sourceSDKInfo.extend(getMultiDriverSDKs(node))
    # None sentinel instead of a mutable [] default; falsy values (None or
    # an empty list) both fall back to all connectable attrs, as before
    if not attributes:
        attributes = pm.listAttr(node, connectable=True)
    for source, dest in sourceSDKInfo:
        # skip curves that drive attrs outside the requested set
        if dest.plugAttr(longName=True) not in attributes:
            continue
        animCurve = source.node()
        invertKeyValues(animCurve,
                        invertDriver=invertDriver,
                        invertDriven=invertDriven)
def getBlendNodes(attrPlug):
    """Check the attrPlug (node.attr) provided for any existing connections
    if blendWeighted exists, return the appropriate input[#], if sdk, create
    a blendweighted and connect sdk, return input[#]

    Args:
        attrPlug (string): node.attr

    Returns:
        string: node.attr of the blendweighted node that was just created or
        existing
    """
    # check what the connection type is
    blendNode = pm.listConnections(attrPlug, scn=True)
    if pm.nodeType(blendNode[0]) in SDK_ANIMCURVES_TYPE:
        # an anim curve is wired straight into the plug: insert a new
        # blendWeighted node between the curve and the driven plug so that
        # multiple curves can contribute
        existingAnimNode = blendNode[0]
        blendNodeName = "{0}_bwn".format(attrPlug.replace(".", "_"))
        blendNode = [pm.createNode("blendWeighted", n=blendNodeName)]
        # force-connect the blend output to the driven plug (breaking the
        # direct curve connection), then re-route the curve into input[0]
        pm.connectAttr(blendNode[0].attr("output"), attrPlug, f=True)
        destPlug = "{0}.input[0]".format(blendNode[0].name())
        pm.connectAttr(existingAnimNode.attr("output"), destPlug, f=True)
    if pm.nodeType(blendNode[0]) in SDK_UTILITY_TYPE:
        # an existing blendWeighted was found; use it directly
        blendNode = blendNode[0]
    if type(blendNode) == list:
        blendNode = blendNode[0]
    # next free index on the blendWeighted's "input" multi attr
    numOfInputs = len(blendNode.getAttr("input"))
    destPlug = "{0}.input[{1}]".format(blendNode.name(), numOfInputs)
    return destPlug
def createSDKFromDict(sdkInfo_dict):
    """Create a sdk node from the provided info dict

    Args:
        sdkInfo_dict (dict): dict of node information to create
            (same layout that getSDKInfo exports)

    Returns:
        PyNode: created sdk node
    """
    # name the curve after the node/attr it will drive
    sdkName = "{0}_{1}".format(sdkInfo_dict["drivenNode"],
                               sdkInfo_dict["drivenAttr"])
    sdkNode = pm.createNode(sdkInfo_dict["type"], name=sdkName, ss=True)
    # wire the driver attr into the curve's input
    pm.connectAttr("{0}.{1}".format(sdkInfo_dict["driverNode"],
                                    sdkInfo_dict["driverAttr"]),
                   "{0}.input".format(sdkNode), f=True)
    drivenAttrPlug = "{0}.{1}".format(sdkInfo_dict["drivenNode"],
                                      sdkInfo_dict["drivenAttr"])
    # if the driven plug already has a connection, route through a
    # blendWeighted node so multiple curves can contribute
    if pm.listConnections(drivenAttrPlug):
        targetAttrPlug = getBlendNodes(drivenAttrPlug)
    else:
        targetAttrPlug = drivenAttrPlug
    pm.connectAttr(sdkNode.output, targetAttrPlug, f=True)
    # restore the recorded keys with their in/out tangent types
    animKeys = sdkInfo_dict["keys"]
    for index in range(0, len(animKeys)):
        frameValue = animKeys[index]
        pm.setKeyframe(sdkNode,
                       float=frameValue[0],
                       value=frameValue[1],
                       itt=frameValue[2],
                       ott=frameValue[3])
    # infinity/tangent settings are applied after the keys exist
    sdkNode.setAttr("preInfinity", sdkInfo_dict["preInfinity"])
    sdkNode.setAttr("postInfinity", sdkInfo_dict["postInfinity"])
    pm.keyTangent(sdkNode)
    sdkNode.setWeighted(sdkInfo_dict["weightedTangents"])
    return sdkNode
def exportSDKs(nodes, filePath):
    """exports the sdk information based on the provided nodes to a json file

    Args:
        nodes (list): of nodes to export
        filePath (string): full filepath to export jsons to

    Returns:
        dict: the sdk info that was written to disk, keyed by sdk node name
    """
    sdksToExport_dict = {}
    # convert all names to pynodes in a single call rather than one
    # getPynodes call per node inside the loop
    for node in getPynodes(nodes):
        sdksToExport_dict.update(getAllSDKInfoFromNode(node))
    _exportData(sdksToExport_dict, filePath)
    return sdksToExport_dict
@mUtils.one_undo
def importSDKs(filePath):
    """create sdk nodes from json file, connected to drivers and driven

    Args:
        filePath (string): path to json file
    """
    allSDKInfo_dict = _importData(filePath)
    createdNodes = []
    failedNodes = []
    for sdkName, sdkInfo_dict in allSDKInfo_dict.items():
        try:
            newNode = createSDKFromDict(sdkInfo_dict)
        except Exception as e:
            # record the failure and keep importing the rest
            failedNodes.append(sdkName)
            print("{0}:{1}".format(sdkName, e))
        else:
            createdNodes.append(newNode)
    print("Nodes created ---------------------------------")
    pprint.pprint(createdNodes)
    print("Nodes failed ---------------------------------")
    pprint.pprint(failedNodes)
|
en
| 0.7043
|
Rigbits, SDK i/o exportSDKs(["drivenNodeA", "drivenNodeB"], "path/to/desired/output.json") importSDKs(path/to/desired/output.json) # MIRRORING ------- # copy from source, say left, to target, right copySDKsToNode("jacketFlap_L1_fk0_sdk", "neck_C0_0_jnt", "jacketFlap_R1_fk0_sdk") # invert/mirror the attributes necessary for the other side, # in this case it is the following attributes mirrorSDKkeys("jacketFlap_R1_fk0_sdk", attributes=["rotateZ"], invertDriver=True, invertDriven=False) mirrorSDKkeys("jacketFlap_R1_fk0_sdk", attributes=["translateX", "translateY"], invertDriver=True, invertDriven=True) # in this other instance, it was the same copySDKsToNode("jacketFlap_L0_fk0_sdk", "neck_C0_0_jnt", "jacketFlap_R0_fk0_sdk") Attributes: SDK_ANIMCURVES_TYPE (list): sdk anim curves to support # ============================================================================== # Data export # ============================================================================== Return the contents of a json file. Expecting, but not limited to, a dictionary. Args: filePath (string): path to file Returns: dict: contents of json file, expected dict export data, dict, to filepath provided Args: data (dict): expected dict, not limited to filePath (string): path to output json file # ============================================================================== # pymel Convenience # ============================================================================== Conevenience function to allow uses to pass in strings, but convert to pynodes if not already. Args: nodes (list): string names Returns: list: of pynodes # ============================================================================== # sdk functions # ============================================================================== Get the final destination of the sdk node, skips blendweighted and conversion node to get the transform node. 
TODO: Open this up to provided type destination Args: animNodeOutputPlug (string): animationNode.output Returns: list: name of the node, and attr get the sdk nodes that are added through a blendweighted node Args: driven (string): name of the driven node sourceDriverFilter (list, pynode): Driver transforms to filter by, if the connected SDK is not driven by this node it will not be returned. Returns: list: of sdk nodes get all the sdk, animcurve, nodes/plugs connected to the provided node. Args: node (str, pynode): name of node, or pynode curvesOfType (list, optional): animCurve nodes of type if none provided will fall back on module defined supported set. sourceDriverFilter (list, pynode): Driver transforms to filter by, if the connected SDK is not driven by this node it will not be returned. Returns: list: of sdk nodes, paired with the node/attr they effect # If the filter is given, filter out only nodes driven by # transforms inside sourceDriverFilter # Getting the connected Driver Transform nodes # If any are found, add them to filteredSDKNodes # if the node name is in sourceDriverFilter # Replacing animCurveNodes with the new filtered list. get all the information from an sdk/animCurve in a dictioanry for exporting. Args: animNode (pynode): name of node, pynode Returns: dict: dictionary of all the attrs to be exported # maya doesnt return value if there is only one key frame set. returns a dict for all of the connected sdk/animCurve on the provided node Args: node (pynode): name of node to the be searched Returns: dict: of all of the sdk nodes Convenience function to remove, delete, all sdk nodes associated with the provided node Args: node (pynode): name of the node attributes (list, optional): list of attributes to remove sdks from if none provided, assume all sourceDriverFilter (list, pynode): Driver transforms to filter by, if the connected SDK is not driven by this node it will not be returned. 
# if no attrs provided, assume all Duplicates sdk nodes from the source drive, to any designated target driver/driven Args: sourceDriven (pynode): source to copy from targetDriver (pynode): to drive the new sdk node targetDriven (pynode): node to be driven sourceAttributes (list, optional): of attrs to copy, if none provided assume all sourceDriverFilter (list, pynode): Driver transforms to filter by, if the connected SDK is not driven by this node it will not be returned. Returns: TYPE: n/a # if no attrs provided, assume all # sourceDriverFilter = None # drivenNode, drivenAttr = getSDKDestination(duplicateCurve.output) # error when trying to connect to a plug that is already connected. # trying next avalible plug. remove animation keys from the provided sdk node Args: animNode (pynode): sdk/anim node Mirror keyframe node procedure, in case you need to flip your SDK's. Args: newKeyNode (PyNode): sdk node to invert values on invertDriver (bool, optional): should the drivers values be inverted invertDriven (bool, optional): should the drivens values be inverted mirror/invert the values on the specified node and attrs, get the sdks and invert those values Args: node (pynode): node being driven to have its sdk values inverted attributes (list, optional): attrs to be inverted invertDriver (bool, optional): should the driver, "time" values be inverted invertDriven (bool, optional): should the driven, "value" values be inverted Check the attrPlug (node.attr) provided for any existing connections if blendWeighted exists, return the appropriate input[#], if sdk, create a blendweighted and connect sdk, return input[#] Args: attrPlug (string): node.attr Returns: string: node.attr of the blendweighted node that was just created or existing # check what the connection type is Create a sdk node from the provided info dict Args: sdkInfo_dict (dict): dict of node information to create Returns: PyNode: created sdk node exports the sdk information based on the provided nodes to a 
json file Args: nodes (list): of nodes to export filePath (string): full filepath to export jsons to create sdk nodes from json file, connected to drivers and driven Args: filePath (string): path to json file
| 2.189811
| 2
|
gitlint/__init__.py
|
Lorac/gitlint
| 0
|
6625684
|
__version__ = "0.16.0dev"
|
__version__ = "0.16.0dev"
|
none
| 1
| 1.023569
| 1
|
|
slackest/channels.py
|
usserysig1/slackest
| 7
|
6625685
|
from .base_api import BaseAPI
from .utils import *
class Channels(BaseAPI):
    """Follows the Slack Channels API. See https://api.slack.com/methods"""
    def create(self, name):
        """
        Creates a public channel
        :param name: The name
        :type name: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.post('channels.create', data={'name': name})
    def info(self, channel):
        """
        Retrieves information about a public channel
        :param channel: The channel ID
        :type channel: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.get('channels.info', params={'channel': channel})
    def list(self, exclude_archived=True, exclude_members=False):
        """
        Lists channels. Note: this method is a generator (it uses ``yield``),
        so consume it with e.g. ``next(channels.list())``.
        :param exclude_archived: Exclude archived channels
        :type exclude_archived: bool
        :param exclude_members: Exclude members from being listed
        :type exclude_members: bool
        :return: A generator yielding a response object for the API request.
        :rtype: generator of :class:`Response <Response>` objects
        """
        yield self.get('channels.list',
                       params={'exclude_archived': str(exclude_archived).lower(),
                               'exclude_members': str(exclude_members).lower()})
    def history(self, channel, latest=None, oldest=None, count=None,
                inclusive=False, unreads=False):
        """
        Fetches history of messages and events from a channel
        :param channel: The channel ID
        :type channel: str
        :param latest: End of time range to include in results
        :type latest: str
        :param oldest: Start of time range to include in results
        :type oldest: str
        :param count: The number of messages to return
        :type count: int
        :param inclusive: Include messages with latest or oldest timestamp in results
        :type inclusive: bool
        :param unreads: Include `unread_count_display` in the output
        :type unreads: bool
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.get('channels.history',
                        params={
                            'channel': channel,
                            'latest': latest,
                            'oldest': oldest,
                            'count': count,
                            'inclusive': int(inclusive),
                            'unreads': int(unreads)
                        })
    def mark(self, channel, time_stamp):
        """
        Moves the read cursor in a public channel
        :param channel: The channel ID
        :type channel: str
        :param time_stamp: The timestamp of the most recently seen message
        :type time_stamp: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.post('channels.mark',
                         data={'channel': channel, 'ts': time_stamp})
    def join(self, name):
        """
        Allows a user object to join a channel
        :param name: The channel name (#general)
        :type name: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.post('channels.join', data={'name': name})
    def leave(self, channel):
        """
        Allows a user object to leave a channel
        :param channel: The channel ID
        :type channel: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.post('channels.leave', data={'channel': channel})
    def invite(self, channel, user):
        """
        Invites a user to a channel
        :param channel: The channel ID
        :type channel: str
        :param user: The user ID
        :type user: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.post('channels.invite',
                         data={'channel': channel, 'user': user})
    def kick(self, channel, user):
        """
        Removes a user from a channel
        :param channel: The channel ID
        :type channel: str
        :param user: The user ID
        :type user: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.post('channels.kick',
                         data={'channel': channel, 'user': user})
    def rename(self, channel, name):
        """
        Renames a channel
        :param channel: The channel ID
        :type channel: str
        :param name: The new user-friendly name of the channel
        :type name: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.post('channels.rename',
                         data={'channel': channel, 'name': name})
    def replies(self, channel, thread_ts):
        """
        Retrieve a thread of messages posted to a channel
        :param channel: The channel ID
        :type channel: str
        :param thread_ts: Unique identifier of a thread's parent message
        :type thread_ts: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.get('channels.replies',
                        params={'channel': channel, 'thread_ts': thread_ts})
    def archive(self, channel):
        """
        Archives a public channel
        :param channel: The channel ID
        :type channel: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.post('channels.archive', data={'channel': channel})
    def unarchive(self, channel):
        """
        Unarchives a channel
        :param channel: The channel ID
        :type channel: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.post('channels.unarchive', data={'channel': channel})
    def set_purpose(self, channel, purpose):
        """
        Sets the purpose of a channel
        :param channel: The channel ID
        :type channel: str
        :param purpose: The purpose
        :type purpose: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.post('channels.setPurpose',
                         data={'channel': channel, 'purpose': purpose})
    def set_topic(self, channel, topic):
        """
        Sets the topic of a channel
        :param channel: The channel ID
        :type channel: str
        :param topic: The topic
        :type topic: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.post('channels.setTopic',
                         data={'channel': channel, 'topic': topic})
    def get_channel_id(self, channel_name):
        """
        Gets a channel ID according to the channel's name
        :param channel_name: The channel's name
        :type channel_name: str
        :return: Returns the channel ID
        :rtype: str
        """
        # list() is a generator; next() pulls the single yielded response
        channels_gen = next(self.list())
        channels = channels_gen.body['channels']
        return get_item_id_by_name(channels, channel_name)
|
from .base_api import BaseAPI
from .utils import *
class Channels(BaseAPI):
    """Follows the Slack Channels API. See https://api.slack.com/methods"""
    def create(self, name):
        """
        Creates a public channel
        :param name: The name
        :type name: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.post('channels.create', data={'name': name})
    def info(self, channel):
        """
        Retrieves information about a public channel
        :param channel: The channel ID
        :type channel: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.get('channels.info', params={'channel': channel})
    def list(self, exclude_archived=True, exclude_members=False):
        """
        Lists channels. Note: this method is a generator (it uses ``yield``),
        so consume it with e.g. ``next(channels.list())``.
        :param exclude_archived: Exclude archived channels
        :type exclude_archived: bool
        :param exclude_members: Exclude members from being listed
        :type exclude_members: bool
        :return: A generator yielding a response object for the API request.
        :rtype: generator of :class:`Response <Response>` objects
        """
        yield self.get('channels.list',
                       params={'exclude_archived': str(exclude_archived).lower(),
                               'exclude_members': str(exclude_members).lower()})
    def history(self, channel, latest=None, oldest=None, count=None,
                inclusive=False, unreads=False):
        """
        Fetches history of messages and events from a channel
        :param channel: The channel ID
        :type channel: str
        :param latest: End of time range to include in results
        :type latest: str
        :param oldest: Start of time range to include in results
        :type oldest: str
        :param count: The number of messages to return
        :type count: int
        :param inclusive: Include messages with latest or oldest timestamp in results
        :type inclusive: bool
        :param unreads: Include `unread_count_display` in the output
        :type unreads: bool
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.get('channels.history',
                        params={
                            'channel': channel,
                            'latest': latest,
                            'oldest': oldest,
                            'count': count,
                            'inclusive': int(inclusive),
                            'unreads': int(unreads)
                        })
    def mark(self, channel, time_stamp):
        """
        Moves the read cursor in a public channel
        :param channel: The channel ID
        :type channel: str
        :param time_stamp: The timestamp of the most recently seen message
        :type time_stamp: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.post('channels.mark',
                         data={'channel': channel, 'ts': time_stamp})
    def join(self, name):
        """
        Allows a user object to join a channel
        :param name: The channel name (#general)
        :type name: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.post('channels.join', data={'name': name})
    def leave(self, channel):
        """
        Allows a user object to leave a channel
        :param channel: The channel ID
        :type channel: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.post('channels.leave', data={'channel': channel})
    def invite(self, channel, user):
        """
        Invites a user to a channel
        :param channel: The channel ID
        :type channel: str
        :param user: The user ID
        :type user: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.post('channels.invite',
                         data={'channel': channel, 'user': user})
    def kick(self, channel, user):
        """
        Removes a user from a channel
        :param channel: The channel ID
        :type channel: str
        :param user: The user ID
        :type user: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.post('channels.kick',
                         data={'channel': channel, 'user': user})
    def rename(self, channel, name):
        """
        Renames a channel
        :param channel: The channel ID
        :type channel: str
        :param name: The new user-friendly name of the channel
        :type name: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.post('channels.rename',
                         data={'channel': channel, 'name': name})
    def replies(self, channel, thread_ts):
        """
        Retrieve a thread of messages posted to a channel
        :param channel: The channel ID
        :type channel: str
        :param thread_ts: Unique identifier of a thread's parent message
        :type thread_ts: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.get('channels.replies',
                        params={'channel': channel, 'thread_ts': thread_ts})
    def archive(self, channel):
        """
        Archives a public channel
        :param channel: The channel ID
        :type channel: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.post('channels.archive', data={'channel': channel})
    def unarchive(self, channel):
        """
        Unarchives a channel
        :param channel: The channel ID
        :type channel: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.post('channels.unarchive', data={'channel': channel})
    def set_purpose(self, channel, purpose):
        """
        Sets the purpose of a channel
        :param channel: The channel ID
        :type channel: str
        :param purpose: The purpose
        :type purpose: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.post('channels.setPurpose',
                         data={'channel': channel, 'purpose': purpose})
    def set_topic(self, channel, topic):
        """
        Sets the topic of a channel
        :param channel: The channel ID
        :type channel: str
        :param topic: The topic
        :type topic: str
        :return: A response object to run the API request.
        :rtype: :class:`Response <Response>` object
        """
        return self.post('channels.setTopic',
                         data={'channel': channel, 'topic': topic})
    def get_channel_id(self, channel_name):
        """
        Gets a channel ID according to the channel's name
        :param channel_name: The channel's name
        :type channel_name: str
        :return: Returns the channel ID
        :rtype: str
        """
        # list() is a generator; next() pulls the single yielded response
        channels_gen = next(self.list())
        channels = channels_gen.body['channels']
        return get_item_id_by_name(channels, channel_name)
|
en
| 0.749802
|
Follows the Slack Channels API. See https://api.slack.com/methods Creates a public channel :param name: The name :type name: str :return: A response object to run the API request. :rtype: :class:`Response <Response>` object Retrieves information about a public channel :param name: The channel ID :type name: str :return: A response object to run the API request. :rtype: :class:`Response <Response>` object Lists channels :param exclude_archived: Exclude archived channels :type exclude_archived: bool :param exclude_members: Exclude members from being listed :type exclude_members: bool :return: A response object to run the API request. :rtype: :class:`Response <Response>` object Fetches history of messages and events from a channel :param channel: The channel ID :type channel: str :param latest: End of time range to include in results :type latest: str :param oldest: Start of time range to include in results :type oldest: str :param count: The number of messages to return :type count: int :param inclusive: Include messages with latest or oldest timestamp in results :type inclusive: bool :param unreads: Include `unread_count_display` in the output :type unreads: bool :return: A response object to run the API request. :rtype: :class:`Response <Response>` object Moves the read cursor in a public channel :param channel: The channel ID :type channel: str :param time_stamp: The timestamp of the most recently seen message :type time_stamp: str :return: A response object to run the API request. :rtype: :class:`Response <Response>` object Allows a user object to join a channel :param name: The channel name (#general) :type name: str :return: A response object to run the API request. :rtype: :class:`Response <Response>` object Allows a user object to leave a channel :param name: The channel ID :type name: str :return: A response object to run the API request. 
:rtype: :class:`Response <Response>` object Invites a user to a private channel :param channel: The channel ID :type channel: str :param user: The user ID :type user: str :return: A response object to run the API request. :rtype: :class:`Response <Response>` object Removes a user from a channel :param channel: The private channel ID :type channel: str :param user: The user ID :type user: str :return: A response object to run the API request. :rtype: :class:`Response <Response>` object Renames a channel :param channel: The channel ID :type channel: str :param name: The new user-friendly name of the channel :type name: str :return: A response object to run the API request. :rtype: :class:`Response <Response>` object Retrieve a thread of messages posted to a channel :param channel: The channel ID :type channel: str :param thread_ts: Unique identifier of a thread's parent message :type thread_ts: str :return: A response object to run the API request. :rtype: :class:`Response <Response>` object Archives a public channel :param channel: The channel ID :type channel: str :return: A response object to run the API request. :rtype: :class:`Response <Response>` object Unarchives a channel :param channel: The channel ID :type channel: str :return: A response object to run the API request. :rtype: :class:`Response <Response>` object Sets the purpose of a channel :param channel: The channel ID :type channel: str :param purpose: The purpose :type purpose: str :return: A response object to run the API request. :rtype: :class:`Response <Response>` object Sets the topic of a channel :param channel: The channel ID :type channel: str :param topic: The topic :type topic: str :return: A response object to run the API request. :rtype: :class:`Response <Response>` object Gets a channel ID according to the channel's name :param channel_name: The channel's name :type channel_name: str :return: Returns the channel ID :rtype: str
| 2.998763
| 3
|
sdk/python/pulumi_azure_nextgen/machinelearningservices/v20191101/get_workspace.py
|
test-wiz-sec/pulumi-azure-nextgen
| 0
|
6625686
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetWorkspaceResult',
'AwaitableGetWorkspaceResult',
'get_workspace',
]
@pulumi.output_type
class GetWorkspaceResult:
    """
    An object that represents a machine learning workspace.
    """
    def __init__(__self__, application_insights=None, container_registry=None, creation_time=None, description=None, discovery_url=None, friendly_name=None, identity=None, key_vault=None, location=None, name=None, provisioning_state=None, sku=None, storage_account=None, tags=None, type=None, workspace_id=None):
        """
        Validate each keyword argument's type and store it on the instance.
        All parameters are optional; see the matching properties below for
        each field's meaning.
        """
        # NOTE: this file is generated by the Pulumi SDK Generator (see the
        # file header); do not hand-edit the validation boilerplate below.
        if application_insights and not isinstance(application_insights, str):
            raise TypeError("Expected argument 'application_insights' to be a str")
        pulumi.set(__self__, "application_insights", application_insights)
        if container_registry and not isinstance(container_registry, str):
            raise TypeError("Expected argument 'container_registry' to be a str")
        pulumi.set(__self__, "container_registry", container_registry)
        if creation_time and not isinstance(creation_time, str):
            raise TypeError("Expected argument 'creation_time' to be a str")
        pulumi.set(__self__, "creation_time", creation_time)
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if discovery_url and not isinstance(discovery_url, str):
            raise TypeError("Expected argument 'discovery_url' to be a str")
        pulumi.set(__self__, "discovery_url", discovery_url)
        if friendly_name and not isinstance(friendly_name, str):
            raise TypeError("Expected argument 'friendly_name' to be a str")
        pulumi.set(__self__, "friendly_name", friendly_name)
        if identity and not isinstance(identity, dict):
            raise TypeError("Expected argument 'identity' to be a dict")
        pulumi.set(__self__, "identity", identity)
        if key_vault and not isinstance(key_vault, str):
            raise TypeError("Expected argument 'key_vault' to be a str")
        pulumi.set(__self__, "key_vault", key_vault)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if sku and not isinstance(sku, dict):
            raise TypeError("Expected argument 'sku' to be a dict")
        pulumi.set(__self__, "sku", sku)
        if storage_account and not isinstance(storage_account, str):
            raise TypeError("Expected argument 'storage_account' to be a str")
        pulumi.set(__self__, "storage_account", storage_account)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if workspace_id and not isinstance(workspace_id, str):
            raise TypeError("Expected argument 'workspace_id' to be a str")
        pulumi.set(__self__, "workspace_id", workspace_id)
    @property
    @pulumi.getter(name="applicationInsights")
    def application_insights(self) -> Optional[str]:
        """
        ARM id of the application insights associated with this workspace. This cannot be changed once the workspace has been created
        """
        return pulumi.get(self, "application_insights")
    @property
    @pulumi.getter(name="containerRegistry")
    def container_registry(self) -> Optional[str]:
        """
        ARM id of the container registry associated with this workspace. This cannot be changed once the workspace has been created
        """
        return pulumi.get(self, "container_registry")
    @property
    @pulumi.getter(name="creationTime")
    def creation_time(self) -> str:
        """
        The creation time of the machine learning workspace in ISO8601 format.
        """
        return pulumi.get(self, "creation_time")
    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        The description of this workspace.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="discoveryUrl")
    def discovery_url(self) -> Optional[str]:
        """
        Url for the discovery service to identify regional endpoints for machine learning experimentation services
        """
        return pulumi.get(self, "discovery_url")
    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """
        The friendly name for this workspace. This name in mutable
        """
        return pulumi.get(self, "friendly_name")
    @property
    @pulumi.getter
    def identity(self) -> Optional['outputs.IdentityResponse']:
        """
        The identity of the resource.
        """
        return pulumi.get(self, "identity")
    @property
    @pulumi.getter(name="keyVault")
    def key_vault(self) -> Optional[str]:
        """
        ARM id of the key vault associated with this workspace. This cannot be changed once the workspace has been created
        """
        return pulumi.get(self, "key_vault")
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Specifies the location of the resource.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Specifies the name of the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The current deployment state of workspace resource. The provisioningState is to indicate states for resource provisioning.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.SkuResponse']:
        """
        The sku of the workspace.
        """
        return pulumi.get(self, "sku")
    @property
    @pulumi.getter(name="storageAccount")
    def storage_account(self) -> Optional[str]:
        """
        ARM id of the storage account associated with this workspace. This cannot be changed once the workspace has been created
        """
        return pulumi.get(self, "storage_account")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Contains resource tags defined as key/value pairs.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Specifies the type of the resource.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="workspaceId")
    def workspace_id(self) -> str:
        """
        The immutable id associated with this workspace.
        """
        return pulumi.get(self, "workspace_id")
class AwaitableGetWorkspaceResult(GetWorkspaceResult):
    """Awaitable wrapper so the invoke result can be consumed with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead `yield` makes this a generator, satisfying the awaitable
        # protocol while immediately returning the already-resolved result.
        if False:
            yield self
        return GetWorkspaceResult(
            application_insights=self.application_insights,
            container_registry=self.container_registry,
            creation_time=self.creation_time,
            description=self.description,
            discovery_url=self.discovery_url,
            friendly_name=self.friendly_name,
            identity=self.identity,
            key_vault=self.key_vault,
            location=self.location,
            name=self.name,
            provisioning_state=self.provisioning_state,
            sku=self.sku,
            storage_account=self.storage_account,
            tags=self.tags,
            type=self.type,
            workspace_id=self.workspace_id)
def get_workspace(resource_group_name: Optional[str] = None,
                  workspace_name: Optional[str] = None,
                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkspaceResult:
    """
    Use this data source to access information about an existing resource.

    :param str resource_group_name: Name of the resource group in which workspace is located.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    :param opts: Options controlling how the invoke is executed.
    :return: A description of the requested machine learning workspace.
    """
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['workspaceName'] = workspace_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:machinelearningservices/v20191101:getWorkspace', __args__, opts=opts, typ=GetWorkspaceResult).value

    return AwaitableGetWorkspaceResult(
        application_insights=__ret__.application_insights,
        container_registry=__ret__.container_registry,
        creation_time=__ret__.creation_time,
        description=__ret__.description,
        discovery_url=__ret__.discovery_url,
        friendly_name=__ret__.friendly_name,
        identity=__ret__.identity,
        key_vault=__ret__.key_vault,
        location=__ret__.location,
        name=__ret__.name,
        provisioning_state=__ret__.provisioning_state,
        sku=__ret__.sku,
        storage_account=__ret__.storage_account,
        tags=__ret__.tags,
        type=__ret__.type,
        workspace_id=__ret__.workspace_id)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetWorkspaceResult',
'AwaitableGetWorkspaceResult',
'get_workspace',
]
@pulumi.output_type
class GetWorkspaceResult:
    """
    An object that represents a machine learning workspace.
    """
    # Generated by the Pulumi SDK Generator: the constructor type-checks each
    # raw invoke-result field and stores it via pulumi.set; the properties
    # below read the stored values back with pulumi.get.
    def __init__(__self__, application_insights=None, container_registry=None, creation_time=None, description=None, discovery_url=None, friendly_name=None, identity=None, key_vault=None, location=None, name=None, provisioning_state=None, sku=None, storage_account=None, tags=None, type=None, workspace_id=None):
        if application_insights and not isinstance(application_insights, str):
            raise TypeError("Expected argument 'application_insights' to be a str")
        pulumi.set(__self__, "application_insights", application_insights)
        if container_registry and not isinstance(container_registry, str):
            raise TypeError("Expected argument 'container_registry' to be a str")
        pulumi.set(__self__, "container_registry", container_registry)
        if creation_time and not isinstance(creation_time, str):
            raise TypeError("Expected argument 'creation_time' to be a str")
        pulumi.set(__self__, "creation_time", creation_time)
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if discovery_url and not isinstance(discovery_url, str):
            raise TypeError("Expected argument 'discovery_url' to be a str")
        pulumi.set(__self__, "discovery_url", discovery_url)
        if friendly_name and not isinstance(friendly_name, str):
            raise TypeError("Expected argument 'friendly_name' to be a str")
        pulumi.set(__self__, "friendly_name", friendly_name)
        if identity and not isinstance(identity, dict):
            raise TypeError("Expected argument 'identity' to be a dict")
        pulumi.set(__self__, "identity", identity)
        if key_vault and not isinstance(key_vault, str):
            raise TypeError("Expected argument 'key_vault' to be a str")
        pulumi.set(__self__, "key_vault", key_vault)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if sku and not isinstance(sku, dict):
            raise TypeError("Expected argument 'sku' to be a dict")
        pulumi.set(__self__, "sku", sku)
        if storage_account and not isinstance(storage_account, str):
            raise TypeError("Expected argument 'storage_account' to be a str")
        pulumi.set(__self__, "storage_account", storage_account)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if workspace_id and not isinstance(workspace_id, str):
            raise TypeError("Expected argument 'workspace_id' to be a str")
        pulumi.set(__self__, "workspace_id", workspace_id)

    @property
    @pulumi.getter(name="applicationInsights")
    def application_insights(self) -> Optional[str]:
        """
        ARM id of the application insights associated with this workspace. This cannot be changed once the workspace has been created
        """
        return pulumi.get(self, "application_insights")

    @property
    @pulumi.getter(name="containerRegistry")
    def container_registry(self) -> Optional[str]:
        """
        ARM id of the container registry associated with this workspace. This cannot be changed once the workspace has been created
        """
        return pulumi.get(self, "container_registry")

    @property
    @pulumi.getter(name="creationTime")
    def creation_time(self) -> str:
        """
        The creation time of the machine learning workspace in ISO8601 format.
        """
        return pulumi.get(self, "creation_time")

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        The description of this workspace.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="discoveryUrl")
    def discovery_url(self) -> Optional[str]:
        """
        Url for the discovery service to identify regional endpoints for machine learning experimentation services
        """
        return pulumi.get(self, "discovery_url")

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """
        The friendly name for this workspace. This name is mutable
        """
        return pulumi.get(self, "friendly_name")

    @property
    @pulumi.getter
    def identity(self) -> Optional['outputs.IdentityResponse']:
        """
        The identity of the resource.
        """
        return pulumi.get(self, "identity")

    @property
    @pulumi.getter(name="keyVault")
    def key_vault(self) -> Optional[str]:
        """
        ARM id of the key vault associated with this workspace. This cannot be changed once the workspace has been created
        """
        return pulumi.get(self, "key_vault")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Specifies the location of the resource.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Specifies the name of the resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The current deployment state of workspace resource. The provisioningState is to indicate states for resource provisioning.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.SkuResponse']:
        """
        The sku of the workspace.
        """
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter(name="storageAccount")
    def storage_account(self) -> Optional[str]:
        """
        ARM id of the storage account associated with this workspace. This cannot be changed once the workspace has been created
        """
        return pulumi.get(self, "storage_account")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Contains resource tags defined as key/value pairs.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Specifies the type of the resource.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="workspaceId")
    def workspace_id(self) -> str:
        """
        The immutable id associated with this workspace.
        """
        return pulumi.get(self, "workspace_id")
class AwaitableGetWorkspaceResult(GetWorkspaceResult):
    """Awaitable wrapper so the invoke result can be consumed with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead `yield` makes this a generator, satisfying the awaitable
        # protocol while immediately returning the already-resolved result.
        if False:
            yield self
        return GetWorkspaceResult(
            application_insights=self.application_insights,
            container_registry=self.container_registry,
            creation_time=self.creation_time,
            description=self.description,
            discovery_url=self.discovery_url,
            friendly_name=self.friendly_name,
            identity=self.identity,
            key_vault=self.key_vault,
            location=self.location,
            name=self.name,
            provisioning_state=self.provisioning_state,
            sku=self.sku,
            storage_account=self.storage_account,
            tags=self.tags,
            type=self.type,
            workspace_id=self.workspace_id)
def get_workspace(resource_group_name: Optional[str] = None,
                  workspace_name: Optional[str] = None,
                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWorkspaceResult:
    """
    Use this data source to access information about an existing resource.

    :param str resource_group_name: Name of the resource group in which workspace is located.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    :param opts: Options controlling how the invoke is executed.
    :return: A description of the requested machine learning workspace.
    """
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['workspaceName'] = workspace_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:machinelearningservices/v20191101:getWorkspace', __args__, opts=opts, typ=GetWorkspaceResult).value

    return AwaitableGetWorkspaceResult(
        application_insights=__ret__.application_insights,
        container_registry=__ret__.container_registry,
        creation_time=__ret__.creation_time,
        description=__ret__.description,
        discovery_url=__ret__.discovery_url,
        friendly_name=__ret__.friendly_name,
        identity=__ret__.identity,
        key_vault=__ret__.key_vault,
        location=__ret__.location,
        name=__ret__.name,
        provisioning_state=__ret__.provisioning_state,
        sku=__ret__.sku,
        storage_account=__ret__.storage_account,
        tags=__ret__.tags,
        type=__ret__.type,
        workspace_id=__ret__.workspace_id)
|
en
| 0.914259
|
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** An object that represents a machine learning workspace. ARM id of the application insights associated with this workspace. This cannot be changed once the workspace has been created ARM id of the container registry associated with this workspace. This cannot be changed once the workspace has been created The creation time of the machine learning workspace in ISO8601 format. The description of this workspace. Url for the discovery service to identify regional endpoints for machine learning experimentation services The friendly name for this workspace. This name in mutable The identity of the resource. ARM id of the key vault associated with this workspace. This cannot be changed once the workspace has been created Specifies the location of the resource. Specifies the name of the resource. The current deployment state of workspace resource. The provisioningState is to indicate states for resource provisioning. The sku of the workspace. ARM id of the storage account associated with this workspace. This cannot be changed once the workspace has been created Contains resource tags defined as key/value pairs. Specifies the type of the resource. The immutable id associated with this workspace. # pylint: disable=using-constant-test Use this data source to access information about an existing resource. :param str resource_group_name: Name of the resource group in which workspace is located. :param str workspace_name: Name of Azure Machine Learning workspace.
| 1.614012
| 2
|
homeassistant/components/vicare/water_heater.py
|
Watchfox/home-assistant
| 2
|
6625687
|
"""Viessmann ViCare water_heater device."""
import logging
from homeassistant.components.water_heater import (
SUPPORT_TARGET_TEMPERATURE,
WaterHeaterDevice,
)
from homeassistant.const import TEMP_CELSIUS, ATTR_TEMPERATURE, PRECISION_WHOLE
from . import DOMAIN as VICARE_DOMAIN
from . import VICARE_API
from . import VICARE_NAME
from . import VICARE_HEATING_TYPE
_LOGGER = logging.getLogger(__name__)

# Operating-mode identifiers as reported by the ViCare API.
VICARE_MODE_DHW = "dhw"
VICARE_MODE_DHWANDHEATING = "dhwAndHeating"
VICARE_MODE_FORCEDREDUCED = "forcedReduced"
VICARE_MODE_FORCEDNORMAL = "forcedNormal"
VICARE_MODE_OFF = "standby"

# Allowed domestic-hot-water setpoint range, in degrees Celsius.
VICARE_TEMP_WATER_MIN = 10
VICARE_TEMP_WATER_MAX = 60

# Operation modes exposed to Home Assistant.
OPERATION_MODE_ON = "on"
OPERATION_MODE_OFF = "off"

SUPPORT_FLAGS_HEATER = SUPPORT_TARGET_TEMPERATURE

# Map ViCare modes to the HA on/off operation; any mode that heats DHW is "on".
VICARE_TO_HA_HVAC_DHW = {
    VICARE_MODE_DHW: OPERATION_MODE_ON,
    VICARE_MODE_DHWANDHEATING: OPERATION_MODE_ON,
    VICARE_MODE_FORCEDREDUCED: OPERATION_MODE_OFF,
    VICARE_MODE_FORCEDNORMAL: OPERATION_MODE_ON,
    VICARE_MODE_OFF: OPERATION_MODE_OFF,
}

# Reverse mapping used when HA asks to change the operation mode.
HA_TO_VICARE_HVAC_DHW = {
    OPERATION_MODE_OFF: VICARE_MODE_OFF,
    OPERATION_MODE_ON: VICARE_MODE_DHW,
}
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Create the ViCare water_heater devices."""
    if discovery_info is None:
        return
    domain_data = hass.data[VICARE_DOMAIN]
    device = ViCareWater(
        f"{domain_data[VICARE_NAME]} Water",
        domain_data[VICARE_API],
        domain_data[VICARE_HEATING_TYPE],
    )
    add_entities([device])
class ViCareWater(WaterHeaterDevice):
    """Representation of the ViCare domestic hot water (DHW) device."""

    def __init__(self, name, api, heating_type):
        """Initialize the DHW water_heater device.

        :param name: Entity display name.
        :param api: ViCare device API wrapper providing the getters/setters
            used below.
        :param heating_type: Configured heating type (stored for reference).
        """
        self._name = name
        self._state = None
        self._api = api
        self._attributes = {}
        self._target_temperature = None
        self._current_temperature = None
        self._current_mode = None
        self._heating_type = heating_type

    def update(self):
        """Let HA know there has been an update from the ViCare API."""
        current_temperature = self._api.getDomesticHotWaterStorageTemperature()
        # The API returns the string "error" when the reading failed;
        # treat that the same as a missing value.
        if current_temperature is not None and current_temperature != "error":
            self._current_temperature = current_temperature
        else:
            self._current_temperature = None
        self._target_temperature = self._api.getDomesticHotWaterConfiguredTemperature()
        self._current_mode = self._api.getActiveMode()

    @property
    def supported_features(self):
        """Return the list of supported features."""
        return SUPPORT_FLAGS_HEATER

    @property
    def name(self):
        """Return the name of the water_heater device."""
        return self._name

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._current_temperature

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._target_temperature

    def set_temperature(self, **kwargs):
        """Set new target temperature.

        Bug fix: previously this passed the stale ``self._target_temperature``
        to the API instead of the temperature requested by the caller, so the
        setpoint never actually changed.
        """
        temp = kwargs.get(ATTR_TEMPERATURE)
        if temp is not None:
            self._api.setDomesticHotWaterTemperature(temp)
            # Reflect the request locally until the next API update.
            self._target_temperature = temp

    @property
    def min_temp(self):
        """Return the minimum temperature."""
        return VICARE_TEMP_WATER_MIN

    @property
    def max_temp(self):
        """Return the maximum temperature."""
        return VICARE_TEMP_WATER_MAX

    @property
    def precision(self):
        """Return the precision of the system."""
        return PRECISION_WHOLE

    @property
    def current_operation(self):
        """Return current operation ie. heat, cool, idle."""
        return VICARE_TO_HA_HVAC_DHW.get(self._current_mode)

    @property
    def operation_list(self):
        """Return the list of available operation modes."""
        return list(HA_TO_VICARE_HVAC_DHW)
|
"""Viessmann ViCare water_heater device."""
import logging
from homeassistant.components.water_heater import (
SUPPORT_TARGET_TEMPERATURE,
WaterHeaterDevice,
)
from homeassistant.const import TEMP_CELSIUS, ATTR_TEMPERATURE, PRECISION_WHOLE
from . import DOMAIN as VICARE_DOMAIN
from . import VICARE_API
from . import VICARE_NAME
from . import VICARE_HEATING_TYPE
_LOGGER = logging.getLogger(__name__)

# Operating-mode identifiers as reported by the ViCare API.
VICARE_MODE_DHW = "dhw"
VICARE_MODE_DHWANDHEATING = "dhwAndHeating"
VICARE_MODE_FORCEDREDUCED = "forcedReduced"
VICARE_MODE_FORCEDNORMAL = "forcedNormal"
VICARE_MODE_OFF = "standby"

# Allowed domestic-hot-water setpoint range, in degrees Celsius.
VICARE_TEMP_WATER_MIN = 10
VICARE_TEMP_WATER_MAX = 60

# Operation modes exposed to Home Assistant.
OPERATION_MODE_ON = "on"
OPERATION_MODE_OFF = "off"

SUPPORT_FLAGS_HEATER = SUPPORT_TARGET_TEMPERATURE

# Map ViCare modes to the HA on/off operation; any mode that heats DHW is "on".
VICARE_TO_HA_HVAC_DHW = {
    VICARE_MODE_DHW: OPERATION_MODE_ON,
    VICARE_MODE_DHWANDHEATING: OPERATION_MODE_ON,
    VICARE_MODE_FORCEDREDUCED: OPERATION_MODE_OFF,
    VICARE_MODE_FORCEDNORMAL: OPERATION_MODE_ON,
    VICARE_MODE_OFF: OPERATION_MODE_OFF,
}

# Reverse mapping used when HA asks to change the operation mode.
HA_TO_VICARE_HVAC_DHW = {
    OPERATION_MODE_OFF: VICARE_MODE_OFF,
    OPERATION_MODE_ON: VICARE_MODE_DHW,
}
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Create the ViCare water_heater devices."""
    if discovery_info is None:
        return
    name = f"{hass.data[VICARE_DOMAIN][VICARE_NAME]} Water"
    api = hass.data[VICARE_DOMAIN][VICARE_API]
    heating_type = hass.data[VICARE_DOMAIN][VICARE_HEATING_TYPE]
    add_entities([ViCareWater(name, api, heating_type)])
class ViCareWater(WaterHeaterDevice):
    """Representation of the ViCare domestic hot water (DHW) device."""

    def __init__(self, name, api, heating_type):
        """Initialize the DHW water_heater device.

        :param name: Entity display name.
        :param api: ViCare device API wrapper providing the getters/setters
            used below.
        :param heating_type: Configured heating type (stored for reference).
        """
        self._name = name
        self._state = None
        self._api = api
        self._attributes = {}
        self._target_temperature = None
        self._current_temperature = None
        self._current_mode = None
        self._heating_type = heating_type

    def update(self):
        """Let HA know there has been an update from the ViCare API."""
        current_temperature = self._api.getDomesticHotWaterStorageTemperature()
        # The API returns the string "error" when the reading failed;
        # treat that the same as a missing value.
        if current_temperature is not None and current_temperature != "error":
            self._current_temperature = current_temperature
        else:
            self._current_temperature = None
        self._target_temperature = self._api.getDomesticHotWaterConfiguredTemperature()
        self._current_mode = self._api.getActiveMode()

    @property
    def supported_features(self):
        """Return the list of supported features."""
        return SUPPORT_FLAGS_HEATER

    @property
    def name(self):
        """Return the name of the water_heater device."""
        return self._name

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._current_temperature

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._target_temperature

    def set_temperature(self, **kwargs):
        """Set new target temperature.

        Bug fix: previously this passed the stale ``self._target_temperature``
        to the API instead of the temperature requested by the caller, so the
        setpoint never actually changed.
        """
        temp = kwargs.get(ATTR_TEMPERATURE)
        if temp is not None:
            self._api.setDomesticHotWaterTemperature(temp)
            # Reflect the request locally until the next API update.
            self._target_temperature = temp

    @property
    def min_temp(self):
        """Return the minimum temperature."""
        return VICARE_TEMP_WATER_MIN

    @property
    def max_temp(self):
        """Return the maximum temperature."""
        return VICARE_TEMP_WATER_MAX

    @property
    def precision(self):
        """Return the precision of the system."""
        return PRECISION_WHOLE

    @property
    def current_operation(self):
        """Return current operation ie. heat, cool, idle."""
        return VICARE_TO_HA_HVAC_DHW.get(self._current_mode)

    @property
    def operation_list(self):
        """Return the list of available operation modes."""
        return list(HA_TO_VICARE_HVAC_DHW)
|
en
| 0.782186
|
Viessmann ViCare water_heater device. Create the ViCare water_heater devices. Representation of the ViCare domestic hot water device. Initialize the DHW water_heater device. Let HA know there has been an update from the ViCare API. Return the list of supported features. Return the name of the water_heater device. Return the unit of measurement. Return the current temperature. Return the temperature we try to reach. Set new target temperatures. Return the minimum temperature. Return the maximum temperature. Return the precision of the system. Return current operation ie. heat, cool, idle. Return the list of available operation modes.
| 2.46015
| 2
|
SCN_fractal_test.py
|
Tnorm/SCN
| 0
|
6625688
|
<reponame>Tnorm/SCN
# Fit an SCN regressor to a 1-D fractal-generated dataset and average the
# learned prediction curve over several independent experiments.
# NOTE(review): indentation of the plotting/accumulation blocks below was
# reconstructed from context -- confirm against the original file.
from SCN import SCN
from Fractal_generator import koch, binary_frac
import torch
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
import pickle
from scipy.stats import norm

# Direction vector for the (currently unused) Koch-curve generator.
direction = [0.0,float(1)/243]
#X, Y = koch([[0,0]], 5, direction)
X, Y = binary_frac([], 4, 0, 1)
X = torch.from_numpy(np.asarray(X, dtype=np.float32)).view(len(X), -1)
#X = torch.from_numpy(np.arange(0.0, 1.0, 0.005, dtype=np.float32)).view(len(np.arange(0.0, 1.0, 0.005)), -1)
#Y = torch.from_numpy(np.asarray(norm.pdf(X, 0.05, 0.1)/3 + norm.pdf(X, 0.95, 0.1)/3 + norm.pdf(X, 0.5, 0.2)/3 + norm.pdf(X, 0.35, 0.2)/3 + norm.pdf(X, 0.65, 0.2)/3 -
#                      norm.pdf(X, 0.25, 0.01)/140 - norm.pdf(X, 0.75, 0.01)/140 - norm.pdf(X, 0.5, 0.02)/50 - norm.pdf(X, 1.0, 0.01)/200 - norm.pdf(X, 0.0, 0.01)/200
#                      , dtype=np.float32) \
#                      ).view(len(np.arange(0.0, 1.0, 0.005)),-1)
X = X.type(torch.FloatTensor)# + torch.rand(X.size())*1/97
Y = torch.from_numpy(np.asarray(Y, dtype=np.float32)).view(len(Y), -1)

# Two fixed "visible" unit coordinates spanning the [0, 1] input interval.
visible_units = Variable(torch.FloatTensor([0, 1]).view(2, -1))
batch_size = 100
input_dim = 1
iterations = 10000
experiments = 10
lr1 = 0.001

# Accumulates the predicted curve across experiments (averaged when pickled).
S = np.zeros(X.size()[0])
for experiment in range(experiments):
    scn = SCN(2, 1, visible_units, 8)
    optimizer = torch.optim.Adam(scn.parameters(), lr=lr1)
    criterion = torch.nn.MSELoss()
    for i in range(iterations):
        sample_inds = np.random.choice(X.size()[0], batch_size)
        samples = Variable(X[sample_inds])
        y = Variable(Y[sample_inds])
        output = scn(samples)[0].view(-1, 1)
        loss = criterion(output, y)
        #S[i] += loss.data[0]
        loss.backward(retain_graph=True)
        optimizer.step()
        volatility = 1
        # Manual projected-gradient step on the simplex weights L:
        # clamp into [0.45, 0.55], renormalise to sum to 1, clamp to [0, 1].
        for j in range(scn.depth):
            scn.L[j].data = (scn.L[j].data - lr1*volatility * scn.L[j].grad.data).clamp(0.45,0.55)
            scn.L[j].data = (scn.L[j].data / (scn.L[j].data.sum())).clamp(0, 1)
            volatility*= 1.0
            # NOTE(review): multiplying by 1.0 leaves `volatility` unchanged --
            # presumably a decay factor was intended; confirm.
            #scn.L[j].data = torch.ones(scn.L[j].size()) / 2
        #scn.visible_fs.data = torch.zeros(scn.visible_fs.size())
        #scn.biases.data = torch.zeros(scn.biases.size())
        optimizer.zero_grad()
        if i % 1000 == 0:
            # Periodically visualise target vs. current prediction.
            print(i)
            pltx = X.view(-1, input_dim).numpy()
            plty1 = scn(Variable(X))[0].data.view(-1, 1).numpy()
            plty = Y.view(-1, 1).numpy()
            #print(scn.biases.data)
            plt.scatter(pltx, plty)
            plt.scatter(pltx, plty1)
            # plt.xlim(0, 1)
            plt.pause(0.1)
            plt.clf()
    # Accumulate this experiment's last rendered prediction and persist the
    # running average (file is rewritten after every experiment).
    S = np.add(S, plty1.reshape(S.shape))
    with open("scn_resf_3.txt", "wb") as fp: # Pickling
        pickle.dump(S/experiments, fp)
#plt.plot(range(iterations), S)
plt.show()
|
# Fit an SCN regressor to a 1-D fractal-generated dataset and average the
# learned prediction curve over several independent experiments.
# NOTE(review): indentation of the plotting/accumulation blocks below was
# reconstructed from context -- confirm against the original file.
from SCN import SCN
from Fractal_generator import koch, binary_frac
import torch
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
import pickle
from scipy.stats import norm

# Direction vector for the (currently unused) Koch-curve generator.
direction = [0.0,float(1)/243]
#X, Y = koch([[0,0]], 5, direction)
X, Y = binary_frac([], 4, 0, 1)
X = torch.from_numpy(np.asarray(X, dtype=np.float32)).view(len(X), -1)
#X = torch.from_numpy(np.arange(0.0, 1.0, 0.005, dtype=np.float32)).view(len(np.arange(0.0, 1.0, 0.005)), -1)
#Y = torch.from_numpy(np.asarray(norm.pdf(X, 0.05, 0.1)/3 + norm.pdf(X, 0.95, 0.1)/3 + norm.pdf(X, 0.5, 0.2)/3 + norm.pdf(X, 0.35, 0.2)/3 + norm.pdf(X, 0.65, 0.2)/3 -
#                      norm.pdf(X, 0.25, 0.01)/140 - norm.pdf(X, 0.75, 0.01)/140 - norm.pdf(X, 0.5, 0.02)/50 - norm.pdf(X, 1.0, 0.01)/200 - norm.pdf(X, 0.0, 0.01)/200
#                      , dtype=np.float32) \
#                      ).view(len(np.arange(0.0, 1.0, 0.005)),-1)
X = X.type(torch.FloatTensor)# + torch.rand(X.size())*1/97
Y = torch.from_numpy(np.asarray(Y, dtype=np.float32)).view(len(Y), -1)

# Two fixed "visible" unit coordinates spanning the [0, 1] input interval.
visible_units = Variable(torch.FloatTensor([0, 1]).view(2, -1))
batch_size = 100
input_dim = 1
iterations = 10000
experiments = 10
lr1 = 0.001

# Accumulates the predicted curve across experiments (averaged when pickled).
S = np.zeros(X.size()[0])
for experiment in range(experiments):
    scn = SCN(2, 1, visible_units, 8)
    optimizer = torch.optim.Adam(scn.parameters(), lr=lr1)
    criterion = torch.nn.MSELoss()
    for i in range(iterations):
        sample_inds = np.random.choice(X.size()[0], batch_size)
        samples = Variable(X[sample_inds])
        y = Variable(Y[sample_inds])
        output = scn(samples)[0].view(-1, 1)
        loss = criterion(output, y)
        #S[i] += loss.data[0]
        loss.backward(retain_graph=True)
        optimizer.step()
        volatility = 1
        # Manual projected-gradient step on the simplex weights L:
        # clamp into [0.45, 0.55], renormalise to sum to 1, clamp to [0, 1].
        for j in range(scn.depth):
            scn.L[j].data = (scn.L[j].data - lr1*volatility * scn.L[j].grad.data).clamp(0.45,0.55)
            scn.L[j].data = (scn.L[j].data / (scn.L[j].data.sum())).clamp(0, 1)
            volatility*= 1.0
            # NOTE(review): multiplying by 1.0 leaves `volatility` unchanged --
            # presumably a decay factor was intended; confirm.
            #scn.L[j].data = torch.ones(scn.L[j].size()) / 2
        #scn.visible_fs.data = torch.zeros(scn.visible_fs.size())
        #scn.biases.data = torch.zeros(scn.biases.size())
        optimizer.zero_grad()
        if i % 1000 == 0:
            # Periodically visualise target vs. current prediction.
            print(i)
            pltx = X.view(-1, input_dim).numpy()
            plty1 = scn(Variable(X))[0].data.view(-1, 1).numpy()
            plty = Y.view(-1, 1).numpy()
            #print(scn.biases.data)
            plt.scatter(pltx, plty)
            plt.scatter(pltx, plty1)
            # plt.xlim(0, 1)
            plt.pause(0.1)
            plt.clf()
    # Accumulate this experiment's last rendered prediction and persist the
    # running average (file is rewritten after every experiment).
    S = np.add(S, plty1.reshape(S.shape))
    with open("scn_resf_3.txt", "wb") as fp: # Pickling
        pickle.dump(S/experiments, fp)
#plt.plot(range(iterations), S)
plt.show()
|
en
| 0.209839
|
#X, Y = koch([[0,0]], 5, direction) #X = torch.from_numpy(np.arange(0.0, 1.0, 0.005, dtype=np.float32)).view(len(np.arange(0.0, 1.0, 0.005)), -1) #Y = torch.from_numpy(np.asarray(norm.pdf(X, 0.05, 0.1)/3 + norm.pdf(X, 0.95, 0.1)/3 + norm.pdf(X, 0.5, 0.2)/3 + norm.pdf(X, 0.35, 0.2)/3 + norm.pdf(X, 0.65, 0.2)/3 - # norm.pdf(X, 0.25, 0.01)/140 - norm.pdf(X, 0.75, 0.01)/140 - norm.pdf(X, 0.5, 0.02)/50 - norm.pdf(X, 1.0, 0.01)/200 - norm.pdf(X, 0.0, 0.01)/200 # , dtype=np.float32) \ # ).view(len(np.arange(0.0, 1.0, 0.005)),-1) # + torch.rand(X.size())*1/97 #S[i] += loss.data[0] #scn.L[j].data = torch.ones(scn.L[j].size()) / 2 #scn.visible_fs.data = torch.zeros(scn.visible_fs.size()) #scn.biases.data = torch.zeros(scn.biases.size()) #print(scn.biases.data) # plt.xlim(0, 1) # Pickling #plt.plot(range(iterations), S)
| 2.006572
| 2
|
kaptan/__about__.py
|
sunpoet/kaptan
| 134
|
6625689
|
<reponame>sunpoet/kaptan<gh_stars>100-1000
"""Package metadata for kaptan; read by the build tooling and at runtime."""
__title__ = 'kaptan'
__package_name__ = 'kaptan'
__version__ = '0.5.12'
__description__ = 'Configuration manager'
__email__ = '<EMAIL>'
__url__ = 'https://github.com/emre/kaptan'
__author__ = '<NAME>'
__license__ = 'BSD'
__copyright__ = 'Copyright 2013-2019 <NAME>'
|
"""Package metadata for kaptan; read by the build tooling and at runtime."""
__title__ = 'kaptan'
__package_name__ = 'kaptan'
__version__ = '0.5.12'
__description__ = 'Configuration manager'
__email__ = '<EMAIL>'
__url__ = 'https://github.com/emre/kaptan'
__author__ = '<NAME>'
__license__ = 'BSD'
__copyright__ = 'Copyright 2013-2019 <NAME>'
|
none
| 1
| 0.935269
| 1
|
|
youTubedownloadalex2.py
|
unis369/Mytubedownload
| 0
|
6625690
|
<filename>youTubedownloadalex2.py
# from pytube import YouTube
import pytube

# Download every YouTube video listed (one URL per line) in ./list.txt,
# saving them as test1, test2, ... in the current directory.
#
# Fixes: iterate the file directly instead of a manual readline loop,
# strip the trailing newline before handing the URL to pytube, and skip
# blank lines instead of crashing on them.
with open('./list.txt', 'r') as f_stream:
    count = 1
    for line in f_stream:
        url = line.strip()
        if not url:
            continue  # ignore empty lines
        print(count, url)
        yt = pytube.YouTube(url)
        yt.streams.first().download(filename='test' + str(count))
        count += 1
|
<filename>youTubedownloadalex2.py
# from pytube import YouTube
import pytube

# Download every YouTube video listed (one URL per line) in ./list.txt,
# saving them as test1, test2, ... in the current directory.
#
# Fixes: iterate the file directly instead of a manual readline loop,
# strip the trailing newline before handing the URL to pytube, and skip
# blank lines instead of crashing on them.
with open('./list.txt', 'r') as f_stream:
    count = 1
    for line in f_stream:
        url = line.strip()
        if not url:
            continue  # ignore empty lines
        print(count, url)
        yt = pytube.YouTube(url)
        yt.streams.first().download(filename='test' + str(count))
        count += 1
|
en
| 0.390796
|
# from pytube import YouTube
| 2.903382
| 3
|
virtual/bin/django-admin.py
|
Tyra-hans/neighbourhood-blitz
| 0
|
6625691
|
#!/home/tyra/Desktop/MS-Python-Pre-work/django/neighbourhood/virtual/bin/python
"""Entry point for Django's command-line administrative utility."""
from django.core import management

if __name__ == "__main__":
    # Parse sys.argv and dispatch to the requested management command.
    management.execute_from_command_line()
|
#!/home/tyra/Desktop/MS-Python-Pre-work/django/neighbourhood/virtual/bin/python
"""Entry point for Django's command-line administrative utility."""
from django.core import management

if __name__ == "__main__":
    # Parse sys.argv and dispatch to the requested management command.
    management.execute_from_command_line()
|
en
| 0.725312
|
#!/home/tyra/Desktop/MS-Python-Pre-work/django/neighbourhood/virtual/bin/python
| 1.049699
| 1
|
neo/core/analogsignalarray.py
|
guangxingli/python-neo
| 0
|
6625692
|
# -*- coding: utf-8 -*-
'''
This module implements :class:`AnalogSignalArray`, an array of analog signals.
:class:`AnalogSignalArray` derives from :class:`BaseAnalogSignal`, from
:module:`neo.core.analogsignal`.
:class:`BaseAnalogSignal` inherits from :class:`quantites.Quantity`, which
inherits from :class:`numpy.array`.
Inheritance from :class:`numpy.array` is explained here:
http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
In brief:
* Initialization of a new object from constructor happens in :meth:`__new__`.
This is where user-specified attributes are set.
* :meth:`__array_finalize__` is called for all new objects, including those
created by slicing. This is where attributes are copied over from
the old object.
'''
# needed for python 3 compatibility
from __future__ import absolute_import, division, print_function
import logging
import numpy as np
import quantities as pq
from neo.core.analogsignal import (BaseAnalogSignal, AnalogSignal,
_get_sampling_rate)
from neo.core.baseneo import BaseNeo, merge_annotations
logger = logging.getLogger("Neo")
class AnalogSignalArray(BaseAnalogSignal):
    '''
    Several continuous analog signals.

    A representation of several continuous, analog signals that
    have the same duration, sampling rate and start time.
    Basically, it is a 2D array like AnalogSignal: dim 0 is time, dim 1 is
    channel index.

    Inherits from :class:`quantities.Quantity`, which in turn inherits from
    :class:`numpy.ndarray`.

    *Usage*::

        >>> from neo.core import AnalogSignalArray
        >>> import quantities as pq
        >>>
        >>> sigarr = AnalogSignalArray([[1, 2, 3], [4, 5, 6]], units='V',
        ...                            sampling_rate=1*pq.Hz)
        >>>
        >>> sigarr
        <AnalogSignalArray(array([[1, 2, 3],
              [4, 5, 6]]) * V, [0.0 s, 2.0 s], sampling rate: 1.0 Hz)>
        >>> sigarr[:, 1]
        <AnalogSignal(array([2, 5]) * V, [0.0 s, 2.0 s],
            sampling rate: 1.0 Hz)>
        >>> sigarr[1, 1]
        array(5) * V

    *Required attributes/properties*:
        :signal: (quantity array 2D, numpy array 2D, or list (data, channel))
            The data itself.
        :units: (quantity units) Required if the signal is a list or NumPy
            array, not if it is a :class:`Quantity`.
        :t_start: (quantity scalar) Time when signal begins.
        :sampling_rate: *or* :sampling_period: (quantity scalar) Number of
            samples per unit time or interval between two samples.
            If both are specified, they are checked for consistency.

    *Recommended attributes/properties*:
        :name: (str) A label for the dataset.
        :description: (str) Text description.
        :file_origin: (str) Filesystem path or URL of the original data file.
        :channel_index: (numpy array 1D dtype='i') You can use this to order
            the columns of the signal in any way you want. It should have the
            same number of elements as the signal has columns.
            :class:`AnalogSignal` and :class:`Unit` objects can be given
            indexes as well so related objects can be linked together.

    *Optional attributes/properties*:
        :dtype: (numpy dtype or str) Override the dtype of the signal array.
        :copy: (bool) True by default.

    Note: Any other additional arguments are assumed to be user-specific
    metadata and stored in :attr:`annotations`.

    *Properties available on this object*:
        :sampling_rate: (quantity scalar) Number of samples per unit time.
            (1/:attr:`sampling_period`)
        :sampling_period: (quantity scalar) Interval between two samples.
            (1/:attr:`sampling_rate`)
        :duration: (Quantity) Signal duration, read-only.
            (size * :attr:`sampling_period`)
        :t_stop: (quantity scalar) Time when signal ends, read-only.
            (:attr:`t_start` + :attr:`duration`)
        :times: (quantity 1D) The time points of each sample of the signal,
            read-only.
            (:attr:`t_start` + arange(:attr:`shape`[0])/:attr:`sampling_rate`)
        :channel_indexes: (numpy array 1D dtype='i') The same as
            :attr:`channel_index`, read-only.

    *Slicing*:
        :class:`AnalogSignalArray` objects can be sliced. When taking a single
        channel (second index an integer, e.g. [:, 0]), an
        :class:`AnalogSignal` is returned. When taking a single element, a
        :class:`~quantities.Quantity` is returned. Otherwise an
        :class:`AnalogSignalArray` (actually a view) is returned, with the
        same metadata, except that :attr:`t_start` is changed if the start
        index along the time dimension is greater than zero.

    *Operations available on this object*:
        == != + * /
    '''

    _single_parent_objects = ('Segment', 'RecordingChannelGroup')
    _quantity_attr = 'signal'
    _necessary_attrs = (('signal', pq.Quantity, 2),
                        ('sampling_rate', pq.Quantity, 0),
                        ('t_start', pq.Quantity, 0))
    _recommended_attrs = ((('channel_index', np.ndarray, 1, np.dtype('i')),) +
                          BaseNeo._recommended_attrs)

    def __new__(cls, signal, units=None, dtype=None, copy=True,
                t_start=0 * pq.s, sampling_rate=None, sampling_period=None,
                name=None, file_origin=None, description=None,
                channel_index=None, **annotations):
        '''
        Construct a new :class:`AnalogSignalArray` from data.

        This is called whenever a new :class:`AnalogSignalArray` is created
        from the constructor, but not when slicing.
        '''
        if units is None:
            if not hasattr(signal, "units"):
                raise ValueError("Units must be specified")
        elif isinstance(signal, pq.Quantity):
            # could improve this test, what if units is a string?
            if units != signal.units:
                signal = signal.rescale(units)
        obj = pq.Quantity(signal, units=units, dtype=dtype,
                          copy=copy).view(cls)
        obj.t_start = t_start
        obj.sampling_rate = _get_sampling_rate(sampling_rate, sampling_period)
        obj.channel_index = channel_index
        # parent containers are attached later by the container objects
        obj.segment = None
        obj.recordingchannelgroup = None
        return obj

    def __init__(self, signal, units=None, dtype=None, copy=True,
                 t_start=0 * pq.s, sampling_rate=None, sampling_period=None,
                 name=None, file_origin=None, description=None,
                 channel_index=None, **annotations):
        '''
        Initialize a newly constructed :class:`AnalogSignalArray` instance.

        The data-related arguments are consumed by :meth:`__new__`; only
        the Neo metadata is handled here.
        '''
        BaseNeo.__init__(self, name=name, file_origin=file_origin,
                         description=description, **annotations)

    @property
    def channel_indexes(self):
        '''
        The same as :attr:`channel_index`.
        '''
        return self.channel_index

    def __getslice__(self, i, j):
        '''
        Get a slice from :attr:`i` to :attr:`j`.

        Doesn't get called in Python 3, :meth:`__getitem__` is called instead.
        '''
        return self.__getitem__(slice(i, j))

    def __getitem__(self, i):
        '''
        Get the item or slice :attr:`i`.
        '''
        obj = super(BaseAnalogSignal, self).__getitem__(i)
        if isinstance(i, int):
            return obj
        elif isinstance(i, tuple):
            j, k = i
            if isinstance(k, int):
                if isinstance(j, slice):  # extract an AnalogSignal
                    obj = AnalogSignal(obj, sampling_rate=self.sampling_rate)
                    if j.start:
                        obj.t_start = (self.t_start +
                                       j.start * self.sampling_period)
                # return a Quantity (for some reason quantities does not
                # return a Quantity in this case)
                elif isinstance(j, int):
                    obj = pq.Quantity(obj, units=self.units)
                return obj
            elif isinstance(j, int):  # extract a quantity array
                # should be a better way to do this
                obj = pq.Quantity(np.array(obj), units=obj.units)
                return obj
            else:
                return obj
        elif isinstance(i, slice):
            if i.start:
                obj.t_start = self.t_start + i.start * self.sampling_period
            return obj
        else:
            raise IndexError("index should be an integer, tuple or slice")

    def time_slice(self, t_start, t_stop):
        '''
        Creates a new AnalogSignal corresponding to the time slice of the
        original AnalogSignal between times t_start, t_stop. Note, that for
        numerical stability reasons if t_start, t_stop do not fall exactly on
        the time bins defined by the sampling_period they will be rounded to
        the nearest sampling bins.
        '''
        # checking start time and transforming to start index
        if t_start is None:
            i = 0
        else:
            t_start = t_start.rescale(self.sampling_period.units)
            i = (t_start - self.t_start) / self.sampling_period
            i = int(np.rint(i.magnitude))
        # checking stop time and transforming to stop index
        if t_stop is None:
            j = len(self)
        else:
            t_stop = t_stop.rescale(self.sampling_period.units)
            j = (t_stop - self.t_start) / self.sampling_period
            j = int(np.rint(j.magnitude))
        if (i < 0) or (j > len(self)):
            # fix: implicit string concatenation instead of a backslash
            # continuation that embedded a long run of indentation spaces
            # (and the typo "withing") inside the error message
            raise ValueError('t_start, t_stop have to be within the analog '
                             'signal duration')
        # we're going to send the list of indices so that we get a *copy* of
        # the sliced data
        obj = super(BaseAnalogSignal, self).__getitem__(np.arange(i, j, 1))
        obj.t_start = self.t_start + i * self.sampling_period
        return obj

    def merge(self, other):
        '''
        Merge another :class:`AnalogSignalArray` into this one.

        The :class:`AnalogSignalArray` objects are concatenated horizontally
        (column-wise, :func:`np.hstack`).
        If the attributes of the two :class:`AnalogSignalArray` are not
        compatible, an Exception is raised.
        '''
        assert self.sampling_rate == other.sampling_rate
        assert self.t_start == other.t_start
        if other.units != self.units:
            other = other.rescale(self.units)
        # fix: materialize the arrays in a list -- np.hstack expects a
        # sequence of arrays, not a lazy ``map`` iterator, on Python 3
        stack = np.hstack([np.array(self), np.array(other)])
        kwargs = {}
        # keep metadata that agrees; record the disagreement otherwise
        for name in ("name", "description", "file_origin"):
            attr_self = getattr(self, name)
            attr_other = getattr(other, name)
            if attr_self == attr_other:
                kwargs[name] = attr_self
            else:
                kwargs[name] = "merge(%s, %s)" % (attr_self, attr_other)
        if self.channel_index is None:
            channel_index = other.channel_index
        elif other.channel_index is None:
            channel_index = self.channel_index
        else:
            channel_index = np.append(self.channel_index,
                                      other.channel_index)
        merged_annotations = merge_annotations(self.annotations,
                                               other.annotations)
        kwargs.update(merged_annotations)
        return AnalogSignalArray(stack, units=self.units, dtype=self.dtype,
                                 copy=False, t_start=self.t_start,
                                 sampling_rate=self.sampling_rate,
                                 channel_index=channel_index,
                                 **kwargs)
|
# -*- coding: utf-8 -*-
'''
This module implements :class:`AnalogSignalArray`, an array of analog signals.
:class:`AnalogSignalArray` derives from :class:`BaseAnalogSignal`, from
:module:`neo.core.analogsignal`.
:class:`BaseAnalogSignal` inherits from :class:`quantites.Quantity`, which
inherits from :class:`numpy.array`.
Inheritance from :class:`numpy.array` is explained here:
http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
In brief:
* Initialization of a new object from constructor happens in :meth:`__new__`.
This is where user-specified attributes are set.
* :meth:`__array_finalize__` is called for all new objects, including those
created by slicing. This is where attributes are copied over from
the old object.
'''
# needed for python 3 compatibility
from __future__ import absolute_import, division, print_function
import logging
import numpy as np
import quantities as pq
from neo.core.analogsignal import (BaseAnalogSignal, AnalogSignal,
_get_sampling_rate)
from neo.core.baseneo import BaseNeo, merge_annotations
logger = logging.getLogger("Neo")
class AnalogSignalArray(BaseAnalogSignal):
    '''
    Several continuous analog signals.

    A representation of several continuous, analog signals that
    have the same duration, sampling rate and start time.
    Basically, it is a 2D array like AnalogSignal: dim 0 is time, dim 1 is
    channel index.

    Inherits from :class:`quantities.Quantity`, which in turn inherits from
    :class:`numpy.ndarray`.

    *Usage*::

        >>> from neo.core import AnalogSignalArray
        >>> import quantities as pq
        >>>
        >>> sigarr = AnalogSignalArray([[1, 2, 3], [4, 5, 6]], units='V',
        ...                            sampling_rate=1*pq.Hz)
        >>>
        >>> sigarr
        <AnalogSignalArray(array([[1, 2, 3],
              [4, 5, 6]]) * V, [0.0 s, 2.0 s], sampling rate: 1.0 Hz)>
        >>> sigarr[:, 1]
        <AnalogSignal(array([2, 5]) * V, [0.0 s, 2.0 s],
            sampling rate: 1.0 Hz)>
        >>> sigarr[1, 1]
        array(5) * V

    *Required attributes/properties*:
        :signal: (quantity array 2D, numpy array 2D, or list (data, channel))
            The data itself.
        :units: (quantity units) Required if the signal is a list or NumPy
            array, not if it is a :class:`Quantity`.
        :t_start: (quantity scalar) Time when signal begins.
        :sampling_rate: *or* :sampling_period: (quantity scalar) Number of
            samples per unit time or interval between two samples.
            If both are specified, they are checked for consistency.

    *Recommended attributes/properties*:
        :name: (str) A label for the dataset.
        :description: (str) Text description.
        :file_origin: (str) Filesystem path or URL of the original data file.
        :channel_index: (numpy array 1D dtype='i') You can use this to order
            the columns of the signal in any way you want. It should have the
            same number of elements as the signal has columns.
            :class:`AnalogSignal` and :class:`Unit` objects can be given
            indexes as well so related objects can be linked together.

    *Optional attributes/properties*:
        :dtype: (numpy dtype or str) Override the dtype of the signal array.
        :copy: (bool) True by default.

    Note: Any other additional arguments are assumed to be user-specific
    metadata and stored in :attr:`annotations`.

    *Properties available on this object*:
        :sampling_rate: (quantity scalar) Number of samples per unit time.
            (1/:attr:`sampling_period`)
        :sampling_period: (quantity scalar) Interval between two samples.
            (1/:attr:`sampling_rate`)
        :duration: (Quantity) Signal duration, read-only.
            (size * :attr:`sampling_period`)
        :t_stop: (quantity scalar) Time when signal ends, read-only.
            (:attr:`t_start` + :attr:`duration`)
        :times: (quantity 1D) The time points of each sample of the signal,
            read-only.
            (:attr:`t_start` + arange(:attr:`shape`[0])/:attr:`sampling_rate`)
        :channel_indexes: (numpy array 1D dtype='i') The same as
            :attr:`channel_index`, read-only.

    *Slicing*:
        :class:`AnalogSignalArray` objects can be sliced. When taking a single
        channel (second index an integer, e.g. [:, 0]), an
        :class:`AnalogSignal` is returned. When taking a single element, a
        :class:`~quantities.Quantity` is returned. Otherwise an
        :class:`AnalogSignalArray` (actually a view) is returned, with the
        same metadata, except that :attr:`t_start` is changed if the start
        index along the time dimension is greater than zero.

    *Operations available on this object*:
        == != + * /
    '''

    _single_parent_objects = ('Segment', 'RecordingChannelGroup')
    _quantity_attr = 'signal'
    _necessary_attrs = (('signal', pq.Quantity, 2),
                        ('sampling_rate', pq.Quantity, 0),
                        ('t_start', pq.Quantity, 0))
    _recommended_attrs = ((('channel_index', np.ndarray, 1, np.dtype('i')),) +
                          BaseNeo._recommended_attrs)

    def __new__(cls, signal, units=None, dtype=None, copy=True,
                t_start=0 * pq.s, sampling_rate=None, sampling_period=None,
                name=None, file_origin=None, description=None,
                channel_index=None, **annotations):
        '''
        Construct a new :class:`AnalogSignalArray` from data.

        This is called whenever a new :class:`AnalogSignalArray` is created
        from the constructor, but not when slicing.
        '''
        if units is None:
            if not hasattr(signal, "units"):
                raise ValueError("Units must be specified")
        elif isinstance(signal, pq.Quantity):
            # could improve this test, what if units is a string?
            if units != signal.units:
                signal = signal.rescale(units)
        obj = pq.Quantity(signal, units=units, dtype=dtype,
                          copy=copy).view(cls)
        obj.t_start = t_start
        obj.sampling_rate = _get_sampling_rate(sampling_rate, sampling_period)
        obj.channel_index = channel_index
        # parent containers are attached later by the container objects
        obj.segment = None
        obj.recordingchannelgroup = None
        return obj

    def __init__(self, signal, units=None, dtype=None, copy=True,
                 t_start=0 * pq.s, sampling_rate=None, sampling_period=None,
                 name=None, file_origin=None, description=None,
                 channel_index=None, **annotations):
        '''
        Initialize a newly constructed :class:`AnalogSignalArray` instance.

        The data-related arguments are consumed by :meth:`__new__`; only
        the Neo metadata is handled here.
        '''
        BaseNeo.__init__(self, name=name, file_origin=file_origin,
                         description=description, **annotations)

    @property
    def channel_indexes(self):
        '''
        The same as :attr:`channel_index`.
        '''
        return self.channel_index

    def __getslice__(self, i, j):
        '''
        Get a slice from :attr:`i` to :attr:`j`.

        Doesn't get called in Python 3, :meth:`__getitem__` is called instead.
        '''
        return self.__getitem__(slice(i, j))

    def __getitem__(self, i):
        '''
        Get the item or slice :attr:`i`.
        '''
        obj = super(BaseAnalogSignal, self).__getitem__(i)
        if isinstance(i, int):
            return obj
        elif isinstance(i, tuple):
            j, k = i
            if isinstance(k, int):
                if isinstance(j, slice):  # extract an AnalogSignal
                    obj = AnalogSignal(obj, sampling_rate=self.sampling_rate)
                    if j.start:
                        obj.t_start = (self.t_start +
                                       j.start * self.sampling_period)
                # return a Quantity (for some reason quantities does not
                # return a Quantity in this case)
                elif isinstance(j, int):
                    obj = pq.Quantity(obj, units=self.units)
                return obj
            elif isinstance(j, int):  # extract a quantity array
                # should be a better way to do this
                obj = pq.Quantity(np.array(obj), units=obj.units)
                return obj
            else:
                return obj
        elif isinstance(i, slice):
            if i.start:
                obj.t_start = self.t_start + i.start * self.sampling_period
            return obj
        else:
            raise IndexError("index should be an integer, tuple or slice")

    def time_slice(self, t_start, t_stop):
        '''
        Creates a new AnalogSignal corresponding to the time slice of the
        original AnalogSignal between times t_start, t_stop. Note, that for
        numerical stability reasons if t_start, t_stop do not fall exactly on
        the time bins defined by the sampling_period they will be rounded to
        the nearest sampling bins.
        '''
        # checking start time and transforming to start index
        if t_start is None:
            i = 0
        else:
            t_start = t_start.rescale(self.sampling_period.units)
            i = (t_start - self.t_start) / self.sampling_period
            i = int(np.rint(i.magnitude))
        # checking stop time and transforming to stop index
        if t_stop is None:
            j = len(self)
        else:
            t_stop = t_stop.rescale(self.sampling_period.units)
            j = (t_stop - self.t_start) / self.sampling_period
            j = int(np.rint(j.magnitude))
        if (i < 0) or (j > len(self)):
            # fix: implicit string concatenation instead of a backslash
            # continuation that embedded a long run of indentation spaces
            # (and the typo "withing") inside the error message
            raise ValueError('t_start, t_stop have to be within the analog '
                             'signal duration')
        # we're going to send the list of indices so that we get a *copy* of
        # the sliced data
        obj = super(BaseAnalogSignal, self).__getitem__(np.arange(i, j, 1))
        obj.t_start = self.t_start + i * self.sampling_period
        return obj

    def merge(self, other):
        '''
        Merge another :class:`AnalogSignalArray` into this one.

        The :class:`AnalogSignalArray` objects are concatenated horizontally
        (column-wise, :func:`np.hstack`).
        If the attributes of the two :class:`AnalogSignalArray` are not
        compatible, an Exception is raised.
        '''
        assert self.sampling_rate == other.sampling_rate
        assert self.t_start == other.t_start
        if other.units != self.units:
            other = other.rescale(self.units)
        # fix: materialize the arrays in a list -- np.hstack expects a
        # sequence of arrays, not a lazy ``map`` iterator, on Python 3
        stack = np.hstack([np.array(self), np.array(other)])
        kwargs = {}
        # keep metadata that agrees; record the disagreement otherwise
        for name in ("name", "description", "file_origin"):
            attr_self = getattr(self, name)
            attr_other = getattr(other, name)
            if attr_self == attr_other:
                kwargs[name] = attr_self
            else:
                kwargs[name] = "merge(%s, %s)" % (attr_self, attr_other)
        if self.channel_index is None:
            channel_index = other.channel_index
        elif other.channel_index is None:
            channel_index = self.channel_index
        else:
            channel_index = np.append(self.channel_index,
                                      other.channel_index)
        merged_annotations = merge_annotations(self.annotations,
                                               other.annotations)
        kwargs.update(merged_annotations)
        return AnalogSignalArray(stack, units=self.units, dtype=self.dtype,
                                 copy=False, t_start=self.t_start,
                                 sampling_rate=self.sampling_rate,
                                 channel_index=channel_index,
                                 **kwargs)
|
en
| 0.711186
|
# -*- coding: utf-8 -*- This module implements :class:`AnalogSignalArray`, an array of analog signals. :class:`AnalogSignalArray` derives from :class:`BaseAnalogSignal`, from :module:`neo.core.analogsignal`. :class:`BaseAnalogSignal` inherits from :class:`quantites.Quantity`, which inherits from :class:`numpy.array`. Inheritance from :class:`numpy.array` is explained here: http://docs.scipy.org/doc/numpy/user/basics.subclassing.html In brief: * Initialization of a new object from constructor happens in :meth:`__new__`. This is where user-specified attributes are set. * :meth:`__array_finalize__` is called for all new objects, including those created by slicing. This is where attributes are copied over from the old object. # needed for python 3 compatibility Several continuous analog signals A representation of several continuous, analog signals that have the same duration, sampling rate and start time. Basically, it is a 2D array like AnalogSignal: dim 0 is time, dim 1 is channel index Inherits from :class:`quantities.Quantity`, which in turn inherits from :class:`numpy.ndarray`. *Usage*:: >>> from neo.core import AnalogSignalArray >>> import quantities as pq >>> >>> sigarr = AnalogSignalArray([[1, 2, 3], [4, 5, 6]], units='V', ... sampling_rate=1*pq.Hz) >>> >>> sigarr <AnalogSignalArray(array([[1, 2, 3], [4, 5, 6]]) * mV, [0.0 s, 2.0 s], sampling rate: 1.0 Hz)> >>> sigarr[:,1] <AnalogSignal(array([2, 5]) * V, [0.0 s, 2.0 s], sampling rate: 1.0 Hz)> >>> sigarr[1, 1] array(5) * V *Required attributes/properties*: :signal: (quantity array 2D, numpy array 2D, or list (data, chanel)) The data itself. :units: (quantity units) Required if the signal is a list or NumPy array, not if it is a :class:`Quantity` :t_start: (quantity scalar) Time when signal begins :sampling_rate: *or* :sampling_period: (quantity scalar) Number of samples per unit time or interval between two samples. If both are specified, they are checked for consistency. 
*Recommended attributes/properties*: :name: (str) A label for the dataset. :description: (str) Text description. :file_origin: (str) Filesystem path or URL of the original data file. :channel_index: (numpy array 1D dtype='i') You can use this to order the columns of the signal in any way you want. It should have the same number of elements as the signal has columns. :class:`AnalogSignal` and :class:`Unit` objects can be given indexes as well so related objects can be linked together. *Optional attributes/properties*: :dtype: (numpy dtype or str) Override the dtype of the signal array. :copy: (bool) True by default. Note: Any other additional arguments are assumed to be user-specific metadata and stored in :attr:`annotations`. *Properties available on this object*: :sampling_rate: (quantity scalar) Number of samples per unit time. (1/:attr:`sampling_period`) :sampling_period: (quantity scalar) Interval between two samples. (1/:attr:`quantity scalar`) :duration: (Quantity) Signal duration, read-only. (size * :attr:`sampling_period`) :t_stop: (quantity scalar) Time when signal ends, read-only. (:attr:`t_start` + :attr:`duration`) :times: (quantity 1D) The time points of each sample of the signal, read-only. (:attr:`t_start` + arange(:attr:`shape`[0])/:attr:`sampling_rate`) :channel_indexes: (numpy array 1D dtype='i') The same as :attr:`channel_index`, read-only. *Slicing*: :class:`AnalogSignalArray` objects can be sliced. When taking a single row (dimension 1, e.g. [:, 0]), a :class:`AnalogSignal` is returned. When taking a single element, a :class:`~quantities.Quantity` is returned. Otherwise a :class:`AnalogSignalArray` (actually a view) is returned, with the same metadata, except that :attr:`t_start` is changed if the start index along dimension 1 is greater than 1. Getting a single item returns a :class:`~quantity.Quantity` scalar. *Operations available on this object*: == != + * / Constructs new :class:`AnalogSignalArray` from data. 
This is called whenever a new class:`AnalogSignalArray` is created from the constructor, but not when slicing. # could improve this test, what if units is a string? Initializes a newly constructed :class:`AnalogSignalArray` instance. The same as :attr:`channel_index`. Get a slice from :attr:`i` to :attr:`j`. Doesn't get called in Python 3, :meth:`__getitem__` is called instead Get the item or slice :attr:`i`. # extract an AnalogSignal # return a Quantity (for some reason quantities does not # return a Quantity in this case) # extract a quantity array # should be a better way to do this Creates a new AnalogSignal corresponding to the time slice of the original AnalogSignal between times t_start, t_stop. Note, that for numerical stability reasons if t_start, t_stop do not fall exactly on the time bins defined by the sampling_period they will be rounded to the nearest sampling bins. # checking start time and transforming to start index # checking stop time and transforming to stop index # we're going to send the list of indicies so that we get *copy* of the # sliced data Merge the another :class:`AnalogSignalArray` into this one. The :class:`AnalogSignalArray` objects are concatenated horizontally (column-wise, :func:`np.hstack`). If the attributes of the two :class:`AnalogSignalArray` are not compatible, and Exception is raised.
| 2.309999
| 2
|
influxdb/influxdbplugin.py
|
Odianosen25/appdaemon_custom_plugins
| 1
|
6625693
|
import asyncio
import copy
from influxdb_client import InfluxDBClient
from influxdb_client.client.write_api import SYNCHRONOUS
from datetime import datetime, timedelta
import iso8601
from appdaemon.appdaemon import AppDaemon
from appdaemon.plugin_management import PluginBase
import appdaemon.utils as utils
import traceback
# State values mapped to 1.0 when a state string cannot be parsed as a float
CONST_TRUE_STATES = ("on", "y", "yes", "true", "home", "opened", "unlocked", True)
# State values mapped to 0.0 when a state string cannot be parsed as a float
CONST_FALSE_STATES = ("off", "n", "no", "false", "away", "closed", "locked", False)
class InfluxdbPlugin(PluginBase):
    def __init__(self, ad: AppDaemon, name, args):
        """Validate the plugin configuration and prepare connection settings.

        Args:
            ad: The running AppDaemon instance.
            name: Plugin name assigned by AppDaemon.
            args: Plugin configuration dictionary from appdaemon.yaml.

        Raises:
            ValueError: If 'org'/'token' are missing or 'databases' is not a dict.
        """
        super().__init__(ad, name, args)
        self.AD = ad
        self.stopping = False
        self.config = args
        self.name = name
        self.initialized = False
        self.state = {}
        # per-namespace settings (tags, bucket, callback handle) filled in
        # by get_updates() once connected
        self._namespaces = {}
        # InfluxDB client and its write/query APIs are created lazily in
        # get_updates(); None means "not connected yet"
        self._client = None
        self._write_api = None
        self._query_api = None
        if "namespace" in self.config:
            self.namespace = self.config["namespace"]
        else:
            self.namespace = "default"
        self.logger.info("Influx Database Plugin Initializing")
        self._connection_url = self.config.get("connection_url", "http://127.0.0.1:8086")
        self._databases = self.config.get("databases", {})
        self._bucket = self.config.get("bucket")
        self._org = self.config.get("org")
        self._token = self.config.get("token")
        # org and token are mandatory for the InfluxDB 2.x client
        if not all([self._org, self._token]):
            raise ValueError("Cannot setup the Plugin, as all 'org' and 'token' settings must be given")
        if not isinstance(self._databases, dict):
            raise ValueError("The database setting is not Valid")
        self._timeout = self.config.get("timeout")
        self._connection_pool_maxsize = int(self.config.get("connection_pool_maxsize", 100))
        self._verify_ssl = self.config.get("verify_ssl", False)
        self._ssl_ca_cert = self.config.get("ssl_ca_cert")
        # guard against a pool too small to be useful
        if self._connection_pool_maxsize < 5:
            self.logger.warning(
                "Cannot use %s for Connection Pool, must be higher than 5. Reverting to 100",
                self._connection_pool_maxsize,
            )
            self._connection_pool_maxsize = 100
        self.loop = self.AD.loop  # get AD loop
        # static metadata reported back to AppDaemon via get_metadata()
        self.database_metadata = {
            "version": "1.0",
            "connection_url": self._connection_url,
            "bucket": self._bucket,
            "org": self._org,
            "timeout": self._timeout,
            "verify_ssl": self._verify_ssl,
            "ssl_ca_cert": self._ssl_ca_cert,
        }
def stop(self):
self.logger.debug("stop() called for %s", self.name)
self.stopping = True
# set to continue
self._event.set()
self.logger.info("Stopping Influx Database Plugin")
if self._client:
self._client.close()
#
# Placeholder for constraints
#
def list_constraints(self):
return []
#
# Get initial state
#
async def get_complete_state(self):
self.logger.debug("*** Sending Complete State: %s ***", self.state)
return copy.deepcopy(self.state)
async def get_metadata(self):
return self.database_metadata
#
# Utility gets called every second (or longer if configured
# Allows plugin to do any housekeeping required
#
def utility(self):
# self.logger.info("*** Utility ***".format(self.state))
return
#
# Handle state updates
#
    async def get_updates(self):
        """Main plugin loop: (re)connect to InfluxDB and register callbacks.

        Runs until stop() is called. While disconnected it retries every
        5 seconds; once connected it registers the influx/write, influx/read
        and influx/get_history services, subscribes to ``state_changed``
        events for each configured namespace, and notifies AppDaemon that
        the plugin has started.
        """
        already_notified = False
        first_time = True
        self.reading = False
        self._event = asyncio.Event()
        # set to continue
        self._event.set()
        while not self.stopping:
            # blocks after a successful connect (event cleared below) until
            # stop() sets the event again
            await self._event.wait()
            if self.stopping is True:
                return
            try:
                if self._client is None:  # it has not been set
                    # only pass options the user actually configured
                    client_options = {"connection_pool_maxsize": self._connection_pool_maxsize}
                    if self._timeout is not None:
                        client_options["timeout"] = self._timeout
                    if self._verify_ssl is True:
                        client_options["verify_ssl"] = True
                    if self._ssl_ca_cert is not None:
                        client_options["ssl_ca_cert"] = self._ssl_ca_cert
                    # client construction may block, so run it in the executor
                    self._client = await utils.run_in_executor(
                        self,
                        InfluxDBClient,
                        url=self._connection_url,
                        token=self._token,
                        org=self._org,
                        **client_options,
                    )
                    self._write_api = self._client.write_api(write_options=SYNCHRONOUS)
                    self._query_api = self._client.query_api()
                if self._client is not None:
                    self.logger.info("Connected to Database using URL %s", self._connection_url)
                    states = await self.get_complete_state()
                    self.AD.services.register_service(
                        self.namespace, "influx", "write", self.call_plugin_service,
                    )
                    self.AD.services.register_service(
                        self.namespace, "influx", "read", self.call_plugin_service,
                    )
                    self.AD.services.register_service(
                        self.namespace, "influx", "get_history", self.call_plugin_service,
                    )
                    # now we register for the different namespaces
                    for ns, settings in self._databases.items():
                        if ns not in self._namespaces:
                            self._namespaces[ns] = {}
                        # we check for tags; a single string is promoted to a list
                        ns_tags = settings.get("tags", [])
                        if isinstance(ns_tags, str):
                            ns_tags = [ns_tags]
                        self._namespaces[ns]["tags"] = ns_tags
                        self._namespaces[ns]["bucket"] = settings.get("bucket", self._bucket)
                        self._namespaces[ns]["handle"] = await self.AD.events.add_event_callback(
                            self.name, ns, self.event_callback, "state_changed", __silent=True, __namespace=ns,
                        )
                    await self.AD.plugins.notify_plugin_started(
                        self.name, self.namespace, self.database_metadata, states, first_time,
                    )
                    first_time = False
                    already_notified = False
                    self._event.clear()  # park the loop until stop() wakes it
                elif already_notified is False:
                    # only notify AppDaemon of the outage once per disconnect
                    await self.AD.plugins.notify_plugin_stopped(self.name, self.namespace)
                    already_notified = True
                    self.logger.warning("Could not connect to the Database, will attempt in 5 seconds")
            except Exception as e:
                self.logger.error("-" * 60)
                self.logger.error(
                    "Could not setup connection to database %s", self._connection_url,
                )
                self.logger.error("-" * 60)
                self.logger.error(e)
                self.logger.debug(traceback.format_exc())
                self.logger.error("-" * 60)
                # drop the half-built client so the next pass reconnects
                self._client = None
            await asyncio.sleep(5)
    async def event_callback(self, event, data, kwargs):
        """Handle a ``state_changed`` event and write the new state to InfluxDB.

        Non-numeric states are mapped via CONST_TRUE_STATES/CONST_FALSE_STATES
        to 1.0/0.0; anything unmappable is logged and dropped.
        """
        self.logger.debug("event_callback: %s %s %s", kwargs, event, data)
        _namespace = kwargs["__namespace"]
        entity_id = data["entity_id"]
        # check_entity_id() is defined outside this view (presumably filters
        # which entities are recorded) -- TODO confirm its contract
        if not await self.check_entity_id(_namespace, entity_id):
            return
        # NOTE(review): assumes "old_state" is always a dict; a first-ever
        # state event with old_state=None would raise here -- verify upstream
        if data["new_state"]["state"] == data["old_state"].get("state"):
            # nothing changed
            return
        bucket = self._namespaces[_namespace]["bucket"]  # bucket configured for this namespace
        tags = self._namespaces[_namespace]["tags"]
        state = data["new_state"]["state"]
        try:
            state = float(state)
        except Exception:
            # fall back to boolean-like state mapping
            if state in CONST_TRUE_STATES:
                state = 1.0
            elif state in CONST_FALSE_STATES:
                state = 0.0
            else:
                self.logger.warning(
                    f"Could not map {state} for {entity_id} Entity_ID to any valid data, and so will be ignored"
                )
                return
        attributes = data["new_state"]["attributes"]
        # NOTE(review): assumes every entity carries a friendly_name
        # attribute -- confirm against the event source
        friendly_name = attributes["friendly_name"]
        domain, _ = entity_id.split(".")
        lc = data["new_state"].get("last_changed")
        if lc is None:
            last_changed = await self.AD.sched.get_now()
        else:
            last_changed = iso8601.parse_date(lc)
        # always tag with the entity id; add configured attribute tags if present
        write_tags = {"entity_id": entity_id}
        for tag in tags:
            if tag in attributes:
                write_tags[tag] = attributes[tag]
        fields = {domain: state}
        if self.stopping is False:
            # fire-and-forget: database_write() logs its own failures
            asyncio.create_task(
                self.database_write(
                    bucket, measurement=friendly_name, tags=write_tags, fields=fields, timestamp=last_changed
                )
            )
#
# Service Call
#
async def call_plugin_service(self, namespace, domain, service, kwargs):
self.logger.debug(
"call_plugin_service() namespace=%s domain=%s service=%s kwargs=%s", namespace, domain, service, kwargs,
)
res = None
bucket = kwargs.pop("bucket", self._bucket)
if bucket is None:
raise ValueError("Bucket must be given to execute the service call %s", service)
if service == "write":
asyncio.create_task(self.database_write(bucket, **kwargs))
elif service == "read":
res = await self.database_read(bucket, **kwargs)
elif service == "get_history":
return await self.get_history(**kwargs)
return res
async def database_write(self, bucket, **kwargs):
"""Used to execute a database query"""
executed = False
measurement = kwargs.get("measurement")
tags = kwargs.get("tags")
fields = kwargs.get("fields")
ts = kwargs.get("timestamp", await self.AD.sched.get_now())
try:
write_data = {}
if measurement is not None:
write_data["measurement"] = measurement
if isinstance(tags, dict):
write_data["tags"] = tags
if isinstance(fields, dict):
write_data["fields"] = fields
write_data["time"] = ts
await asyncio.wait_for(
utils.run_in_executor(self, self._write_api.write, bucket, self._org, write_data), timeout=5
)
except Exception as e:
self.logger.error("-" * 60)
self.logger.error("Could not execute database write. %s %s", bucket, kwargs)
self.logger.error("-" * 60)
self.logger.error(e)
self.logger.debug(traceback.format_exc())
self.logger.error("-" * 60)
return executed
async def database_read(self, bucket, **kwargs):
"""Used to fetch data from a database"""
res = []
query = kwargs.get("query")
params = kwargs.get("params")
if query is not None and isinstance(params, dict):
try:
tables = await utils.run_in_executor(self, self._query_api.query, query, params=params)
for table in tables:
for record in table.records:
res.append(record)
except Exception as e:
self.logger.error("-" * 60)
self.logger.error("Could not execute database read for query %s", query)
self.logger.error("-" * 60)
self.logger.error(e)
self.logger.debug(traceback.format_exc())
self.logger.error("-" * 60)
else:
self.logger.warning("Could not execute Database Read, as Query and Params as Dictionay is needed")
return res
async def get_history(self, **kwargs):
    """Fetch historical records from InfluxDB.

    Either pass a ready-made Flux ``query`` together with a ``params`` dict,
    or let the method assemble a query from the remaining kwargs:

        entity_id:   filter on the "entity_id" tag
        bucket:      bucket to read (defaults to the plugin-wide bucket)
        measurement: filter on the measurement name
        field:       filter on the field name
        filter_tags: dict of extra tag-name -> value filters
        plus the time-window kwargs understood by get_history_time()

    Returns the list of records, or None when the read could not be
    performed.
    """
    tables = None
    try:
        entity_id = kwargs.get("entity_id")
        bucket = kwargs.get("bucket", self._bucket)
        measurement = kwargs.get("measurement")
        field = kwargs.get("field")
        filter_tags = kwargs.get("filter_tags")
        query = kwargs.get("query")
        params = kwargs.get("params")
        if query is None or not isinstance(params, dict):
            # only run this if the query is not given
            # first process time interval of the request
            start_time, end_time = self.get_history_time(**kwargs)
            if bucket is None:
                raise ValueError("The required bucket to be accessed must be given")
            params = {"_start": start_time, "_stop": end_time, "_desc": True}
            # NOTE(review): bucket/measurement/field are interpolated into the
            # Flux source via f-strings; presumably they come from trusted
            # configuration rather than untrusted user input - confirm.
            query = f"""
            from(bucket:"{bucket}") |> range(start: _start, stop: _stop)
            """
            if measurement is not None:
                query = query + f'|> filter(fn: (r) => r["_measurement"] == "{measurement}")'
            if field is not None:
                query = query + f'|> filter(fn: (r) => r["_field"] == "{field}")'
            if entity_id is not None:
                # need to use entity_id as tag
                params["_entity_id"] = entity_id
                query = query + '|> filter(fn: (r) => r["entity_id"] == _entity_id)'
            if isinstance(filter_tags, dict):
                read_tag = {}
                for tag, value in filter_tags.items():
                    # parameter names get a "_" prefix so the Flux query can
                    # reference them; the tag name itself keeps no prefix
                    if not tag.startswith("_"):
                        _tag = f"_{tag}"
                    else:
                        _tag = tag
                    read_tag[_tag] = value
                    striped_tag = _tag.lstrip("_")
                    query = query + f'|> filter(fn: (r) => r["{striped_tag}"] == {_tag})'
                # update the params
                params.update(read_tag)
            # specify descending order by time
            query = query + '|> sort(columns: ["_time"], desc: _desc)'
        tables = await self.database_read(bucket, query=query, params=params)
    except Exception as e:
        self.logger.error("-" * 60)
        self.logger.error("Could not execute database read. %s %s", bucket, kwargs)
        self.logger.error("-" * 60)
        self.logger.error(e)
        self.logger.debug(traceback.format_exc())
        self.logger.error("-" * 60)
    return tables
def get_history_time(self, **kwargs):
    """Work out the (start_time, end_time) window for a history request.

    kwargs:
        days: window length in days, used when one or both ends are
            missing (default 1)
        start_time: str (parsed via utils.str_to_dt) or datetime
            (localized to the AppDaemon timezone)
        end_time: same accepted types as start_time

    Returns a (start_time, end_time) tuple.
    Raises ValueError for unsupported start/end types.
    """
    days = kwargs.get("days", 1)
    start_time = kwargs.get("start_time")
    end_time = kwargs.get("end_time")
    if start_time is not None:
        if isinstance(start_time, str):
            start_time = utils.str_to_dt(start_time).replace(microsecond=0)
        # fix: the module does "from datetime import datetime", so the class
        # is named "datetime" here - "datetime.datetime" raised AttributeError
        elif isinstance(start_time, datetime):
            start_time = self.AD.tz.localize(start_time).replace(microsecond=0)
        else:
            raise ValueError("Invalid type for start time")
    if end_time is not None:
        if isinstance(end_time, str):
            end_time = utils.str_to_dt(end_time).replace(microsecond=0)
        elif isinstance(end_time, datetime):  # fix: same isinstance repair as above
            end_time = self.AD.tz.localize(end_time).replace(microsecond=0)
        else:
            raise ValueError("Invalid type for end time")
    if start_time is not None and end_time is None:
        # only the start given: the window extends forward by "days"
        end_time = start_time + timedelta(days=days)
    # if endtime is declared and start_time is not declared,
    # and days specified
    elif end_time is not None and start_time is None:
        start_time = end_time - timedelta(days=days)
    elif start_time is None and end_time is None:
        # neither given: the last "days" days up to now
        end_time = datetime.now()
        start_time = end_time - timedelta(days=days)
    return start_time, end_time
async def check_entity_id(self, namespace, entity_id):
    """Decide whether a state change for *entity_id* should be stored.

    Honors the per-namespace "exclude_entities" / "include_entities"
    filters (plain entity ids or simple '*' wildcards, single value or
    list). With no filter configured, everything is stored.
    """
    filters = self._databases[namespace]
    if filters is None:
        # no filtering configured for this namespace
        return True
    if "exclude_entities" in filters:
        excluded = filters["exclude_entities"]
        if isinstance(excluded, str):
            return not self.wildcard_check(excluded, entity_id)
        if isinstance(excluded, list):
            # store unless any exclusion pattern matches
            return not any(self.wildcard_check(pattern, entity_id) for pattern in excluded)
        return True
    if "include_entities" in filters:
        included = filters["include_entities"]
        if isinstance(included, str):
            return self.wildcard_check(included, entity_id)
        if isinstance(included, list):
            # store only when some inclusion pattern matches
            return any(self.wildcard_check(pattern, entity_id) for pattern in included)
        return False
    return True
def wildcard_check(self, wildcard, data):
    """Return True when *data* matches *wildcard*.

    A match is an exact string match, a 'prefix*' pattern matching the
    start of the data, or a '*suffix' pattern matching its end.
    """
    if wildcard == data:
        return True
    if wildcard.endswith("*") and data.startswith(wildcard[:-1]):
        return True
    if wildcard.startswith("*") and data.endswith(wildcard[1:]):
        return True
    return False
def get_namespace(self):
    """Return the namespace this plugin publishes its events under."""
    return self.namespace
@property
def get_write_api(self):
    """The underlying influxdb-client write API object (None until the client has connected)."""
    return self._write_api
@property
def get_query_api(self):
    """The underlying influxdb-client query API object (None until the client has connected)."""
    return self._query_api
|
import asyncio
import copy
from influxdb_client import InfluxDBClient
from influxdb_client.client.write_api import SYNCHRONOUS
from datetime import datetime, timedelta
import iso8601
from appdaemon.appdaemon import AppDaemon
from appdaemon.plugin_management import PluginBase
import appdaemon.utils as utils
import traceback
CONST_TRUE_STATES = ("on", "y", "yes", "true", "home", "opened", "unlocked", True)
CONST_FALSE_STATES = ("off", "n", "no", "false", "away", "closed", "locked", False)
class InfluxdbPlugin(PluginBase):
def __init__(self, ad: AppDaemon, name, args):
super().__init__(ad, name, args)
self.AD = ad
self.stopping = False
self.config = args
self.name = name
self.initialized = False
self.state = {}
self._namespaces = {}
self._client = None
self._write_api = None
self._query_api = None
if "namespace" in self.config:
self.namespace = self.config["namespace"]
else:
self.namespace = "default"
self.logger.info("Influx Database Plugin Initializing")
self._connection_url = self.config.get("connection_url", "http://127.0.0.1:8086")
self._databases = self.config.get("databases", {})
self._bucket = self.config.get("bucket")
self._org = self.config.get("org")
self._token = self.config.get("token")
if not all([self._org, self._token]):
raise ValueError("Cannot setup the Plugin, as all 'org' and 'token' settings must be given")
if not isinstance(self._databases, dict):
raise ValueError("The database setting is not Valid")
self._timeout = self.config.get("timeout")
self._connection_pool_maxsize = int(self.config.get("connection_pool_maxsize", 100))
self._verify_ssl = self.config.get("verify_ssl", False)
self._ssl_ca_cert = self.config.get("ssl_ca_cert")
if self._connection_pool_maxsize < 5:
self.logger.warning(
"Cannot use %s for Connection Pool, must be higher than 5. Reverting to 100",
self._connection_pool_maxsize,
)
self._connection_pool_maxsize = 100
self.loop = self.AD.loop # get AD loop
self.database_metadata = {
"version": "1.0",
"connection_url": self._connection_url,
"bucket": self._bucket,
"org": self._org,
"timeout": self._timeout,
"verify_ssl": self._verify_ssl,
"ssl_ca_cert": self._ssl_ca_cert,
}
def stop(self):
self.logger.debug("stop() called for %s", self.name)
self.stopping = True
# set to continue
self._event.set()
self.logger.info("Stopping Influx Database Plugin")
if self._client:
self._client.close()
#
# Placeholder for constraints
#
def list_constraints(self):
return []
#
# Get initial state
#
async def get_complete_state(self):
self.logger.debug("*** Sending Complete State: %s ***", self.state)
return copy.deepcopy(self.state)
async def get_metadata(self):
return self.database_metadata
#
# Utility gets called every second (or longer if configured
# Allows plugin to do any housekeeping required
#
def utility(self):
# self.logger.info("*** Utility ***".format(self.state))
return
#
# Handle state updates
#
async def get_updates(self):
already_notified = False
first_time = True
self.reading = False
self._event = asyncio.Event()
# set to continue
self._event.set()
while not self.stopping:
await self._event.wait()
if self.stopping is True:
return
try:
if self._client is None: # it has not been set
client_options = {"connection_pool_maxsize": self._connection_pool_maxsize}
if self._timeout is not None:
client_options["timeout"] = self._timeout
if self._verify_ssl is True:
client_options["verify_ssl"] = True
if self._ssl_ca_cert is not None:
client_options["ssl_ca_cert"] = self._ssl_ca_cert
self._client = await utils.run_in_executor(
self,
InfluxDBClient,
url=self._connection_url,
token=self._token,
org=self._org,
**client_options,
)
self._write_api = self._client.write_api(write_options=SYNCHRONOUS)
self._query_api = self._client.query_api()
if self._client is not None:
self.logger.info("Connected to Database using URL %s", self._connection_url)
states = await self.get_complete_state()
self.AD.services.register_service(
self.namespace, "influx", "write", self.call_plugin_service,
)
self.AD.services.register_service(
self.namespace, "influx", "read", self.call_plugin_service,
)
self.AD.services.register_service(
self.namespace, "influx", "get_history", self.call_plugin_service,
)
# now we register for the different namespaces
for ns, settings in self._databases.items():
if ns not in self._namespaces:
self._namespaces[ns] = {}
# we check for tags
ns_tags = settings.get("tags", [])
if isinstance(ns_tags, str):
ns_tags = [ns_tags]
self._namespaces[ns]["tags"] = ns_tags
self._namespaces[ns]["bucket"] = settings.get("bucket", self._bucket)
self._namespaces[ns]["handle"] = await self.AD.events.add_event_callback(
self.name, ns, self.event_callback, "state_changed", __silent=True, __namespace=ns,
)
await self.AD.plugins.notify_plugin_started(
self.name, self.namespace, self.database_metadata, states, first_time,
)
first_time = False
already_notified = False
self._event.clear() # it should stop
elif already_notified is False:
await self.AD.plugins.notify_plugin_stopped(self.name, self.namespace)
already_notified = True
self.logger.warning("Could not connect to the Database, will attempt in 5 seconds")
except Exception as e:
self.logger.error("-" * 60)
self.logger.error(
"Could not setup connection to database %s", self._connection_url,
)
self.logger.error("-" * 60)
self.logger.error(e)
self.logger.debug(traceback.format_exc())
self.logger.error("-" * 60)
self._client = None
await asyncio.sleep(5)
async def event_callback(self, event, data, kwargs):
self.logger.debug("event_callback: %s %s %s", kwargs, event, data)
_namespace = kwargs["__namespace"]
entity_id = data["entity_id"]
if not await self.check_entity_id(_namespace, entity_id):
return
if data["new_state"]["state"] == data["old_state"].get("state"):
# nothing changed
return
bucket = self._namespaces[_namespace]["bucket"] # get the databases in this namespace
tags = self._namespaces[_namespace]["tags"]
state = data["new_state"]["state"]
try:
state = float(state)
except Exception:
if state in CONST_TRUE_STATES:
state = 1.0
elif state in CONST_FALSE_STATES:
state = 0.0
else:
self.logger.warning(
f"Could not map {state} for {entity_id} Entity_ID to any valid data, and so will be ignored"
)
return
attributes = data["new_state"]["attributes"]
friendly_name = attributes["friendly_name"]
domain, _ = entity_id.split(".")
lc = data["new_state"].get("last_changed")
if lc is None:
last_changed = await self.AD.sched.get_now()
else:
last_changed = iso8601.parse_date(lc)
write_tags = {"entity_id": entity_id}
for tag in tags:
if tag in attributes:
write_tags[tag] = attributes[tag]
fields = {domain: state}
if self.stopping is False:
asyncio.create_task(
self.database_write(
bucket, measurement=friendly_name, tags=write_tags, fields=fields, timestamp=last_changed
)
)
#
# Service Call
#
async def call_plugin_service(self, namespace, domain, service, kwargs):
self.logger.debug(
"call_plugin_service() namespace=%s domain=%s service=%s kwargs=%s", namespace, domain, service, kwargs,
)
res = None
bucket = kwargs.pop("bucket", self._bucket)
if bucket is None:
raise ValueError("Bucket must be given to execute the service call %s", service)
if service == "write":
asyncio.create_task(self.database_write(bucket, **kwargs))
elif service == "read":
res = await self.database_read(bucket, **kwargs)
elif service == "get_history":
return await self.get_history(**kwargs)
return res
async def database_write(self, bucket, **kwargs):
"""Used to execute a database query"""
executed = False
measurement = kwargs.get("measurement")
tags = kwargs.get("tags")
fields = kwargs.get("fields")
ts = kwargs.get("timestamp", await self.AD.sched.get_now())
try:
write_data = {}
if measurement is not None:
write_data["measurement"] = measurement
if isinstance(tags, dict):
write_data["tags"] = tags
if isinstance(fields, dict):
write_data["fields"] = fields
write_data["time"] = ts
await asyncio.wait_for(
utils.run_in_executor(self, self._write_api.write, bucket, self._org, write_data), timeout=5
)
except Exception as e:
self.logger.error("-" * 60)
self.logger.error("Could not execute database write. %s %s", bucket, kwargs)
self.logger.error("-" * 60)
self.logger.error(e)
self.logger.debug(traceback.format_exc())
self.logger.error("-" * 60)
return executed
async def database_read(self, bucket, **kwargs):
"""Used to fetch data from a database"""
res = []
query = kwargs.get("query")
params = kwargs.get("params")
if query is not None and isinstance(params, dict):
try:
tables = await utils.run_in_executor(self, self._query_api.query, query, params=params)
for table in tables:
for record in table.records:
res.append(record)
except Exception as e:
self.logger.error("-" * 60)
self.logger.error("Could not execute database read for query %s", query)
self.logger.error("-" * 60)
self.logger.error(e)
self.logger.debug(traceback.format_exc())
self.logger.error("-" * 60)
else:
self.logger.warning("Could not execute Database Read, as Query and Params as Dictionay is needed")
return res
async def get_history(self, **kwargs):
"""Get the history of data from the database"""
tables = None
try:
entity_id = kwargs.get("entity_id")
bucket = kwargs.get("bucket", self._bucket)
measurement = kwargs.get("measurement")
field = kwargs.get("field")
filter_tags = kwargs.get("filter_tags")
query = kwargs.get("query")
params = kwargs.get("params")
if query is None or not isinstance(params, dict):
# only run this if the query is not given
# first process time interval of the request
start_time, end_time = self.get_history_time(**kwargs)
if bucket is None:
raise ValueError("The required bucket to be accessed must be given")
params = {"_start": start_time, "_stop": end_time, "_desc": True}
query = f"""
from(bucket:"{bucket}") |> range(start: _start, stop: _stop)
"""
if measurement is not None:
query = query + f'|> filter(fn: (r) => r["_measurement"] == "{measurement}")'
if field is not None:
query = query + f'|> filter(fn: (r) => r["_field"] == "{field}")'
if entity_id is not None:
# need to use entity_id as tag
params["_entity_id"] = entity_id
query = query + '|> filter(fn: (r) => r["entity_id"] == _entity_id)'
if isinstance(filter_tags, dict):
read_tag = {}
for tag, value in filter_tags.items():
if not tag.startswith("_"):
_tag = f"_{tag}"
else:
_tag = tag
read_tag[_tag] = value
striped_tag = _tag.lstrip("_")
query = query + f'|> filter(fn: (r) => r["{striped_tag}"] == {_tag})'
# update the params
params.update(read_tag)
# specify decending order by time
query = query + '|> sort(columns: ["_time"], desc: _desc)'
tables = await self.database_read(bucket, query=query, params=params)
except Exception as e:
self.logger.error("-" * 60)
self.logger.error("Could not execute database read. %s %s", bucket, kwargs)
self.logger.error("-" * 60)
self.logger.error(e)
self.logger.debug(traceback.format_exc())
self.logger.error("-" * 60)
return tables
def get_history_time(self, **kwargs):
days = kwargs.get("days", 1)
start_time = kwargs.get("start_time")
end_time = kwargs.get("end_time")
if start_time is not None:
if isinstance(start_time, str):
start_time = utils.str_to_dt(start_time).replace(microsecond=0)
elif isinstance(start_time, datetime.datetime):
start_time = self.AD.tz.localize(start_time).replace(microsecond=0)
else:
raise ValueError("Invalid type for start time")
if end_time is not None:
if isinstance(end_time, str):
end_time = utils.str_to_dt(end_time).replace(microsecond=0)
elif isinstance(end_time, datetime.datetime):
end_time = self.AD.tz.localize(end_time).replace(microsecond=0)
else:
raise ValueError("Invalid type for end time")
if start_time is not None and end_time is None:
end_time = start_time + timedelta(days=days)
# if endtime is declared and start_time is not declared,
# and days specified
elif end_time is not None and start_time is None:
start_time = end_time - timedelta(days=days)
elif start_time is None and end_time is None:
end_time = datetime.now()
start_time = end_time - timedelta(days=days)
return start_time, end_time
async def check_entity_id(self, namespace, entity_id):
"""Check if to store the entity's data"""
execute = True
if self._databases[namespace] is None:
# there is no filers used for the database
pass
elif "exclude_entities" in self._databases[namespace]:
excluded_entities = self._databases[namespace]["exclude_entities"]
if isinstance(excluded_entities, str):
execute = self.wildcard_check(excluded_entities, entity_id)
execute = not execute # invert it
elif isinstance(excluded_entities, list):
for entity in excluded_entities:
execute = self.wildcard_check(entity, entity_id)
execute = not execute # invert it
if execute is False:
break
elif "include_entities" in self._databases[namespace]:
execute = False
included_entities = self._databases[namespace]["include_entities"]
if isinstance(included_entities, str):
execute = self.wildcard_check(included_entities, entity_id)
elif isinstance(included_entities, list):
for entity in included_entities:
execute = self.wildcard_check(entity, entity_id)
if execute is True:
break
return execute
def wildcard_check(self, wildcard, data):
"""Used to check for if the data is within the wildcard"""
execute = False
if wildcard == data:
execute = True
elif wildcard.endswith("*") and data.startswith(wildcard[:-1]):
execute = True
elif wildcard.startswith("*") and data.endswith(wildcard[1:]):
execute = True
return execute
def get_namespace(self):
return self.namespace
@property
def get_write_api(self):
return self._write_api
@property
def get_query_api(self):
return self._query_api
|
en
| 0.749743
|
# get AD loop # set to continue # # Placeholder for constraints # # # Get initial state # # # Utility gets called every second (or longer if configured # Allows plugin to do any housekeeping required # # self.logger.info("*** Utility ***".format(self.state)) # # Handle state updates # # set to continue # it has not been set # now we register for the different namespaces # we check for tags # it should stop # nothing changed # get the databases in this namespace # # Service Call # Used to execute a database query Used to fetch data from a database Get the history of data from the database # only run this if the query is not given # first process time interval of the request from(bucket:"{bucket}") |> range(start: _start, stop: _stop) # need to use entity_id as tag # update the params # specify decending order by time # if endtime is declared and start_time is not declared, # and days specified Check if to store the entity's data # there is no filers used for the database # invert it # invert it Used to check for if the data is within the wildcard
| 2.05077
| 2
|
final_project/machinetranslation/translator.py
|
ankur1198/xzceb-flask_eng_fr
| 0
|
6625694
|
<gh_stars>0
"""Peer graded Assognment"""
import json
import os
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from dotenv import load_dotenv
load_dotenv()
apikey = '<KEY>'
url = 'https://api.us-south.language-translator.watson.cloud.ibm.com/instances/113e5b0a-69a2-4f4e-b8d3-a47985310cd6'
authenticator = IAMAuthenticator(apikey)
language_translator = LanguageTranslatorV3(
version='2018-05-01',
authenticator=authenticator
)
language_translator.set_service_url(url)
def english_to_french(englishText):
    """Translate the given English text into French via Watson."""
    result = language_translator.translate(text=englishText, model_id='en-fr').get_result()
    translations = result.get("translations")
    return translations[0].get("translation")
def french_to_english(frenchText):
    """Translate the given French text into English via Watson."""
    result = language_translator.translate(text=frenchText, model_id='fr-en').get_result()
    translations = result.get("translations")
    return translations[0].get("translation")
print()
print("\n")
|
"""Peer graded Assognment"""
import json
import os
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from dotenv import load_dotenv
load_dotenv()
apikey = '<KEY>'
url = 'https://api.us-south.language-translator.watson.cloud.ibm.com/instances/113e5b0a-69a2-4f4e-b8d3-a47985310cd6'
authenticator = IAMAuthenticator(apikey)
language_translator = LanguageTranslatorV3(
version='2018-05-01',
authenticator=authenticator
)
language_translator.set_service_url(url)
def english_to_french(englishText):
"""Translates From English to French"""
#englishText = input()
frenchText = language_translator.translate(
text=englishText,
model_id= 'en-fr').get_result()
return frenchText.get("translations")[0].get("translation")
def french_to_english(frenchText):
"""Translates From French to English"""
#frenchText = input()
englishText = language_translator.translate(
text=frenchText,
model_id= 'fr-en').get_result()
return englishText.get("translations")[0].get("translation")
print()
print("\n")
|
en
| 0.751874
|
Peer graded Assognment Translates From English to French #englishText = input() Translates From French to English #frenchText = input()
| 2.826004
| 3
|
mqtt-poly.py
|
therealmysteryman/udi-mqtt-poly
| 0
|
6625695
|
<gh_stars>0
#!/usr/bin/env python3
import polyinterface
import sys
import logging
import paho.mqtt.client as mqtt
import json
import yaml
LOGGER = polyinterface.LOGGER
class Controller(polyinterface.Controller):
    """Polyglot controller that bridges MQTT devices into ISY nodes.

    Reads the device list from customParams (inline JSON or a YAML file),
    creates one node per device definition and relays status/command
    traffic over an MQTT broker.
    """

    def __init__(self, polyglot):
        super().__init__(polyglot)
        self.name = 'MQTT Controller Sensor Raw'
        self.address = 'mqctrl'
        self.primary = self.address
        self.mqtt_server = 'localhost'
        self.mqtt_port = 1883
        self.mqtt_user = None
        self.mqtt_password = None
        self.devlist = None
        # example: [ {'id': 'sonoff1', 'type': 'switch', 'status_topic': 'stat/sonoff1/power', 'cmd_topic': 'cmnd/sonoff1/power'} ]
        self.status_topics = []
        self.mqttc = None

    def start(self):
        """Validate configuration, create device nodes and connect to MQTT.

        Returns True on success, False on any configuration/connection error.
        """
        LOGGER.info('Started MQTT controller')
        params = self.polyConfig['customParams']
        if 'mqtt_server' in params:
            self.mqtt_server = params['mqtt_server']
        if 'mqtt_port' in params:
            self.mqtt_port = int(params['mqtt_port'])
        if 'mqtt_user' not in params:
            LOGGER.error('mqtt_user must be configured')
            return False
        if 'mqtt_password' not in params:
            LOGGER.error('mqtt_password must be configured')
            return False
        self.mqtt_user = params['mqtt_user']
        self.mqtt_password = params['mqtt_password']
        if not self._load_devlist(params):
            return False
        self.mqttc = mqtt.Client()
        self.mqttc.on_connect = self._on_connect
        self.mqttc.on_disconnect = self._on_disconnect
        self.mqttc.on_message = self._on_message
        self.mqttc.is_connected = False
        # one node class per supported device "type" - replaces the long
        # if/elif chain that repeated the same add logic ten times
        node_classes = {
            'switch': MQSwitch,
            'sensor': MQSensor,
            'flag': MQFlag,
            'TempHumid': MQdht,
            'Temp': MQds,
            'TempHumidPress': MQbme,
            'distance': MQhcsr,
            'analog': MQAnalog,
            's31': MQs31,
            'raw': MQraw,
        }
        for dev in self.devlist:
            if 'id' not in dev or 'status_topic' not in dev or 'cmd_topic' not in dev or 'type' not in dev:
                LOGGER.error('Invalid device definition: {}'.format(json.dumps(dev)))
                continue
            name = dev['name'] if 'name' in dev else dev['id']
            address = dev['id'].lower().replace('_', '')[:14]
            node_class = node_classes.get(dev['type'])
            if node_class is None:
                LOGGER.error('Device type {} is not yet supported'.format(dev['type']))
                continue
            # fix: the original tested "not address is self.nodes" (an identity
            # comparison, always True); membership is what is meant here
            if address not in self.nodes:
                LOGGER.info('Adding {} {}'.format(dev['type'], name))
                self.addNode(node_class(self, self.address, address, name, dev))
            self.status_topics.append(dev['status_topic'])
        LOGGER.info('Done adding nodes, connecting to MQTT broker...')
        self.mqttc.username_pw_set(self.mqtt_user, self.mqtt_password)
        try:
            self.mqttc.connect(self.mqtt_server, self.mqtt_port, 10)
            self.mqttc.loop_start()
        except Exception as ex:
            LOGGER.error('Error connecting to Poly MQTT broker {}'.format(ex))
            return False
        return True

    def _load_devlist(self, params):
        """Populate self.devlist from a YAML 'devfile' or inline JSON 'devlist'.

        Returns True on success, False (after logging) on any error.
        """
        if 'devfile' in params:
            try:
                # "with" guarantees the file is closed even if reading fails
                # (the original leaked the handle when parsing raised)
                with open(params['devfile']) as f:
                    raw = f.read()
            except Exception as ex:
                LOGGER.error('Failed to open {}: {}'.format(params['devfile'], ex))
                return False
            try:
                data = yaml.safe_load(raw)
            except Exception as ex:
                LOGGER.error('Failed to parse {} content: {}'.format(params['devfile'], ex))
                return False
            if 'devices' not in data:
                LOGGER.error('Manual discovery file {} is missing bulbs section'.format(params['devfile']))
                return False
            self.devlist = data['devices']
        elif 'devlist' in params:
            try:
                self.devlist = json.loads(params['devlist'])
            except Exception as ex:
                LOGGER.error('Failed to parse the devlist: {}'.format(ex))
                return False
        else:
            LOGGER.error('devlist must be configured')
            return False
        return True

    def _on_connect(self, mqttc, userdata, flags, rc):
        """Paho callback: subscribe to every status topic and query all nodes."""
        if rc == 0:
            LOGGER.info('Poly MQTT Connected, subscribing...')
            self.mqttc.is_connected = True
            results = []
            for stopic in self.status_topics:
                results.append((stopic, tuple(self.mqttc.subscribe(stopic))))
            for (topic, (result, mid)) in results:
                if result == 0:
                    LOGGER.info('Subscribed to {} MID: {}, res: {}'.format(topic, mid, result))
                else:
                    LOGGER.error('Failed to subscribe {} MID: {}, res: {}'.format(topic, mid, result))
            # refresh every device node now that the broker link is up
            for node in self.nodes:
                if self.nodes[node].address != self.address:
                    self.nodes[node].query()
        else:
            LOGGER.error('Poly MQTT Connect failed')

    def _on_disconnect(self, mqttc, userdata, rc):
        """Paho callback: attempt a reconnect on unexpected disconnects."""
        self.mqttc.is_connected = False
        if rc != 0:
            LOGGER.warning('Poly MQTT disconnected, trying to re-connect')
            try:
                self.mqttc.reconnect()
            except Exception as ex:
                LOGGER.error('Error connecting to Poly MQTT broker {}'.format(ex))
                return False
        else:
            LOGGER.info('Poly MQTT graceful disconnection')

    def _on_message(self, mqttc, userdata, message):
        """Paho callback: route a status payload to the matching node."""
        topic = message.topic
        payload = message.payload.decode('utf-8')
        LOGGER.debug('Received {} from {}'.format(payload, topic))
        try:
            self.nodes[self._dev_by_topic(topic)].updateInfo(payload)
        except Exception as ex:
            LOGGER.error('Failed to process message {}'.format(ex))

    def _dev_by_topic(self, topic):
        """Return the node address for a status topic, or None if unknown."""
        for dev in self.devlist:
            if dev['status_topic'] == topic:
                # fix: build the address exactly as addNode does (including the
                # underscore strip) so ids containing '_' resolve correctly
                return dev['id'].lower().replace('_', '')[:14]
        return None

    def mqtt_pub(self, topic, message):
        """Publish *message* to *topic* (no retain)."""
        self.mqttc.publish(topic, message, retain=False)

    def stop(self):
        """Shut down the MQTT client loop."""
        self.mqttc.loop_stop()
        self.mqttc.disconnect()
        LOGGER.info('MQTT is stopping')

    def updateInfo(self):
        pass

    def query(self, command=None):
        for node in self.nodes:
            self.nodes[node].reportDrivers()

    def discover(self, command=None):
        pass

    id = 'MQCTRL'
    commands = {'DISCOVER': discover}
    drivers = [{'driver': 'ST', 'value': 1, 'uom': 2}]
class MQSwitch(polyinterface.Node):
    """ISY node for a simple MQTT on/off switch."""

    def __init__(self, controller, primary, address, name, device):
        super().__init__(controller, primary, address, name)
        self.cmd_topic = device['cmd_topic']
        self.on = False

    def start(self):
        pass

    def updateInfo(self, payload):
        """Mirror a status payload ('ON'/'OFF') into the node state."""
        if payload not in ('ON', 'OFF'):
            LOGGER.error('Invalid payload {}'.format(payload))
            return
        is_on = payload == 'ON'
        if is_on != self.on:
            # report DON/DOF only on an actual state transition
            self.reportCmd('DON' if is_on else 'DOF')
            self.on = is_on
        self.setDriver('ST', 100 if is_on else 0)

    def set_on(self, command):
        """ISY DON command: publish ON to the device."""
        self.on = True
        self.controller.mqtt_pub(self.cmd_topic, 'ON')

    def set_off(self, command):
        """ISY DOF command: publish OFF to the device."""
        self.on = False
        self.controller.mqtt_pub(self.cmd_topic, 'OFF')

    def query(self, command=None):
        """Ask the device to re-publish its state, then report drivers."""
        self.controller.mqtt_pub(self.cmd_topic, '')
        self.reportDrivers()

    drivers = [{'driver': 'ST', 'value': 0, 'uom': 78}]
    id = 'MQSW'
    hint = [4, 2, 0, 0]
    commands = {'QUERY': query, 'DON': set_on, 'DOF': set_off}
class MQSensor(polyinterface.Node):
def __init__(self, controller, primary, address, name, device):
super().__init__(controller, primary, address, name)
self.cmd_topic = device['cmd_topic']
self.on = False
self.motion = False
def start(self):
pass
def updateInfo(self, payload):
try:
data = json.loads(payload)
except Exception as ex:
LOGGER.error('Failed to parse MQTT Payload as Json: {} {}'.format(ex, payload))
return False
# motion detector
if 'motion' in data:
if data['motion'] == 'standby':
self.setDriver('ST', 0)
if self.motion:
self.motion = False
self.reportCmd('DOF')
else:
self.setDriver('ST', 1)
if not self.motion:
self.motion = True
self.reportCmd('DON')
else:
self.setDriver('ST', 0)
# temperature
if 'temperature' in data:
self.setDriver('CLITEMP', data['temperature'])
# heatIndex
if 'heatIndex' in data:
self.setDriver('GPV', data['heatIndex'])
# humidity
if 'humidity' in data:
self.setDriver('CLIHUM', data['humidity'])
# light detecor reading
if 'ldr' in data:
self.setDriver('LUMIN', data['ldr'])
# LED
if 'state' in data:
# LED is present
if data['state'] == 'ON':
self.setDriver('GV0', 100)
else:
self.setDriver('GV0', 0)
if 'brightness' in data:
self.setDriver('GV1', data['brightness'])
if 'color' in data:
if 'r' in data['color']:
self.setDriver('GV2', data['color']['r'])
if 'g' in data['color']:
self.setDriver('GV3', data['color']['g'])
if 'b' in data['color']:
self.setDriver('GV4', data['color']['b'])
def led_on(self, command):
self.controller.mqtt_pub(self.cmd_topic, json.dumps({'state': 'ON'}))
def led_off(self, command):
self.controller.mqtt_pub(self.cmd_topic, json.dumps({'state': 'OFF'}))
def led_set(self, command):
query = command.get('query')
red = self._check_limit(int(query.get('R.uom100')))
green = self._check_limit(int(query.get('G.uom100')))
blue = self._check_limit(int(query.get('B.uom100')))
brightness = self._check_limit(int(query.get('I.uom100')))
transition = int(query.get('D.uom58'))
flash = int(query.get('F.uom58'))
cmd = { 'state': 'ON', 'brightness': brightness, 'color': {'r': red, 'g': green, 'b': blue } }
if transition > 0:
cmd['transition'] = transition
if flash > 0:
cmd['flash'] = flash
self.controller.mqtt_pub(self.cmd_topic, json.dumps(cmd))
def _check_limit(self, value):
if value > 255:
return 255
elif value < 0:
return 0
else:
return value
    def query(self, command=None):
        # Push all current driver values back to the ISY.
        self.reportDrivers()
drivers = [{'driver': 'ST', 'value': 0, 'uom': 2},
{'driver': 'CLITEMP', 'value': 0, 'uom': 17},
{'driver': 'GPV', 'value': 0, 'uom': 17},
{'driver': 'CLIHUM', 'value': 0, 'uom': 22},
{'driver': 'LUMIN', 'value': 0, 'uom': 36},
{'driver': 'GV0', 'value': 0, 'uom': 78},
{'driver': 'GV1', 'value': 0, 'uom': 100},
{'driver': 'GV2', 'value': 0, 'uom': 100},
{'driver': 'GV3', 'value': 0, 'uom': 100},
{'driver': 'GV4', 'value': 0, 'uom': 100}
]
id = 'MQSENS'
commands = {
'QUERY': query, 'DON': led_on, 'DOF': led_off, 'SETLED': led_set
}
# this is meant as a flag for if you have a sensor or condition on your IOT device
# which you want the device program rather than the ISY to flag
# FLAG-0 = OK
# FLAG-1 = NOK
# FLAG-2 = LO
# FLAG-3 = HI
# FLAG-4 = ERR
# FLAG-5 = IN
# FLAG-6 = OUT
# FLAG-7 = UP
# FLAG-8 = DOWN
# FLAG-9 = TRIGGER
# FLAG-10 = ON
# FLAG-11 = OFF
# FLAG-12 = ---
# payload is direct (like SW) not JSON encoded (like SENSOR)
# example device: liquid float {OK, LO, HI}
# example condition: IOT devices sensor connections {OK, NOK, ERR(OR)}
class MQFlag(polyinterface.Node):
    """Status-flag node.

    The device publishes one of a fixed set of keywords directly as the
    payload (not JSON); ST is set to the matching index (see the FLAG-n
    table in the comments above).  Unknown payloads are logged and
    reported as ERR (4).
    """

    # payload keyword -> ST driver value
    STATUS = {
        'OK': 0, 'NOK': 1, 'LO': 2, 'HI': 3, 'ERR': 4, 'IN': 5,
        'OUT': 6, 'UP': 7, 'DOWN': 8, 'TRIGGER': 9, 'ON': 10,
        'OFF': 11, '---': 12,
    }

    def __init__(self, controller, primary, address, name, device):
        super().__init__(controller, primary, address, name)
        self.cmd_topic = device['cmd_topic']

    def start(self):
        pass

    def updateInfo(self, payload):
        # BUGFIX: a literal 'ERR' payload (documented as FLAG-4 above) was
        # previously rejected as invalid; it now maps to 4 like the table says.
        if payload in self.STATUS:
            self.setDriver('ST', self.STATUS[payload])
        else:
            LOGGER.error('Invalid payload {}'.format(payload))
            self.setDriver('ST', 4)

    def reset_send(self, command):
        # Ask the device to reset its flag condition.
        self.controller.mqtt_pub(self.cmd_topic, 'RESET')

    def query(self, command=None):
        # Empty publish prompts the device to re-report, then push drivers.
        self.controller.mqtt_pub(self.cmd_topic, '')
        self.reportDrivers()

    drivers = [{'driver': 'ST', 'value': 0, 'uom': 25}
               ]
    id = 'MQFLAG'
    commands = {
        'QUERY': query, 'RESET': reset_send
    }
# This class is an attempt to add support for temperature/humidity sensors.
# It was originally developed with a DHT22, but should work with
# any of the following, since they I believe they get identified by tomaso the same:
# DHT21, AM2301, AM2302, AM2321
# Should be easy to add other temp/humdity sensors.
class MQdht(polyinterface.Node):
    """Temperature/humidity node for the DHT family (reported as AM2301)."""

    def __init__(self, controller, primary, address, name, device):
        super().__init__(controller, primary, address, name)
        self.on = False

    def start(self):
        pass

    def updateInfo(self, payload):
        """Parse JSON telemetry and publish temperature and humidity."""
        try:
            readings = json.loads(payload)
        except Exception as ex:
            LOGGER.error('Failed to parse MQTT Payload as Json: {} {}'.format(ex, payload))
            return False
        if 'AM2301' not in readings:
            # Sensor section missing -> mark the node offline.
            self.setDriver('ST', 0)
            return
        sensor = readings['AM2301']
        self.setDriver('ST', 1)
        self.setDriver('CLITEMP', sensor['Temperature'])
        self.setDriver('CLIHUM', sensor['Humidity'])

    def query(self, command=None):
        self.reportDrivers()

    drivers = [
        {'driver': 'ST', 'value': 0, 'uom': 2},
        {'driver': 'CLITEMP', 'value': 0, 'uom': 17},
        {'driver': 'CLIHUM', 'value': 0, 'uom': 22},
    ]
    id = 'MQDHT'
    commands = {'QUERY': query}
# This class is an attempt to add support for temperature only sensors.
# was made for DS18B20 waterproof
class MQds(polyinterface.Node):
    """Temperature-only node (made for the waterproof DS18B20)."""

    def __init__(self, controller, primary, address, name, device):
        super().__init__(controller, primary, address, name)
        self.on = False

    def start(self):
        pass

    def updateInfo(self, payload):
        """Parse JSON telemetry and publish the DS18B20 temperature."""
        try:
            readings = json.loads(payload)
        except Exception as ex:
            LOGGER.error('Failed to parse MQTT Payload as Json: {} {}'.format(ex, payload))
            return False
        if 'DS18B20' not in readings:
            # Sensor section missing -> mark the node offline.
            self.setDriver('ST', 0)
            return
        self.setDriver('ST', 1)
        self.setDriver('CLITEMP', readings['DS18B20']['Temperature'])

    def query(self, command=None):
        self.reportDrivers()

    drivers = [
        {'driver': 'ST', 'value': 0, 'uom': 2},
        {'driver': 'CLITEMP', 'value': 0, 'uom': 17},
    ]
    id = 'MQDS'
    commands = {'QUERY': query}
# This class is an attempt to add support for temperature/humidity/pressure sensors.
# Currently supports the BME280. Could be extended to accept others.
class MQbme(polyinterface.Node):
    """Temperature/humidity/pressure node for the BME280."""

    def __init__(self, controller, primary, address, name, device):
        super().__init__(controller, primary, address, name)
        self.on = False

    def start(self):
        pass

    def updateInfo(self, payload):
        """Parse JSON telemetry; publish temp, humidity and pressure."""
        try:
            readings = json.loads(payload)
        except Exception as ex:
            LOGGER.error('Failed to parse MQTT Payload as Json: {} {}'.format(ex, payload))
            return False
        if 'BME280' not in readings:
            # Sensor section missing -> mark the node offline.
            self.setDriver('ST', 0)
            return
        sensor = readings['BME280']
        self.setDriver('ST', 1)
        self.setDriver('CLITEMP', sensor['Temperature'])
        self.setDriver('CLIHUM', sensor['Humidity'])
        # Convert hPa to inches of mercury, rounded to two decimals.
        inhg = format(round(float('.02952998751') * float(sensor['Pressure']), 2))
        self.setDriver('BARPRES', inhg)

    def query(self, command=None):
        self.reportDrivers()

    drivers = [
        {'driver': 'ST', 'value': 0, 'uom': 2},
        {'driver': 'CLITEMP', 'value': 0, 'uom': 17},
        {'driver': 'CLIHUM', 'value': 0, 'uom': 22},
        {'driver': 'BARPRES', 'value': 0, 'uom': 23},
    ]
    id = 'MQBME'
    commands = {'QUERY': query}
# This class is an attempt to add support for HC-SR04 Ultrasonic Sensor.
# Returns distance in centimeters.
class MQhcsr(polyinterface.Node):
    """HC-SR04 ultrasonic range finder; reports distance in centimeters."""

    def __init__(self, controller, primary, address, name, device):
        super().__init__(controller, primary, address, name)
        self.on = False

    def start(self):
        pass

    def updateInfo(self, payload):
        """Parse JSON telemetry and publish the SR04 distance reading."""
        try:
            readings = json.loads(payload)
        except Exception as ex:
            LOGGER.error('Failed to parse MQTT Payload as Json: {} {}'.format(ex, payload))
            return False
        if 'SR04' not in readings:
            # Sensor section missing -> offline, zero the reading.
            self.setDriver('ST', 0)
            self.setDriver('DISTANC', 0)
            return
        self.setDriver('ST', 1)
        self.setDriver('DISTANC', readings['SR04']['Distance'])

    def query(self, command=None):
        self.reportDrivers()

    drivers = [
        {'driver': 'ST', 'value': 0, 'uom': 2},
        {'driver': 'DISTANC', 'value': 0, 'uom': 5},
    ]
    id = 'MQHCSR'
    commands = {'QUERY': query}
# General purpose Analog input using ADC.
# Setting max value in editor.xml as 1024, as that would be the max for
# onboard ADC, but that might need to be changed for external ADCs.
class MQAnalog(polyinterface.Node):
    """General-purpose analog (ADC) input node; reports the raw reading."""

    def __init__(self, controller, primary, address, name, device):
        super().__init__(controller, primary, address, name)
        self.on = False

    def start(self):
        pass

    def updateInfo(self, payload):
        """Parse JSON telemetry and publish the A0 analog value."""
        try:
            readings = json.loads(payload)
        except Exception as ex:
            LOGGER.error('Failed to parse MQTT Payload as Json: {} {}'.format(ex, payload))
            return False
        if 'ANALOG' not in readings:
            # Sensor section missing -> offline, zero the reading.
            self.setDriver('ST', 0)
            self.setDriver('GPV', 0)
            return
        self.setDriver('ST', 1)
        self.setDriver('GPV', readings['ANALOG']['A0'])

    def query(self, command=None):
        self.reportDrivers()

    # GPV = "General Purpose Value"; UOM 56 = raw value from the device.
    drivers = [
        {'driver': 'ST', 'value': 0, 'uom': 2},
        {'driver': 'GPV', 'value': 0, 'uom': 56},
    ]
    id = 'MQANAL'
    commands = {'QUERY': query}
# Reading the telemetry data for a Sonoff S31 (use the switch for control)
class MQs31(polyinterface.Node):
    """Telemetry node for a Sonoff S31 power monitor (read-only)."""

    # ISY driver -> key inside the ENERGY telemetry block.
    ENERGY_DRIVERS = (
        ('CC', 'Current'),
        ('CPW', 'Power'),
        ('CV', 'Voltage'),
        ('PF', 'Factor'),
        ('TPW', 'Total'),
    )

    def __init__(self, controller, primary, address, name, device):
        super().__init__(controller, primary, address, name)
        self.on = False

    def start(self):
        pass

    def updateInfo(self, payload):
        """Parse JSON telemetry and publish all energy readings."""
        try:
            readings = json.loads(payload)
        except Exception as ex:
            LOGGER.error('Failed to parse MQTT Payload as Json: {} {}'.format(ex, payload))
            return False
        if 'ENERGY' not in readings:
            # Telemetry section missing -> mark the node offline.
            self.setDriver('ST', 0)
            return
        energy = readings['ENERGY']
        self.setDriver('ST', 1)
        for driver, key in self.ENERGY_DRIVERS:
            self.setDriver(driver, energy[key])

    def query(self, command=None):
        self.reportDrivers()

    drivers = [
        {'driver': 'ST', 'value': 0, 'uom': 2},
        {'driver': 'CC', 'value': 0, 'uom': 1},
        {'driver': 'CPW', 'value': 0, 'uom': 73},
        {'driver': 'CV', 'value': 0, 'uom': 72},
        {'driver': 'PF', 'value': 0, 'uom': 53},
        {'driver': 'TPW', 'value': 0, 'uom': 33},
    ]
    id = 'MQS31'
    commands = {'QUERY': query}
class MQraw(polyinterface.Node):
    """Node reporting a raw integer value published directly (not JSON)."""

    def __init__(self, controller, primary, address, name, device):
        super().__init__(controller, primary, address, name)
        self.cmd_topic = device['cmd_topic']
        self.on = False

    def start(self):
        pass

    def updateInfo(self, payload):
        # The payload is a bare integer string, not JSON.
        try:
            self.setDriver('ST', int(payload))
        except Exception as ex:
            # BUGFIX: the message claimed JSON parsing although this node
            # parses a plain integer payload.
            LOGGER.error('Failed to parse MQTT Payload as integer: {} {}'.format(ex, payload))

    def query(self, command=None):
        # Empty publish prompts the device to re-report, then push drivers.
        self.controller.mqtt_pub(self.cmd_topic, '')
        self.reportDrivers()

    drivers = [{'driver': 'ST', 'value': 0, 'uom': 56}
               ]
    id = 'MQSR'
    hint = [4, 2, 0, 0]
    commands = {
        'QUERY': query
    }
if __name__ == "__main__":
    # Entry point: create the Polyglot interface, start it, and hand
    # control to the node-server controller until interrupted.
    try:
        polyglot = polyinterface.Interface('MQTT')
        polyglot.start()
        control = Controller(polyglot)
        control.runForever()
    except (KeyboardInterrupt, SystemExit):
        # Normal shutdown path (Ctrl-C or service stop).
        sys.exit(0)
|
#!/usr/bin/env python3
import polyinterface
import sys
import logging
import paho.mqtt.client as mqtt
import json
import yaml
LOGGER = polyinterface.LOGGER
class Controller(polyinterface.Controller):
    """Polyglot controller node.

    Reads broker credentials and a device list from the node server's
    custom parameters, creates one child node per configured device, and
    bridges MQTT status messages to the matching node's updateInfo().
    """

    def __init__(self, polyglot):
        super().__init__(polyglot)
        self.name = 'MQTT Controller Sensor Raw'
        self.address = 'mqctrl'
        self.primary = self.address
        # Broker connection defaults; overridden by customParams in start().
        self.mqtt_server = 'localhost'
        self.mqtt_port = 1883
        self.mqtt_user = None
        self.mqtt_password = None
        self.devlist = None
        # example: [ {'id': 'sonoff1', 'type': 'switch', 'status_topic': 'stat/sonoff1/power', 'cmd_topic': 'cmnd/sonoff1/power'} ]
        self.status_topics = []
        self.mqttc = None

    def start(self):
        """Read configuration, create child nodes and connect to the broker.

        Returns False on any configuration/connection error, True once the
        MQTT client is connected and its network loop is running.
        """
        LOGGER.info('Started MQTT controller')
        params = self.polyConfig['customParams']
        if 'mqtt_server' in params:
            self.mqtt_server = params['mqtt_server']
        if 'mqtt_port' in params:
            self.mqtt_port = int(params['mqtt_port'])
        if 'mqtt_user' not in params:
            LOGGER.error('mqtt_user must be configured')
            return False
        if 'mqtt_password' not in params:
            LOGGER.error('mqtt_password must be configured')
            return False
        self.mqtt_user = params['mqtt_user']
        self.mqtt_password = params['mqtt_password']
        if not self._load_devlist(params):
            return False
        self.mqttc = mqtt.Client()
        self.mqttc.on_connect = self._on_connect
        self.mqttc.on_disconnect = self._on_disconnect
        self.mqttc.on_message = self._on_message
        self.mqttc.is_connected = False
        # Device 'type' -> node class.  Built at call time because the node
        # classes are defined later in this module.
        node_classes = {
            'switch': MQSwitch,
            'sensor': MQSensor,
            'flag': MQFlag,
            'TempHumid': MQdht,
            'Temp': MQds,
            'TempHumidPress': MQbme,
            'distance': MQhcsr,
            'analog': MQAnalog,
            's31': MQs31,
            'raw': MQraw,
        }
        for dev in self.devlist:
            if 'id' not in dev or 'status_topic' not in dev or 'cmd_topic' not in dev or 'type' not in dev:
                LOGGER.error('Invalid device definition: {}'.format(json.dumps(dev)))
                continue
            name = dev['name'] if 'name' in dev else dev['id']
            # ISY node addresses: lowercase, no underscores, max 14 chars.
            address = dev['id'].lower().replace('_', '')[:14]
            node_class = node_classes.get(dev['type'])
            if node_class is None:
                LOGGER.error('Device type {} is not yet supported'.format(dev['type']))
                continue
            # BUGFIX: original tested "not address is self.nodes" (an
            # identity test, always True); the intent is a membership check
            # so an already-registered address is not re-added.
            if address not in self.nodes:
                LOGGER.info('Adding {} {}'.format(dev['type'], name))
                self.addNode(node_class(self, self.address, address, name, dev))
            self.status_topics.append(dev['status_topic'])
        LOGGER.info('Done adding nodes, connecting to MQTT broker...')
        self.mqttc.username_pw_set(self.mqtt_user, self.mqtt_password)
        try:
            self.mqttc.connect(self.mqtt_server, self.mqtt_port, 10)
            self.mqttc.loop_start()
        except Exception as ex:
            LOGGER.error('Error connecting to Poly MQTT broker {}'.format(ex))
            return False
        return True

    def _load_devlist(self, params):
        """Populate self.devlist from 'devfile' (YAML) or 'devlist' (JSON).

        Returns True on success, False (after logging) on any error.
        """
        if 'devfile' in params:
            try:
                f = open(params['devfile'])
            except Exception as ex:
                LOGGER.error('Failed to open {}: {}'.format(params['devfile'], ex))
                return False
            try:
                data = yaml.safe_load(f.read())
                f.close()
            except Exception as ex:
                LOGGER.error('Failed to parse {} content: {}'.format(params['devfile'], ex))
                return False
            if 'devices' not in data:
                LOGGER.error('Manual discovery file {} is missing bulbs section'.format(params['devfile']))
                return False
            self.devlist = data['devices']
        elif 'devlist' in params:
            try:
                self.devlist = json.loads(params['devlist'])
            except Exception as ex:
                LOGGER.error('Failed to parse the devlist: {}'.format(ex))
                return False
        else:
            LOGGER.error('devlist must be configured')
            return False
        return True

    def _on_connect(self, mqttc, userdata, flags, rc):
        """paho callback: subscribe to every status topic, then query nodes."""
        if rc == 0:
            LOGGER.info('Poly MQTT Connected, subscribing...')
            self.mqttc.is_connected = True
            results = []
            for stopic in self.status_topics:
                results.append((stopic, tuple(self.mqttc.subscribe(stopic))))
            for (topic, (result, mid)) in results:
                if result == 0:
                    LOGGER.info('Subscribed to {} MID: {}, res: {}'.format(topic, mid, result))
                else:
                    LOGGER.error('Failed to subscribe {} MID: {}, res: {}'.format(topic, mid, result))
            # Ask every child node to refresh its state from the device.
            for node in self.nodes:
                if self.nodes[node].address != self.address:
                    self.nodes[node].query()
        else:
            LOGGER.error('Poly MQTT Connect failed')

    def _on_disconnect(self, mqttc, userdata, rc):
        """paho callback: reconnect on unexpected disconnects."""
        self.mqttc.is_connected = False
        if rc != 0:
            LOGGER.warning('Poly MQTT disconnected, trying to re-connect')
            try:
                self.mqttc.reconnect()
            except Exception as ex:
                LOGGER.error('Error connecting to Poly MQTT broker {}'.format(ex))
                return False
        else:
            LOGGER.info('Poly MQTT graceful disconnection')

    def _on_message(self, mqttc, userdata, message):
        """paho callback: route a status message to the matching node."""
        topic = message.topic
        payload = message.payload.decode('utf-8')
        LOGGER.debug('Received {} from {}'.format(payload, topic))
        try:
            self.nodes[self._dev_by_topic(topic)].updateInfo(payload)
        except Exception as ex:
            LOGGER.error('Failed to process message {}'.format(ex))

    def _dev_by_topic(self, topic):
        """Reverse lookup: status topic -> node address (or None)."""
        for dev in self.devlist:
            if dev['status_topic'] == topic:
                # BUGFIX: must mirror the address derivation in start()
                # (including the underscore strip), otherwise devices whose
                # ids contain '_' could never be matched to their node.
                return dev['id'].lower().replace('_', '')[:14]
        return None

    def mqtt_pub(self, topic, message):
        """Publish *message* to *topic* (used by the child nodes)."""
        self.mqttc.publish(topic, message, retain=False)

    def stop(self):
        self.mqttc.loop_stop()
        self.mqttc.disconnect()
        LOGGER.info('MQTT is stopping')

    def updateInfo(self):
        # The controller node itself carries no device state.
        pass

    def query(self, command=None):
        for node in self.nodes:
            self.nodes[node].reportDrivers()

    def discover(self, command=None):
        # Devices are statically configured; nothing to discover.
        pass

    id = 'MQCTRL'
    commands = {'DISCOVER': discover}
    drivers = [{'driver': 'ST', 'value': 1, 'uom': 2}]
class MQSwitch(polyinterface.Node):
    """On/off switch node; state arrives as a bare 'ON'/'OFF' payload."""

    def __init__(self, controller, primary, address, name, device):
        super().__init__(controller, primary, address, name)
        self.cmd_topic = device['cmd_topic']
        self.on = False

    def start(self):
        pass

    def updateInfo(self, payload):
        # Mirror the reported state; emit DON/DOF only on real transitions
        # so the ISY does not receive duplicate commands.
        if payload not in ('ON', 'OFF'):
            LOGGER.error('Invalid payload {}'.format(payload))
            return
        is_on = payload == 'ON'
        if is_on != self.on:
            self.reportCmd('DON' if is_on else 'DOF')
        self.on = is_on
        self.setDriver('ST', 100 if is_on else 0)

    def set_on(self, command):
        self.on = True
        self.controller.mqtt_pub(self.cmd_topic, 'ON')

    def set_off(self, command):
        self.on = False
        self.controller.mqtt_pub(self.cmd_topic, 'OFF')

    def query(self, command=None):
        # An empty publish asks the device to report its current state.
        self.controller.mqtt_pub(self.cmd_topic, '')
        self.reportDrivers()

    drivers = [{'driver': 'ST', 'value': 0, 'uom': 78}]
    id = 'MQSW'
    hint = [4, 2, 0, 0]
    commands = {'QUERY': query, 'DON': set_on, 'DOF': set_off}
class MQSensor(polyinterface.Node):
    """Multi-sensor node (motion, temperature, humidity, light, RGB LED).

    The device publishes a single JSON document; every recognized key is
    mapped onto an ISY driver.  The LED can be controlled back over the
    command topic.
    """

    def __init__(self, controller, primary, address, name, device):
        super().__init__(controller, primary, address, name)
        self.cmd_topic = device['cmd_topic']
        self.on = False
        self.motion = False

    def start(self):
        pass

    def updateInfo(self, payload):
        try:
            report = json.loads(payload)
        except Exception as ex:
            LOGGER.error('Failed to parse MQTT Payload as Json: {} {}'.format(ex, payload))
            return False
        # Motion: ST mirrors the detector; DON/DOF fire only on edges.
        if 'motion' in report:
            active = report['motion'] != 'standby'
            self.setDriver('ST', 1 if active else 0)
            if active and not self.motion:
                self.motion = True
                self.reportCmd('DON')
            elif not active and self.motion:
                self.motion = False
                self.reportCmd('DOF')
        else:
            self.setDriver('ST', 0)
        # Simple scalar readings map straight onto drivers.
        for key, driver in (('temperature', 'CLITEMP'),
                            ('heatIndex', 'GPV'),
                            ('humidity', 'CLIHUM'),
                            ('ldr', 'LUMIN')):
            if key in report:
                self.setDriver(driver, report[key])
        # Optional RGB LED status.
        if 'state' in report:
            self.setDriver('GV0', 100 if report['state'] == 'ON' else 0)
        if 'brightness' in report:
            self.setDriver('GV1', report['brightness'])
        if 'color' in report:
            color = report['color']
            for channel, driver in (('r', 'GV2'), ('g', 'GV3'), ('b', 'GV4')):
                if channel in color:
                    self.setDriver(driver, color[channel])

    def led_on(self, command):
        """Switch the sensor's LED on over MQTT."""
        self.controller.mqtt_pub(self.cmd_topic, json.dumps({'state': 'ON'}))

    def led_off(self, command):
        """Switch the sensor's LED off over MQTT."""
        self.controller.mqtt_pub(self.cmd_topic, json.dumps({'state': 'OFF'}))

    def led_set(self, command):
        """Set LED color/brightness from a SETLED command's query values."""
        query = command.get('query')
        rgb = {channel: self._check_limit(int(query.get(param)))
               for channel, param in (('r', 'R.uom100'),
                                      ('g', 'G.uom100'),
                                      ('b', 'B.uom100'))}
        payload = {
            'state': 'ON',
            'brightness': self._check_limit(int(query.get('I.uom100'))),
            'color': rgb,
        }
        # Transition time and flash count are optional; 0 means "omit".
        duration = int(query.get('D.uom58'))
        if duration > 0:
            payload['transition'] = duration
        flashes = int(query.get('F.uom58'))
        if flashes > 0:
            payload['flash'] = flashes
        self.controller.mqtt_pub(self.cmd_topic, json.dumps(payload))

    def _check_limit(self, value):
        """Clamp *value* into the valid 8-bit channel range [0, 255]."""
        return max(0, min(255, value))

    def query(self, command=None):
        self.reportDrivers()

    # ST=motion, CLITEMP=temp, GPV=heat index, CLIHUM=humidity, LUMIN=light,
    # GV0=LED on %, GV1=brightness, GV2..GV4=R/G/B channel values.
    drivers = [
        {'driver': 'ST', 'value': 0, 'uom': 2},
        {'driver': 'CLITEMP', 'value': 0, 'uom': 17},
        {'driver': 'GPV', 'value': 0, 'uom': 17},
        {'driver': 'CLIHUM', 'value': 0, 'uom': 22},
        {'driver': 'LUMIN', 'value': 0, 'uom': 36},
        {'driver': 'GV0', 'value': 0, 'uom': 78},
        {'driver': 'GV1', 'value': 0, 'uom': 100},
        {'driver': 'GV2', 'value': 0, 'uom': 100},
        {'driver': 'GV3', 'value': 0, 'uom': 100},
        {'driver': 'GV4', 'value': 0, 'uom': 100},
    ]
    id = 'MQSENS'
    commands = {
        'QUERY': query, 'DON': led_on, 'DOF': led_off, 'SETLED': led_set
    }
# this is meant as a flag for if you have a sensor or condition on your IOT device
# which you want the device program rather than the ISY to flag
# FLAG-0 = OK
# FLAG-1 = NOK
# FLAG-2 = LO
# FLAG-3 = HI
# FLAG-4 = ERR
# FLAG-5 = IN
# FLAG-6 = OUT
# FLAG-7 = UP
# FLAG-8 = DOWN
# FLAG-9 = TRIGGER
# FLAG-10 = ON
# FLAG-11 = OFF
# FLAG-12 = ---
# payload is direct (like SW) not JSON encoded (like SENSOR)
# example device: liquid float {OK, LO, HI}
# example condition: IOT devices sensor connections {OK, NOK, ERR(OR)}
class MQFlag(polyinterface.Node):
    """Status-flag node.

    The device publishes one of a fixed set of keywords directly as the
    payload (not JSON); ST is set to the matching index (see the FLAG-n
    table in the comments above).  Unknown payloads are logged and
    reported as ERR (4).
    """

    # payload keyword -> ST driver value
    STATUS = {
        'OK': 0, 'NOK': 1, 'LO': 2, 'HI': 3, 'ERR': 4, 'IN': 5,
        'OUT': 6, 'UP': 7, 'DOWN': 8, 'TRIGGER': 9, 'ON': 10,
        'OFF': 11, '---': 12,
    }

    def __init__(self, controller, primary, address, name, device):
        super().__init__(controller, primary, address, name)
        self.cmd_topic = device['cmd_topic']

    def start(self):
        pass

    def updateInfo(self, payload):
        # BUGFIX: a literal 'ERR' payload (documented as FLAG-4 above) was
        # previously rejected as invalid; it now maps to 4 like the table says.
        if payload in self.STATUS:
            self.setDriver('ST', self.STATUS[payload])
        else:
            LOGGER.error('Invalid payload {}'.format(payload))
            self.setDriver('ST', 4)

    def reset_send(self, command):
        # Ask the device to reset its flag condition.
        self.controller.mqtt_pub(self.cmd_topic, 'RESET')

    def query(self, command=None):
        # Empty publish prompts the device to re-report, then push drivers.
        self.controller.mqtt_pub(self.cmd_topic, '')
        self.reportDrivers()

    drivers = [{'driver': 'ST', 'value': 0, 'uom': 25}
               ]
    id = 'MQFLAG'
    commands = {
        'QUERY': query, 'RESET': reset_send
    }
# This class is an attempt to add support for temperature/humidity sensors.
# It was originally developed with a DHT22, but should work with
# any of the following, since they I believe they get identified by tomaso the same:
# DHT21, AM2301, AM2302, AM2321
# Should be easy to add other temp/humdity sensors.
class MQdht(polyinterface.Node):
    """Temperature/humidity node for the DHT family (reported as AM2301)."""

    def __init__(self, controller, primary, address, name, device):
        super().__init__(controller, primary, address, name)
        self.on = False

    def start(self):
        pass

    def updateInfo(self, payload):
        """Parse JSON telemetry and publish temperature and humidity."""
        try:
            readings = json.loads(payload)
        except Exception as ex:
            LOGGER.error('Failed to parse MQTT Payload as Json: {} {}'.format(ex, payload))
            return False
        if 'AM2301' not in readings:
            # Sensor section missing -> mark the node offline.
            self.setDriver('ST', 0)
            return
        sensor = readings['AM2301']
        self.setDriver('ST', 1)
        self.setDriver('CLITEMP', sensor['Temperature'])
        self.setDriver('CLIHUM', sensor['Humidity'])

    def query(self, command=None):
        self.reportDrivers()

    drivers = [
        {'driver': 'ST', 'value': 0, 'uom': 2},
        {'driver': 'CLITEMP', 'value': 0, 'uom': 17},
        {'driver': 'CLIHUM', 'value': 0, 'uom': 22},
    ]
    id = 'MQDHT'
    commands = {'QUERY': query}
# This class is an attempt to add support for temperature only sensors.
# was made for DS18B20 waterproof
class MQds(polyinterface.Node):
    """Temperature-only node (made for the waterproof DS18B20)."""

    def __init__(self, controller, primary, address, name, device):
        super().__init__(controller, primary, address, name)
        self.on = False

    def start(self):
        pass

    def updateInfo(self, payload):
        """Parse JSON telemetry and publish the DS18B20 temperature."""
        try:
            readings = json.loads(payload)
        except Exception as ex:
            LOGGER.error('Failed to parse MQTT Payload as Json: {} {}'.format(ex, payload))
            return False
        if 'DS18B20' not in readings:
            # Sensor section missing -> mark the node offline.
            self.setDriver('ST', 0)
            return
        self.setDriver('ST', 1)
        self.setDriver('CLITEMP', readings['DS18B20']['Temperature'])

    def query(self, command=None):
        self.reportDrivers()

    drivers = [
        {'driver': 'ST', 'value': 0, 'uom': 2},
        {'driver': 'CLITEMP', 'value': 0, 'uom': 17},
    ]
    id = 'MQDS'
    commands = {'QUERY': query}
# This class is an attempt to add support for temperature/humidity/pressure sensors.
# Currently supports the BME280. Could be extended to accept others.
class MQbme(polyinterface.Node):
    """Temperature/humidity/pressure node for the BME280."""

    def __init__(self, controller, primary, address, name, device):
        super().__init__(controller, primary, address, name)
        self.on = False

    def start(self):
        pass

    def updateInfo(self, payload):
        """Parse JSON telemetry; publish temp, humidity and pressure."""
        try:
            readings = json.loads(payload)
        except Exception as ex:
            LOGGER.error('Failed to parse MQTT Payload as Json: {} {}'.format(ex, payload))
            return False
        if 'BME280' not in readings:
            # Sensor section missing -> mark the node offline.
            self.setDriver('ST', 0)
            return
        sensor = readings['BME280']
        self.setDriver('ST', 1)
        self.setDriver('CLITEMP', sensor['Temperature'])
        self.setDriver('CLIHUM', sensor['Humidity'])
        # Convert hPa to inches of mercury, rounded to two decimals.
        inhg = format(round(float('.02952998751') * float(sensor['Pressure']), 2))
        self.setDriver('BARPRES', inhg)

    def query(self, command=None):
        self.reportDrivers()

    drivers = [
        {'driver': 'ST', 'value': 0, 'uom': 2},
        {'driver': 'CLITEMP', 'value': 0, 'uom': 17},
        {'driver': 'CLIHUM', 'value': 0, 'uom': 22},
        {'driver': 'BARPRES', 'value': 0, 'uom': 23},
    ]
    id = 'MQBME'
    commands = {'QUERY': query}
# This class is an attempt to add support for HC-SR04 Ultrasonic Sensor.
# Returns distance in centimeters.
class MQhcsr(polyinterface.Node):
    """HC-SR04 ultrasonic range finder; reports distance in centimeters."""

    def __init__(self, controller, primary, address, name, device):
        super().__init__(controller, primary, address, name)
        self.on = False

    def start(self):
        pass

    def updateInfo(self, payload):
        """Parse JSON telemetry and publish the SR04 distance reading."""
        try:
            readings = json.loads(payload)
        except Exception as ex:
            LOGGER.error('Failed to parse MQTT Payload as Json: {} {}'.format(ex, payload))
            return False
        if 'SR04' not in readings:
            # Sensor section missing -> offline, zero the reading.
            self.setDriver('ST', 0)
            self.setDriver('DISTANC', 0)
            return
        self.setDriver('ST', 1)
        self.setDriver('DISTANC', readings['SR04']['Distance'])

    def query(self, command=None):
        self.reportDrivers()

    drivers = [
        {'driver': 'ST', 'value': 0, 'uom': 2},
        {'driver': 'DISTANC', 'value': 0, 'uom': 5},
    ]
    id = 'MQHCSR'
    commands = {'QUERY': query}
# General purpose Analog input using ADC.
# Setting max value in editor.xml as 1024, as that would be the max for
# onboard ADC, but that might need to be changed for external ADCs.
class MQAnalog(polyinterface.Node):
    """General-purpose analog (ADC) input node; reports the raw reading."""

    def __init__(self, controller, primary, address, name, device):
        super().__init__(controller, primary, address, name)
        self.on = False

    def start(self):
        pass

    def updateInfo(self, payload):
        """Parse JSON telemetry and publish the A0 analog value."""
        try:
            readings = json.loads(payload)
        except Exception as ex:
            LOGGER.error('Failed to parse MQTT Payload as Json: {} {}'.format(ex, payload))
            return False
        if 'ANALOG' not in readings:
            # Sensor section missing -> offline, zero the reading.
            self.setDriver('ST', 0)
            self.setDriver('GPV', 0)
            return
        self.setDriver('ST', 1)
        self.setDriver('GPV', readings['ANALOG']['A0'])

    def query(self, command=None):
        self.reportDrivers()

    # GPV = "General Purpose Value"; UOM 56 = raw value from the device.
    drivers = [
        {'driver': 'ST', 'value': 0, 'uom': 2},
        {'driver': 'GPV', 'value': 0, 'uom': 56},
    ]
    id = 'MQANAL'
    commands = {'QUERY': query}
# Reading the telemetry data for a Sonoff S31 (use the switch for control)
class MQs31(polyinterface.Node):
    """Telemetry node for a Sonoff S31 power monitor (read-only)."""

    # ISY driver -> key inside the ENERGY telemetry block.
    ENERGY_DRIVERS = (
        ('CC', 'Current'),
        ('CPW', 'Power'),
        ('CV', 'Voltage'),
        ('PF', 'Factor'),
        ('TPW', 'Total'),
    )

    def __init__(self, controller, primary, address, name, device):
        super().__init__(controller, primary, address, name)
        self.on = False

    def start(self):
        pass

    def updateInfo(self, payload):
        """Parse JSON telemetry and publish all energy readings."""
        try:
            readings = json.loads(payload)
        except Exception as ex:
            LOGGER.error('Failed to parse MQTT Payload as Json: {} {}'.format(ex, payload))
            return False
        if 'ENERGY' not in readings:
            # Telemetry section missing -> mark the node offline.
            self.setDriver('ST', 0)
            return
        energy = readings['ENERGY']
        self.setDriver('ST', 1)
        for driver, key in self.ENERGY_DRIVERS:
            self.setDriver(driver, energy[key])

    def query(self, command=None):
        self.reportDrivers()

    drivers = [
        {'driver': 'ST', 'value': 0, 'uom': 2},
        {'driver': 'CC', 'value': 0, 'uom': 1},
        {'driver': 'CPW', 'value': 0, 'uom': 73},
        {'driver': 'CV', 'value': 0, 'uom': 72},
        {'driver': 'PF', 'value': 0, 'uom': 53},
        {'driver': 'TPW', 'value': 0, 'uom': 33},
    ]
    id = 'MQS31'
    commands = {'QUERY': query}
class MQraw(polyinterface.Node):
    """Node reporting a raw integer value published directly (not JSON)."""

    def __init__(self, controller, primary, address, name, device):
        super().__init__(controller, primary, address, name)
        self.cmd_topic = device['cmd_topic']
        self.on = False

    def start(self):
        pass

    def updateInfo(self, payload):
        # The payload is a bare integer string, not JSON.
        try:
            self.setDriver('ST', int(payload))
        except Exception as ex:
            # BUGFIX: the message claimed JSON parsing although this node
            # parses a plain integer payload.
            LOGGER.error('Failed to parse MQTT Payload as integer: {} {}'.format(ex, payload))

    def query(self, command=None):
        # Empty publish prompts the device to re-report, then push drivers.
        self.controller.mqtt_pub(self.cmd_topic, '')
        self.reportDrivers()

    drivers = [{'driver': 'ST', 'value': 0, 'uom': 56}
               ]
    id = 'MQSR'
    hint = [4, 2, 0, 0]
    commands = {
        'QUERY': query
    }
if __name__ == "__main__":
    # Entry point: create the Polyglot interface, start it, and hand
    # control to the node-server controller until interrupted.
    try:
        polyglot = polyinterface.Interface('MQTT')
        polyglot.start()
        control = Controller(polyglot)
        control.runForever()
    except (KeyboardInterrupt, SystemExit):
        # Normal shutdown path (Ctrl-C or service stop).
        sys.exit(0)
|
en
| 0.8745
|
#!/usr/bin/env python3 # example: [ {'id': 'sonoff1', 'type': 'switch', 'status_topic': 'stat/sonoff1/power', 'cmd_topic': 'cmnd/sonoff1/power'} ] # LOGGER.setLevel(logging.INFO) # motion detector # temperature # heatIndex # humidity # light detecor reading # LED # LED is present # this is meant as a flag for if you have a sensor or condition on your IOT device # which you want the device program rather than the ISY to flag # FLAG-0 = OK # FLAG-1 = NOK # FLAG-2 = LO # FLAG-3 = HI # FLAG-4 = ERR # FLAG-5 = IN # FLAG-6 = OUT # FLAG-7 = UP # FLAG-8 = DOWN # FLAG-9 = TRIGGER # FLAG-10 = ON # FLAG-11 = OFF # FLAG-12 = --- # payload is direct (like SW) not JSON encoded (like SENSOR) # example device: liquid float {OK, LO, HI} # example condition: IOT devices sensor connections {OK, NOK, ERR(OR)} # This class is an attempt to add support for temperature/humidity sensors. # It was originally developed with a DHT22, but should work with # any of the following, since they I believe they get identified by tomaso the same: # DHT21, AM2301, AM2302, AM2321 # Should be easy to add other temp/humdity sensors. # This class is an attempt to add support for temperature only sensors. # was made for DS18B20 waterproof # This class is an attempt to add support for temperature/humidity/pressure sensors. # Currently supports the BME280. Could be extended to accept others. # Converting to "Hg, could do this in sonoff-tomasto # or just report the raw hPA (or convert to kPA). # This class is an attempt to add support for HC-SR04 Ultrasonic Sensor. # Returns distance in centimeters. # General purpose Analog input using ADC. # Setting max value in editor.xml as 1024, as that would be the max for # onboard ADC, but that might need to be changed for external ADCs. # GPV = "General Purpose Value" # UOM:56 = "The raw value reported by device" # Reading the telemetry data for a Sonoff S31 (use the switch for control)
| 2.484437
| 2
|
codebase/people_counter HOrizontal.py
|
hiteshsdata/Adaptive-Optimum-Scheduling-of-campus-buses
| 3
|
6625696
|
# USAGE
# To read and write back out to video:
# python people_counter.py --prototxt mobilenet_ssd/MobileNetSSD_deploy.prototxt \
# --model mobilenet_ssd/MobileNetSSD_deploy.caffemodel --input videos/example_01.mp4 \
# --output output/output_01.avi
#
# To read from webcam and write back out to disk:
# python people_counter.py --prototxt mobilenet_ssd/MobileNetSSD_deploy.prototxt \
# --model mobilenet_ssd/MobileNetSSD_deploy.caffemodel \
# --output output/webcam_output.avi
# import the necessary packages
from pyimagesearch.centroidtracker import CentroidTracker
from pyimagesearch.trackableobject import TrackableObject
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import dlib
import cv2
# construct the argument parser and parse the command-line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True,
	help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
	help="path to Caffe pre-trained model")
ap.add_argument("-i", "--input", type=str,
	help="path to optional input video file")
ap.add_argument("-o", "--output", type=str,
	help="path to optional output video file")
ap.add_argument("-c", "--confidence", type=float, default=0.4,
	help="minimum probability to filter weak detections")
ap.add_argument("-s", "--skip-frames", type=int, default=30,
	help="# of skip frames between detections")
args = vars(ap.parse_args())
# initialize the list of class labels MobileNet SSD was trained to
# detect; a detection's class id indexes into this list
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
	"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
	"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
	"sofa", "train", "tvmonitor"]
# load our serialized Caffe model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
# if a video path was not supplied, grab a reference to the webcam
if not args.get("input", False):
	print("[INFO] starting video stream...")
	vs = VideoStream(src=0).start()
	time.sleep(2.0)
# otherwise, grab a reference to the video file
else:
	print("[INFO] opening video file...")
	vs = cv2.VideoCapture(args["input"])
# initialize the video writer (we'll instantiate later if need be)
writer = None
# initialize the frame dimensions (we'll set them as soon as we read
# the first frame from the video)
W = None
H = None
# instantiate our centroid tracker, then initialize a list to store
# each of our dlib correlation trackers, followed by a dictionary to
# map each unique object ID to a TrackableObject
ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
trackers = []
trackableObjects = {}
# initialize the total number of frames processed thus far, along
# with the total number of objects that have crossed the *vertical*
# center line (this "horizontal" variant counts left/right crossings:
# totalUp = leftward, totalDown = rightward — see the "Left"/"Right"
# labels drawn on the frame below)
totalFrames = 0
totalDown = 0
totalUp = 0
# start the frames per second throughput estimator
fps = FPS().start()
# loop over frames from the video stream
while True:
	# grab the next frame and handle if we are reading from either
	# VideoCapture or VideoStream
	frame = vs.read()
	frame = frame[1] if args.get("input", False) else frame
	# if we are viewing a video and we did not grab a frame then we
	# have reached the end of the video
	if args["input"] is not None and frame is None:
		break
	# resize the frame to have a maximum width of 500 pixels (the
	# less data we have, the faster we can process it), then convert
	# the frame from BGR to RGB for dlib
	frame = imutils.resize(frame, width=500)
	rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
	# if the frame dimensions are empty, set them
	if W is None or H is None:
		(H, W) = frame.shape[:2]
	# if we are supposed to be writing a video to disk, initialize
	# the writer
	if args["output"] is not None and writer is None:
		fourcc = cv2.VideoWriter_fourcc(*"MJPG")
		writer = cv2.VideoWriter(args["output"], fourcc, 30,
			(W, H), True)
	# initialize the current status along with our list of bounding
	# box rectangles returned by either (1) our object detector or
	# (2) the correlation trackers
	status = "Waiting"
	rects = []
	# check to see if we should run a more computationally expensive
	# object detection method to aid our tracker
	if totalFrames % args["skip_frames"] == 0:
		# set the status and initialize our new set of object trackers
		status = "Detecting"
		trackers = []
		# convert the frame to a blob and pass the blob through the
		# network and obtain the detections
		blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
		net.setInput(blob)
		detections = net.forward()
		# loop over the detections
		for i in np.arange(0, detections.shape[2]):
			# extract the confidence (i.e., probability) associated
			# with the prediction
			confidence = detections[0, 0, i, 2]
			# filter out weak detections by requiring a minimum
			# confidence
			if confidence > args["confidence"]:
				# extract the index of the class label from the
				# detections list
				idx = int(detections[0, 0, i, 1])
				# if the class label is not a person, ignore it
				if CLASSES[idx] != "person":
					continue
				# compute the (x, y)-coordinates of the bounding box
				# for the object
				box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
				(startX, startY, endX, endY) = box.astype("int")
				# construct a dlib rectangle object from the bounding
				# box coordinates and then start the dlib correlation
				# tracker
				tracker = dlib.correlation_tracker()
				rect = dlib.rectangle(startX, startY, endX, endY)
				tracker.start_track(rgb, rect)
				# add the tracker to our list of trackers so we can
				# utilize it during skip frames
				trackers.append(tracker)
	# otherwise, we should utilize our object *trackers* rather than
	# object *detectors* to obtain a higher frame processing throughput
	else:
		# loop over the trackers
		for tracker in trackers:
			# set the status of our system to be 'tracking' rather
			# than 'waiting' or 'detecting'
			status = "Tracking"
			# update the tracker and grab the updated position
			tracker.update(rgb)
			pos = tracker.get_position()
			# unpack the position object
			startX = int(pos.left())
			startY = int(pos.top())
			endX = int(pos.right())
			endY = int(pos.bottom())
			# add the bounding box coordinates to the rectangles list
			rects.append((startX, startY, endX, endY))
	# draw a *vertical* line at x = W // 2 -- once an object crosses
	# this line we will determine whether it was moving left or right
	# (comments in the original vertical-counting script said
	# "horizontal"/"up"/"down"; this variant counts on the x-axis)
	cv2.line(frame, (W // 2,0), (W//2, H), (0, 255, 255), 2)
	# use the centroid tracker to associate the (1) old object
	# centroids with (2) the newly computed object centroids
	objects = ct.update(rects)
	# loop over the tracked objects
	for (objectID, centroid) in objects.items():
		# check to see if a trackable object exists for the current
		# object ID
		to = trackableObjects.get(objectID, None)
		# if there is no existing trackable object, create one
		if to is None:
			to = TrackableObject(objectID, centroid)
		# otherwise, there is a trackable object so we can utilize it
		# to determine direction
		else:
			# the difference between the x-coordinate of the *current*
			# centroid and the mean of *previous* centroids will tell
			# us in which direction the object is moving (negative for
			# 'left' and positive for 'right')
			x = [c[0] for c in to.centroids]
			direction = centroid[0] - np.mean(x)
			to.centroids.append(centroid)
			# check to see if the object has been counted or not
			if not to.counted:
				# if the direction is negative (indicating the object
				# is moving left) AND the centroid is left of the
				# center line, count the object
				if direction < 0 and centroid[0] < W // 2:
					totalUp += 1
					to.counted = True
				# if the direction is positive (indicating the object
				# is moving right) AND the centroid is right of the
				# center line, count the object
				elif direction > 0 and centroid[0] > W // 2:
					totalDown += 1
					to.counted = True
		# store the trackable object in our dictionary
		trackableObjects[objectID] = to
		# draw both the ID of the object and the centroid of the
		# object on the output frame
		text = "ID {}".format(objectID)
		cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
			cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
		cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
	# construct a tuple of information we will be displaying on the
	# frame (totalUp counts leftward crossings, totalDown rightward)
	info = [
		("Left", totalUp),
		("Right", totalDown),
		("Status", status),
	]
	# loop over the info tuples and draw them on our frame
	for (i, (k, v)) in enumerate(info):
		text = "{}: {}".format(k, v)
		cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
			cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
	# check to see if we should write the frame to disk
	if writer is not None:
		writer.write(frame)
	# show the output frame
	cv2.imshow("Frame", frame)
	key = cv2.waitKey(1) & 0xFF
	# if the `q` key was pressed, break from the loop
	if key == ord("q"):
		break
	# increment the total number of frames processed thus far and
	# then update the FPS counter
	totalFrames += 1
	fps.update()
# stop the timer and display FPS information, then release all
# video resources and GUI windows
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# check to see if we need to release the video writer pointer
if writer is not None:
	writer.release()
# if we are not using a video file, stop the camera video stream
if not args.get("input", False):
	vs.stop()
# otherwise, release the video file pointer
else:
	vs.release()
# close any open windows
cv2.destroyAllWindows()
|
# USAGE
# To read and write back out to video:
# python people_counter.py --prototxt mobilenet_ssd/MobileNetSSD_deploy.prototxt \
# --model mobilenet_ssd/MobileNetSSD_deploy.caffemodel --input videos/example_01.mp4 \
# --output output/output_01.avi
#
# To read from webcam and write back out to disk:
# python people_counter.py --prototxt mobilenet_ssd/MobileNetSSD_deploy.prototxt \
# --model mobilenet_ssd/MobileNetSSD_deploy.caffemodel \
# --output output/webcam_output.avi
# import the necessary packages
from pyimagesearch.centroidtracker import CentroidTracker
from pyimagesearch.trackableobject import TrackableObject
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import dlib
import cv2
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True,
help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
help="path to Caffe pre-trained model")
ap.add_argument("-i", "--input", type=str,
help="path to optional input video file")
ap.add_argument("-o", "--output", type=str,
help="path to optional output video file")
ap.add_argument("-c", "--confidence", type=float, default=0.4,
help="minimum probability to filter weak detections")
ap.add_argument("-s", "--skip-frames", type=int, default=30,
help="# of skip frames between detections")
args = vars(ap.parse_args())
# initialize the list of class labels MobileNet SSD was trained to
# detect
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
# if a video path was not supplied, grab a reference to the webcam
if not args.get("input", False):
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
# otherwise, grab a reference to the video file
else:
print("[INFO] opening video file...")
vs = cv2.VideoCapture(args["input"])
# initialize the video writer (we'll instantiate later if need be)
writer = None
# initialize the frame dimensions (we'll set them as soon as we read
# the first frame from the video)
W = None
H = None
# instantiate our centroid tracker, then initialize a list to store
# each of our dlib correlation trackers, followed by a dictionary to
# map each unique object ID to a TrackableObject
ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
trackers = []
trackableObjects = {}
# initialize the total number of frames processed thus far, along
# with the total number of objects that have moved either up or down
totalFrames = 0
totalDown = 0
totalUp = 0
# start the frames per second throughput estimator
fps = FPS().start()
# loop over frames from the video stream
while True:
	# grab the next frame and handle if we are reading from either
	# VideoCapture or VideoStream
	frame = vs.read()
	frame = frame[1] if args.get("input", False) else frame
	# if we are viewing a video and we did not grab a frame then we
	# have reached the end of the video
	if args["input"] is not None and frame is None:
		break
	# resize the frame to have a maximum width of 500 pixels (the
	# less data we have, the faster we can process it), then convert
	# the frame from BGR to RGB for dlib
	frame = imutils.resize(frame, width=500)
	rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
	# if the frame dimensions are empty, set them
	if W is None or H is None:
		(H, W) = frame.shape[:2]
	# if we are supposed to be writing a video to disk, initialize
	# the writer
	if args["output"] is not None and writer is None:
		fourcc = cv2.VideoWriter_fourcc(*"MJPG")
		writer = cv2.VideoWriter(args["output"], fourcc, 30,
			(W, H), True)
	# initialize the current status along with our list of bounding
	# box rectangles returned by either (1) our object detector or
	# (2) the correlation trackers
	status = "Waiting"
	rects = []
	# check to see if we should run a more computationally expensive
	# object detection method to aid our tracker
	if totalFrames % args["skip_frames"] == 0:
		# set the status and initialize our new set of object trackers
		status = "Detecting"
		trackers = []
		# convert the frame to a blob and pass the blob through the
		# network and obtain the detections
		blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
		net.setInput(blob)
		detections = net.forward()
		# loop over the detections
		for i in np.arange(0, detections.shape[2]):
			# extract the confidence (i.e., probability) associated
			# with the prediction
			confidence = detections[0, 0, i, 2]
			# filter out weak detections by requiring a minimum
			# confidence
			if confidence > args["confidence"]:
				# extract the index of the class label from the
				# detections list
				idx = int(detections[0, 0, i, 1])
				# if the class label is not a person, ignore it
				if CLASSES[idx] != "person":
					continue
				# compute the (x, y)-coordinates of the bounding box
				# for the object
				box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])
				(startX, startY, endX, endY) = box.astype("int")
				# construct a dlib rectangle object from the bounding
				# box coordinates and then start the dlib correlation
				# tracker
				tracker = dlib.correlation_tracker()
				rect = dlib.rectangle(startX, startY, endX, endY)
				tracker.start_track(rgb, rect)
				# add the tracker to our list of trackers so we can
				# utilize it during skip frames
				trackers.append(tracker)
	# otherwise, we should utilize our object *trackers* rather than
	# object *detectors* to obtain a higher frame processing throughput
	else:
		# loop over the trackers
		for tracker in trackers:
			# set the status of our system to be 'tracking' rather
			# than 'waiting' or 'detecting'
			status = "Tracking"
			# update the tracker and grab the updated position
			tracker.update(rgb)
			pos = tracker.get_position()
			# unpack the position object
			startX = int(pos.left())
			startY = int(pos.top())
			endX = int(pos.right())
			endY = int(pos.bottom())
			# add the bounding box coordinates to the rectangles list
			rects.append((startX, startY, endX, endY))
	# draw a *vertical* line at x = W // 2 -- once an object crosses
	# this line we will determine whether it was moving left or right
	# (comments in the original vertical-counting script said
	# "horizontal"/"up"/"down"; this variant counts on the x-axis)
	cv2.line(frame, (W // 2,0), (W//2, H), (0, 255, 255), 2)
	# use the centroid tracker to associate the (1) old object
	# centroids with (2) the newly computed object centroids
	objects = ct.update(rects)
	# loop over the tracked objects
	for (objectID, centroid) in objects.items():
		# check to see if a trackable object exists for the current
		# object ID
		to = trackableObjects.get(objectID, None)
		# if there is no existing trackable object, create one
		if to is None:
			to = TrackableObject(objectID, centroid)
		# otherwise, there is a trackable object so we can utilize it
		# to determine direction
		else:
			# the difference between the x-coordinate of the *current*
			# centroid and the mean of *previous* centroids will tell
			# us in which direction the object is moving (negative for
			# 'left' and positive for 'right')
			x = [c[0] for c in to.centroids]
			direction = centroid[0] - np.mean(x)
			to.centroids.append(centroid)
			# check to see if the object has been counted or not
			if not to.counted:
				# if the direction is negative (indicating the object
				# is moving left) AND the centroid is left of the
				# center line, count the object
				if direction < 0 and centroid[0] < W // 2:
					totalUp += 1
					to.counted = True
				# if the direction is positive (indicating the object
				# is moving right) AND the centroid is right of the
				# center line, count the object
				elif direction > 0 and centroid[0] > W // 2:
					totalDown += 1
					to.counted = True
		# store the trackable object in our dictionary
		trackableObjects[objectID] = to
		# draw both the ID of the object and the centroid of the
		# object on the output frame
		text = "ID {}".format(objectID)
		cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
			cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
		cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
	# construct a tuple of information we will be displaying on the
	# frame (totalUp counts leftward crossings, totalDown rightward)
	info = [
		("Left", totalUp),
		("Right", totalDown),
		("Status", status),
	]
	# loop over the info tuples and draw them on our frame
	for (i, (k, v)) in enumerate(info):
		text = "{}: {}".format(k, v)
		cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
			cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
	# check to see if we should write the frame to disk
	if writer is not None:
		writer.write(frame)
	# show the output frame
	cv2.imshow("Frame", frame)
	key = cv2.waitKey(1) & 0xFF
	# if the `q` key was pressed, break from the loop
	if key == ord("q"):
		break
	# increment the total number of frames processed thus far and
	# then update the FPS counter
	totalFrames += 1
	fps.update()
# stop the timer and display FPS information, then release all
# video resources and GUI windows
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
# check to see if we need to release the video writer pointer
if writer is not None:
	writer.release()
# if we are not using a video file, stop the camera video stream
if not args.get("input", False):
	vs.stop()
# otherwise, release the video file pointer
else:
	vs.release()
# close any open windows
cv2.destroyAllWindows()
|
en
| 0.789422
|
# USAGE # To read and write back out to video: # python people_counter.py --prototxt mobilenet_ssd/MobileNetSSD_deploy.prototxt \ # --model mobilenet_ssd/MobileNetSSD_deploy.caffemodel --input videos/example_01.mp4 \ # --output output/output_01.avi # # To read from webcam and write back out to disk: # python people_counter.py --prototxt mobilenet_ssd/MobileNetSSD_deploy.prototxt \ # --model mobilenet_ssd/MobileNetSSD_deploy.caffemodel \ # --output output/webcam_output.avi # import the necessary packages # construct the argument parse and parse the arguments # initialize the list of class labels MobileNet SSD was trained to # detect # load our serialized model from disk # if a video path was not supplied, grab a reference to the webcam # otherwise, grab a reference to the video file # initialize the video writer (we'll instantiate later if need be) # initialize the frame dimensions (we'll set them as soon as we read # the first frame from the video) # instantiate our centroid tracker, then initialize a list to store # each of our dlib correlation trackers, followed by a dictionary to # map each unique object ID to a TrackableObject # initialize the total number of frames processed thus far, along # with the total number of objects that have moved either up or down # start the frames per second throughput estimator # loop over frames from the video stream # grab the next frame and handle if we are reading from either # VideoCapture or VideoStream # if we are viewing a video and we did not grab a frame then we # have reached the end of the video # resize the frame to have a maximum width of 500 pixels (the # less data we have, the faster we can process it), then convert # the frame from BGR to RGB for dlib # if the frame dimensions are empty, set them # if we are supposed to be writing a video to disk, initialize # the writer # initialize the current status along with our list of bounding # box rectangles returned by either (1) our object detector or # (2) the 
correlation trackers # check to see if we should run a more computationally expensive # object detection method to aid our tracker # set the status and initialize our new set of object trackers # convert the frame to a blob and pass the blob through the # network and obtain the detections # loop over the detections # extract the confidence (i.e., probability) associated # with the prediction # filter out weak detections by requiring a minimum # confidence # extract the index of the class label from the # detections list # if the class label is not a person, ignore it # compute the (x, y)-coordinates of the bounding box # for the object # construct a dlib rectangle object from the bounding # box coordinates and then start the dlib correlation # tracker # add the tracker to our list of trackers so we can # utilize it during skip frames # otherwise, we should utilize our object *trackers* rather than # object *detectors* to obtain a higher frame processing throughput # loop over the trackers # set the status of our system to be 'tracking' rather # than 'waiting' or 'detecting' # update the tracker and grab the updated position # unpack the position object # add the bounding box coordinates to the rectangles list # draw a horizontal line in the center of the frame -- once an # object crosses this line we will determine whether they were # moving 'up' or 'down' # use the centroid tracker to associate the (1) old object # centroids with (2) the newly computed object centroids # loop over the tracked objects # check to see if a trackable object exists for the current # object ID # if there is no existing trackable object, create one # otherwise, there is a trackable object so we can utilize it # to determine direction # the difference between the y-coordinate of the *current* # centroid and the mean of *previous* centroids will tell # us in which direction the object is moving (negative for # 'up' and positive for 'down') # check to see if the object has been counted or 
not # if the direction is negative (indicating the object # is moving up) AND the centroid is above the center # line, count the object # if the direction is positive (indicating the object # is moving down) AND the centroid is below the # center line, count the object # store the trackable object in our dictionary # draw both the ID of the object and the centroid of the # object on the output frame # construct a tuple of information we will be displaying on the # frame # loop over the info tuples and draw them on our frame # check to see if we should write the frame to disk # show the output frame # if the `q` key was pressed, break from the loop # increment the total number of frames processed thus far and # then update the FPS counter # stop the timer and display FPS information # check to see if we need to release the video writer pointer # if we are not using a video file, stop the camera video stream # otherwise, release the video file pointer # close any open windows
| 2.82059
| 3
|
awx/main/migrations/0039_v330_custom_venv_help_text.py
|
DamoR25/awxnew
| 11,396
|
6625697
|
<gh_stars>1000+
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-05-23 20:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Apply identical help_text to ``custom_virtualenv`` on the
    jobtemplate, organization and project models.

    The original migration repeated the same AlterField (field definition
    included) three times verbatim; the operations are now generated from a
    single template, which keeps the three definitions guaranteed-identical.
    """

    dependencies = [
        ('main', '0038_v330_add_deleted_activitystream_actor'),
    ]

    # One AlterField per model, all sharing the same field definition.
    operations = [
        migrations.AlterField(
            model_name=model_name,
            name='custom_virtualenv',
            field=models.CharField(
                blank=True, default=None, help_text='Local absolute file path containing a custom Python virtualenv to use', max_length=100, null=True
            ),
        )
        for model_name in ('jobtemplate', 'organization', 'project')
    ]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-05-23 20:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Apply identical help_text to ``custom_virtualenv`` on the
    jobtemplate, organization and project models.

    The original migration repeated the same AlterField (field definition
    included) three times verbatim; the operations are now generated from a
    single template, which keeps the three definitions guaranteed-identical.
    """

    dependencies = [
        ('main', '0038_v330_add_deleted_activitystream_actor'),
    ]

    # One AlterField per model, all sharing the same field definition.
    operations = [
        migrations.AlterField(
            model_name=model_name,
            name='custom_virtualenv',
            field=models.CharField(
                blank=True, default=None, help_text='Local absolute file path containing a custom Python virtualenv to use', max_length=100, null=True
            ),
        )
        for model_name in ('jobtemplate', 'organization', 'project')
    ]
|
en
| 0.686883
|
# -*- coding: utf-8 -*- # Generated by Django 1.11.11 on 2018-05-23 20:17
| 1.649288
| 2
|
parser/team19/BDTytus/Gramatica/Gramatica.py
|
XiomRB/tytus
| 1
|
6625698
|
import Errores.Nodo_Error as err
from ply import lex
from AST.Sentencias import Raiz, Sentencia
import AST.SentenciasDDL as DDL
import ply.yacc as yacc
# Map of SQL reserved words (lower-case) to their PLY token names.
# NOTE: the original literal repeated the keys 'delete', 'as' and 'rename'
# with identical values; Python silently keeps only one entry per key, so
# the duplicates are removed here without changing the resulting mapping.
reservadas = {
    'select': 't_select',
    'distinct': 't_distinct',
    'as': 't_as',
    'from': 't_from',
    'where': 't_where',
    'having': 't_having',
    'avg': 't_avg',
    'min': 't_min',
    'max': 't_max',
    'sum': 't_sum',
    'count': 't_count',
    'insert': 't_insert',
    'into': 't_into',
    'values': 't_values',
    'delete': 't_delete',
    'update': 't_update',
    'true': 't_true',
    'false': 't_false',
    'not': 't_not',
    'and': 't_and',
    'or': 't_or',
    'smallint': 't_smallint',
    'integer': 't_integer',
    'bigint': 't_bigint',
    'decimal': 't_decimal',
    'numeric': 't_numeric',
    'real': 't_real',
    'double': 't_double',
    'precision': 't_precision',
    'money': 't_money',
    'character': 't_character',
    'varying': 't_varying',
    'varchar': 't_varchar',
    'char': 't_charn',
    'text': 't_text',
    'boolean': 't_boolean',
    'bool': 't_bool',
    'type': 't_type',
    'enum': 't_enum',
    'create': 't_create',
    'replace': 't_replace',
    'database': 't_database',
    'if': 't_if',
    'exists': 't_exists',
    'owner': 't_owner',
    'mode': 't_mode',
    'show': 't_show',
    'databases': 't_databases',
    'like': 't_like',
    'alter': 't_alter',
    'rename': 't_rename',
    'to': 't_to',
    'current_user': 't_current_user',
    'session_user': 't_session_user',
    'drop': 't_drop',
    'table': 't_table',
    'only': 't_only',
    'using': 't_using',
    'current': 't_current',
    'of': 't_of',
    'returning': 't_returning',
    'inherits': 't_inherits',
    'primary': 't_primary',
    'key': 't_key',
    'references': 't_references',
    'foreign': 't_foreign',
    'null': 't_null',
    'constraint': 't_constraint',
    'unique': 't_unique',
    'check': 't_check',
    'add': 't_add',
    'set': 't_set',
    'column': 't_column',
    'inner': 't_inner',
    'left': 't_left',
    'right': 't_right',
    'full': 't_full',
    'outer': 't_outer',
    'join': 't_join',
    'natural': 't_natural',
    'on': 't_on',
    'abs': 't_abs',
    'cbrt': 't_cbrt',
    'ceil': 't_ceil',
    'ceiling': 't_ceiling',
    'degrees': 't_degrees',
    'div': 't_div',
    'exp': 't_exp',
    'factorial': 't_factorial',
    'floor': 't_floor',
    'gcd': 't_gcd',
    'ln': 't_ln',
    'log': 't_log',
    'mod': 't_mod',
    'pi': 't_pi',
    'power': 't_power',
    'radians': 't_radians',
    'round': 't_round',
    'use': 't_use',
    'default': 't_default',
    'acos': 't_acos',
    'acosd': 't_acosd',
    'asin': 't_asin',
    'asind': 't_asind',
    'atan': 't_atan',
    'atand': 't_atand',
    'atan2': 't_atan2',
    'atan2d': 't_atan2d',
    'cos': 't_cos',
    'cosd': 't_cosd',
    'cot': 't_cot',
    'cotd': 't_cotd',
    'sin': 't_sin',
    'sind': 't_sind',
    'tan': 't_tan',
    'tand': 't_tand',
    'sinh': 't_sinh',
    'cosh': 't_cosh',
    'tanh': 't_tanh',
    'asinh': 't_asinh',
    'acosh': 't_acosh',
    'atanh': 't_atanh',
    'min_scale': 't_min_scale',
    'scale': 't_scale',
    'sign': 't_sign',
    'sqrt': 't_sqrt',
    'trim_scale': 't_trim_scale',
    'trunc': 't_trunc',
    'width_bucket': 't_width_bucket',
    'random': 't_random',
    'setseed': 't_setseed',
    'length': 't_length',
    'substring': 't_substring',
    'trim': 't_trim',
    'md5': 't_md5',
    'sha256': 't_sha256',
    'substr': 't_substr',
    'get_byte': 't_get_byte',
    'set_byte': 't_set_byte',
    'convert': 't_convert',
    'encode': 't_encode',
    'decode': 't_decode'
}
# Names of the punctuation/operator/literal tokens; the reserved-word
# token names from `reservadas` are appended so PLY sees the full set.
tokens = [
    'par1',
    'par2',
    'cor1',
    'cor2',
    'asterisco',
    'mas',
    'menos',
    'pyc',
    'coma',
    'div',
    'punto',
    'igual',
    'menor',
    'mayor',
    'menori',
    'mayori',
    'diferente',
    'porcentaje',
    'diferentede',
    'pot',
    'bipunto',
    'id',
    'decimal',
    'entero',
    'char',
    'string'
] + list(reservadas.values())
# Tokens
# Simple (string-valued) token regexes.  PLY sorts these by decreasing
# regex length, so multi-character operators like '>=' match before '>'.
t_par1 = r'\('
t_par2 = r'\)'
t_cor1 = r'\['
t_cor2 = r'\]'
t_pyc = r';'
t_punto = r'\.'
t_coma = r'\,'
t_igual = r'\='
t_mas = r'\+'
t_menos = r'-'
t_asterisco = r'\*'
t_div = r'/'
t_mayor = r'>'
t_menor = r'<'
t_mayori = r'>='
t_menori = r'<='
t_diferente = r'!='
t_porcentaje = r'\%'
t_pot = r'\^'
t_bipunto = r'::'
t_diferentede = r'<>'
def t_decimal(t):
    r'\d+\.\d+'
    # Convert the matched text to a float.  The regex docstring is the PLY
    # token pattern and must not change.  BUG FIX: the original passed
    # t.value as a second argument to print() instead of %-formatting the
    # message, so the placeholder was printed literally.
    try:
        t.value = float(t.value)
    except ValueError:
        print("El valor es muy grande %s" % t.value)
        t.value = 0
    return t
def t_entero(t):
    r'\d+'
    # Convert the matched text to an int.  The regex docstring is the PLY
    # token pattern and must not change.  BUG FIX: the original passed
    # t.value as a second argument to print() instead of %-formatting the
    # message, so the placeholder was printed literally.
    try:
        t.value = int(t.value)
    except ValueError:
        print("El valor de integer es muy grande %s" % t.value)
        t.value = 0
    return t
def t_char(t):
    r'\'.*?\''
    # Single-quoted literal: keep only the text between the delimiters.
    # (The raw-string docstring above is the PLY token regex.)
    t.value = t.value[1:len(t.value) - 1]
    return t
def t_string(t):
    r'\".*?\"'
    # Double-quoted literal: keep only the text between the delimiters.
    # (The raw-string docstring above is the PLY token regex.)
    t.value = t.value[1:len(t.value) - 1]
    return t
def t_id(t):
    r'[a-zA-Z_][a-zA-Z_0-9]*'
    # Identifier token; reserved words (case-insensitive) are re-typed to
    # their dedicated token name, everything else stays 'id'.
    t.type = reservadas.get(t.value.lower(), 'id')  # Check for reserved words
    return t
def t_COMENTARIO_MULTILINEA(t):
    r'/\*(.|\n)*?\*/'
    # Block comment /* ... */: discarded (no return), but the line counter
    # is advanced so later error positions stay correct.
    t.lexer.lineno += t.value.count('\n')
# Single-line comment: -- ... (discarded; advances the line counter)
def t_COMENTARIO_SIMPLE(t):
    r'--.*\n'
    t.lexer.lineno += 1
# Ignored characters: spaces and tabs (PLY skips them between tokens)
t_ignore = " \t"
def t_newline(t):
    r'\n+'
    # Count newlines so lexer.lineno tracks the current source line.
    t.lexer.lineno += t.value.count("\n")
def find_column(input, token):
    """Return the 1-based column at which *token* starts within *input*."""
    # Position of the newline preceding the token (-1 when the token is on
    # the first line); the column is simply the offset past that newline.
    preceding_newline = input.rfind('\n', 0, token.lexpos)
    return token.lexpos - preceding_newline
def t_error(t):
    # Lexical-error handler: record the bad character and skip past it.
    # NOTE(review): `ListaErrores` and `input` are not defined in this
    # chunk — presumably module-level state set elsewhere; `input` shadows
    # the builtin and should hold the source text being lexed — confirm.
    ListaErrores.insertar(err.Nodo_Error("Lexico", "Caracter no valido '%s'" % t.value[0],
                          t.lineno, find_column(input, t)))
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)
# Build the lexer from the t_* definitions above.
lexer = lex.lex()
# Operator precedence/associativity for the parser; per PLY convention,
# earlier rows bind less tightly than later rows.
precedence = (
    ('left', 'punto', 'bipunto'),
    ('left', 'coma'),
    ('right', 'igual'),
    ('left', 'cor1', 'cor2'),
    ('left', 'mas', 'menos'),
    ('left', 'asterisco', 'div', 'porcentaje'),
    ('left', 'pot'),
    ('right', 'umenos', 'umas'),
    ('left', 'par1', 'par2'),
    # Between , in , like, ilike, simiar, is isnull notnull
    ('left', 't_or'),
    ('left', 't_and'),
    ('left', 'diferente'),
    ('left', 'mayor', 'menor', 'mayori', 'menori'),
    ('right', 't_not')
)
from AST.Expresiones import *
from AST.SentenciasDML import *
reporteg = ''
def p_sql(p):
    'SQL : Sentencias_SQL'
    # Start production: wrap the statement list in the AST root node
    # (the single-quoted string above is the PLY grammar rule).
    p[0] = Raiz(ListaErrores, p[1])
def p_sql2(p):
    'SQL : empty'
    # Empty input still produces a root node (carrying only the errors).
    p[0] = Raiz(ListaErrores)
def p_Sentencias_SQL_Sentencia_SQL(p):
    'Sentencias_SQL : Sentencias_SQL Sentencia_SQL'
    # Left-recursive statement list: append the new statement.
    p[0] = p[1] + [p[2]]
    concatenar_gramatica('\n <TR><TD> SENTENCIAS_SQL ::= SENTENCIAS_SQL SENTENCIA_SQL </TD> <TD> { sentencias_sql.lista = sentencias_sql.lista.add(sentencia_sql.lista) } </TD></TR> ')
def p_Sentencias_SQL(p):
    'Sentencias_SQL : Sentencia_SQL'
    # Base case: a single statement starts the list.
    p[0] = [p[1]]
    concatenar_gramatica('\n <TR><TD> SENTENCIAS_SQL ::= SENTENCIA_SQL </TD> <TD> { sentencias_sql.lista = [sentencia_sql] } </TD></TR>')
def p_Sentencia_SQL_DML(p):
    'Sentencia_SQL : Sentencias_DML'
    # Wrap a DML statement in the generic Sentencia node.
    p[0] = Sentencia("SentenciaDML", [p[1]])
    concatenar_gramatica('\n <TR><TD> SENTENCIA_SQL ::= SENTENCIAS_DML </TD> <TD> { sentencia_sql.inst = sentencias_dml.inst } </TD></TR>')
#def p_Sentencia_SQL_DML(p):
# 'Sentencia_SQL : EXP pyc'
# p[0] = Sentencia("EXP", [p[1]])
def p_Sentencia_SQL_DDL(p):
    'Sentencia_SQL : Sentencias_DDL'
    # Wrap a DDL statement in the generic Sentencia node.
    p[0] = Sentencia("SentenciaDDL", [p[1]])
    concatenar_gramatica('\n <TR><TD> SENTENCIA_SQL ::= SENTENCIAS_DDL </TD> <TD> { sentencia_sql.inst = sentencias_dll.inst } </TD></TR>')
# -------------------------------------------------------------SENTENCIAS DML
def p_Sentencias_DML(p):
    '''Sentencias_DML : t_select Lista_EXP Select_SQL Condiciones pyc
        | t_select asterisco Select_SQL Condiciones pyc
        | t_insert t_into id Insert_SQL pyc
        | t_update id t_set Lista_EXP t_where EXP pyc
        | t_delete t_from id Condiciones pyc
        | t_use id pyc'''
    # Dispatch on the leading keyword of whichever alternative matched.
    if p[1] == 'select':
        p[0] = Select(p[2], p[3], p[4], p.slice[2].lineno, find_column(input, p.slice[2]))
        concatenar_gramatica('\n <TR><TD> SENTENCIAS_DML ::= select' + str(p[2]) + 'SELECT_SQL ; </TD><TD> { sentencias_dml.inst = select(lista_exp.lista, Select_SQL.val,Condiciones.val)} </TD></TR>')
    elif p[1] == 'insert':
        p[0] = Insert(p[3], p[4], p.slice[1].lineno, find_column(input, p.slice[1]))
        concatenar_gramatica('\n <TR><TD> SENTENCIAS_DML ::= insert into id INSERT_SQL ; </TD> <TD> {sentencias_dml.inst = insert(id,Insert_SQL.inst)} </TD></TR>')
    elif p[1] == 'update':
        # NOTE(review): the update/delete/use branches never assign p[0],
        # so these statements become None in the AST — confirm whether
        # Update/Delete/Use nodes were intended here.
        concatenar_gramatica('\n <TR><TD> SENTENCIAS_DML ::= update id set LISTA_EXP where EXP ; </TD> <TD> {sentencias_dml.inst = update(id, lista_exp.list, exp.val)} </TD></TR>')
    elif p[1] == 'delete':
        concatenar_gramatica('\n <TR><TD> SENTENCIAS_DML ::= delete from id CONDICIONES ; </TD> <TD> { sentencias_dml.inst = delete(id, Condiciones.val) } </TD></TR>')
    else:
        concatenar_gramatica('\n <TR><TD> SENTENCIAS_DML ::= use database id ; </TD> <TD> {sentencias_dml.inst = use(id)} </TD></TR>')
def p_Select_SQL(p):
    'Select_SQL : t_from Table_Expression'
    # FROM clause: pass the table expression through.
    p[0] = p[2]
    concatenar_gramatica('\n <TR><TD> SELECT_SQL ::= from TABLE_EXPRESSION CONDICIONES </TD> <TD> { select_sql.val = Table_Expression.val } </TD></TR>')
def p_Select2_SQL(p):
    'Select_SQL : empty'
    # SELECT without FROM (e.g. SELECT 1+1;): no source tables.
    p[0] = []
    concatenar_gramatica('\n <TR><TD> SELECT_SQL ::= EMPTY </TD> <TD> { select_sql.val = empty.val }</TD></TR>')
def p_Table_Expression(p):
    '''Table_Expression : Alias_Tabla
                        | Subqueries'''
    p[0] = p[1]
    concatenar_gramatica('\n <TR><TD> TABLE_EXPRESSION ::= ' + str(p[1]) + '</TD> <TD> { table_expression.val = ' + str(p[1]) + '.val } </TD></TR>')
def p_Alias_Tabla(p):
    '''Alias_Tabla : Lista_ID
                   | Lista_Alias'''
    # Either plain table names or "name alias" pairs.
    p[0] = p[1]
    concatenar_gramatica('\n <TR><TD> ALIAS_TABLA ::= ' + str(p[1]) + '</TD> <TD> { alias_tabla.val = ' + str(p[1]) + '.list } </TD></TR>')
def p_Subqueries(p):
    '''Subqueries : par1 t_select par2'''
    # Placeholder subquery rule; no value is produced yet (p[0] stays None).
    concatenar_gramatica('\n <TR><TD> SUBQUERIES ::= ( select )</TD> <TD> { subqueries.inst = select() } </TD></TR>')
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> INSERT <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
def p_Insert_SQL(p):
    'Insert_SQL : par1 Lista_ID par2 t_values par1 Lista_EXP par2'
    # INSERT with explicit column list; only the VALUES list is propagated.
    p[0] = p[6]
    concatenar_gramatica('\n <TR><TD> INSERT_SQL ::= ( LISTA_ID ) values ( LISTA_EXP ) </TD> <TD> { insert_sql.inst = insert1(lista_id.lista,lista_exp.lista)} </TD></TR>')
def p_Insert_SQL2(p):
    'Insert_SQL : t_values par1 Lista_EXP par2'
    # INSERT without a column list.
    p[0] = p[3]
    concatenar_gramatica('\n <TR><TD> INSERT_SQL ::= values ( LISTA_EXP ) </TD> <TD> { insert_sql.inst = insert1(lista_exp.lista)} </TD></TR>')
def p_Condiciones(p):
    '''Condiciones : t_where EXP
                   | empty'''
    # Optional WHERE clause: returns the filter expression, or [] when absent.
    if len(p) == 3:
        # BUG FIX: was p[0] = p[1], which stored the 'where' keyword token;
        # the condition value is the EXP (matches the report's exp.val).
        p[0] = p[2]
        concatenar_gramatica('\n <TR><TD> CONDICIONES ::= where EXP </TD> <TD> condiciones.val = exp.val </TD></TR>')
    else:
        p[0] = []
        concatenar_gramatica('\n <TR><TD> INSERT_SQL ::= EMPTY </TD> <TD> { insert_sql.val = empty.val }</TD></TR>')
# ---------------------------- DDL statements and Enum Type --------------
def p_Sentencias_DDL(p):
    '''Sentencias_DDL : t_show t_databases pyc
                      | Enum_Type
                      | t_drop Drop pyc
                      | t_alter Alter pyc
                      | t_create Create pyc'''
    # The Enum_Type alternative yields an AST node (CreateType), not a string,
    # so that class check must run before any .upper() keyword comparison.
    if p[1].__class__.__name__ == 'CreateType':
        p[0] = p[1]
        concatenar_gramatica('\n <TR><TD> SENTENCIAS_DDL ::= ENUM_TYPE </TD> <TD> { sentencias_ddl.inst = enum_type.inst } </TD></TR>')
    elif p[1].upper() == 'SHOW':
        p[0] = DDL.ShowDatabases(p.slice[1].lineno, find_column(input, p.slice[1]))
        concatenar_gramatica('\n <TR><TD> SENTENCIAS_DDL ::= show databases ; </TD> <TD> { sentencias_ddl.inst = show() } </TD></TR>')
    elif p[1].upper() == 'CREATE':
        # NOTE(review): create/drop/alter currently produce no AST node.
        p[0] = None
        concatenar_gramatica('\n <TR><TD> SENTENCIAS_DDL ::= create CREATE ; </TD> <TD> { sentencias_ddl.inst = create.inst} </TD></TR>')
    elif p[1].upper() == 'DROP':
        p[0] = None
        concatenar_gramatica('\n <TR><TD> SENTENCIAS_DDL ::= drop Drop ; </TD> <TD> { sentencias_ddl.inst = drop.inst } </TD></TR>')
    elif p[1].upper() == 'ALTER':
        p[0] = None
        concatenar_gramatica('\n <TR><TD> SENTENCIAS_DDL ::= alter ALTER ; </TD> <TD> { sentencias_ddl.inst = alter.inst }</TD></TR>')
    else:
        p[0] = None
def p_Enum_Type(p):
    'Enum_Type : t_create t_type id t_as t_enum par1 Lista_Enum par2 pyc'
    # CREATE TYPE name AS ENUM ('a', 'b', ...); type names are lower-cased.
    p[0] = DDL.CreateType(p[3].lower(), p[7], p.slice[1].lineno, find_column(input, p.slice[1]))
    concatenar_gramatica('\n <TR><TD> ENUM_TYPE ::= create type id as enum ( LISTA_ENUM ) ; </TD> <TD> { enum_type.inst = createType(id,lista_Enum.val) } </TD></TR>')
def p_Drop(p):
    '''Drop : t_database DropDB id
            | t_table id '''
    # Report-only for now: no AST node is produced.
    if p[1] == 'database':
        concatenar_gramatica('\n <TR><TD> DROP ::= database DROPDB id </TD> <TD> { drop.inst = dropBD( id) } </TD></TR>')
    else:
        concatenar_gramatica('\n <TR><TD> DROP ::= table id </TD> <TD> {drop.inst = dropTb( id )} </TD></TR>')
def p_DropDB(p):
    '''DropDB : t_if t_exists
              | empty'''
    # Optional IF EXISTS modifier; report-only.
    if p[1] == 'if':
        concatenar_gramatica('\n <TR><TD> DROPDB ::= if exists </TD> <TD> { dropdb.val = if exists} </TD></TR>')
    else:
        concatenar_gramatica('\n <TR><TD> DROPDB ::= EMPTY </TD> <TD> { dropdb.val = empty.val } </TD></TR>')
def p_Alter(p):
'''Alter : t_database id AlterDB
| t_table id AlterTB '''
if p[1] == 'database':
concatenar_gramatica('\n <TR><TD> ALTER ::= database id ALTERDB </TD> <TD> alter.inst = alterDB( id,alterdb.inst ) </TD></TR>')
else:
concatenar_gramatica('\n <TR><TD> ALTER ::= table id ALTERTB </TD> <TD> alter.inst = altertb(id, altertb.inst) </TD></TR>')
def p_AlterDB(p):
''' AlterDB : t_rename t_to id
| t_owner t_to SesionDB '''
if p[1] == 'rename':
concatenar_gramatica('\n <TR><TD> ALTERDB ::= rename to id </TD> <TD> { alterdb.val = rename id } </TD></TR>')
else:
concatenar_gramatica('\n <TR><TD> ALTERDB ::= owner to SESIONDB </TD> <TD> { alterdb.val = owner sessiondb.val} </TD> </TR>')
def p_SesionDB(p):
''' SesionDB : id
| t_current_user
| t_session_user '''
if p[1] == 'current_user':
concatenar_gramatica('\n <TR><TD> SESSIONDB ::= current_user </TD> <TD> { sessiondb.val = current_user } </TD></TR>')
elif p[1] == 'session_user':
concatenar_gramatica('\n <TR><TD> SESSIONDB ::= session_user </TD> <TD> { sessiondb.val = session_user } </TD></TR>')
else:
concatenar_gramatica('\n <TR><TD> SESSIONDB ::= id </TD> <TD> { sessiondb.val = id } </TD></TR>')
def p_AlterTB(p):
''' AlterTB : t_add Add_Opc
| t_drop Drop_Opc
| t_alter t_column Alter_Column
| t_rename t_column id t_to id '''
if p[1] == 'add':
concatenar_gramatica('\n <TR><TD> ALTERTB ::= add ADD_OPC </TD> <TD> { altertb.inst = add(add_Opc.val) } </TD></TR>')
elif p[1] == 'drop':
concatenar_gramatica('\n <TR><TD> ALTERTB ::= drop DROP_OPC </TD> <TD> { altertb.inst = drop(drop_opc.val) } </TD></TR>')
elif p[1] == 'alter':
concatenar_gramatica('\n <TR><TD> ALTERTB ::= alter column ALTER_COLUMN </TD> <TD> { altertb.inst = alter(alter_column.val) } </TD></TR>')
elif p[1] == 'rename':
concatenar_gramatica('\n <TR><TD> ALTERTB ::= rename column id to id </TD> <TD> { altertb.inst = rename(id1,id2) } </TD></TR>')
def p_Add_Opc(p):
'''Add_Opc : t_column id Tipo
| t_foreign t_key par1 id par2 t_references id
| t_constraint id t_unique par1 id par2
| t_check EXP '''
if p[1] == 'column':
concatenar_gramatica('\n <TR><TD> ADD_OPC ::= column id TIPO </TD> <TD> { add_opc.inst = column(id, tipo.type) } </TD></TR>')
elif p[1] == 'foreign':
concatenar_gramatica('\n <TR><TD> ADD_OPC ::= foreign key ( id ) references id </TD> <TD> { add_opc.isnt = foreign(id1,id2)} </TD></TR>')
elif p[1] == 'constraint':
concatenar_gramatica('\n <TR><TD> ADD_OPC ::= constraint id unique ( id ) </TD> <TD> { add_opc.inst = constraint(id1,id2)} </TD></TR>')
elif p[1] == 'check':
concatenar_gramatica('\n <TR><TD> ADD_OPC ::= check EXP </TD> <TD> {add_opc.inst = check( exp.val )} </TD></TR>')
def p_Drop_Opc(p):
''' Drop_Opc : t_column id
| t_constraint id '''
if p[1] == 'column':
concatenar_gramatica('\n <TR><TD> DROP_OPC ::= column id TIPO </TD> <TD> {drop_opc.val = column,id }</TD></TR>')
elif p[1] == 'constraint':
concatenar_gramatica('\n <TR><TD> DROP_OPC ::= foreign key ( id ) references id </TD> <TD> { drop_opc.val = constraint,id} </TD></TR>')
def p_Alter_Column(p):
''' Alter_Column : id t_set t_not t_null
| Alter_Columns'''
if len(p) == 5:
concatenar_gramatica('\n <TR><TD> ALTER_COLUMN ::= id set not null </TD> <TD> { alter_column.val = id} </TD></TR>')
else:
concatenar_gramatica('\n <TR><TD> ALTER_COLUMN ::= ALTER_COLUMNS </TD> <TD> { alter_column.val = alter_columns.val } </TD></TR>')
def p_Alter_Columns(p):
''' Alter_Columns : Alter_Columns coma Alter_Column1
| Alter_Column1'''
if len(p) == 4:
concatenar_gramatica('\n <TR><TD> ALTER_COLUMNS ::= ALTER_COLUMNS , ALTER_COLUMN1 </TD> <TD> { alter_columns.lista.add(alter_column1.val) } </TD></TR>')
else:
concatenar_gramatica('\n <TR><TD> ALTER_COLUMNS ::= ALTER_COLUMN1 </TD> <TD> { alter_columns.lista = [alter_column.val]} </TD></TR>')
def p_Alter_Colum1(p):
'''Alter_Column1 : id t_type t_varchar par1 entero par2
| t_alter t_column id t_type t_varchar par1 entero par2'''
if p[1] == 'alter':
concatenar_gramatica('\n <TR><TD> ALTER_COLUMN1 ::= alter column id type varchar ( entero ) </TD> <TD> { alter_Column.inst = alter_column(id,varchar,entero) }</TD></TR>')
else:
concatenar_gramatica('\n <TR><TD> ALTER_COLUMN1 ::= id type varchar ( entero ) </TD> <TD> { alter_Column.inst = alter_Column(id,varchar,entero)} </TD></TR>')
def p_Create(p):
'Create : CreateDB'
p[0] = p[1]
concatenar_gramatica('\n <TR><TD> CREATE ::= CREATEDB </TD> <TD> { create.val = createDB.val } </TD></TR>')
def p_Create1(p):
'Create : CreateTB '
concatenar_gramatica('\n <TR><TD> CREATE ::= CREATETB </TD> <TD> { create.val = createtb.val }</TD></TR>')
def p_CreateDB(p):
'''CreateDB : t_database Op1_DB
| t_or t_replace t_database Op1_DB'''
if len(p) == 3:
p[0] = p[1]
concatenar_gramatica('\n <TR><TD> CREATEDB ::= database OP1_DB </TD> <TD> { createdb.inst = op1_db.val } </TD></TR>')
else:
p[0] = p[1]
concatenar_gramatica('\n <TR><TD> CREATEDB ::= or replace database OP1_DB </TD> <TD> { createdb.inst = replace(op1_db.val)} </TD></TR>')
def p_Op1_DB(p):
''' Op1_DB : t_if t_not t_exists id Sesion
| id Sesion'''
if p[1] == 'if':
p[0] = p[1]
concatenar_gramatica('\n <TR><TD> OP1_DB ::= if not exists id SESION </TD> <TD> { op1_db = id ,sesion.val} </TD></TR>')
else:
p[0] = p[1]
concatenar_gramatica('\n <TR><TD> OP1_DB ::= id SESION </TD> <TD> { op1_db = id, sesion.val} </TD></TR>')
def p_Sesion(p):
''' Sesion : t_owner Op_Sesion Sesion_mode
| t_mode Op_Sesion
| empty '''
if p[1] == 'owner':
concatenar_gramatica('\n <TR><TD> SESION ::= owner OP_SESION SESION_MODE </TD> <TD> { session.val = owner Op_Sesion} </TD></TR>')
elif p[1] == 'mode':
concatenar_gramatica('\n <TR><TD> SESION ::= mode OP_SESION </TD> <TD> { session.val = mode Op_Sesion.val} </TD></TR>')
else:
concatenar_gramatica('\n <TR><TD> SESION ::= EMPTY </TD> <TD> { session.val = empty.val } </TD></TR>')
def p_Op_Sesion(p):
    ''' Op_Sesion : igual id
                  | id '''
    # Owner/mode value, optionally preceded by '='.
    # BUG FIX: the bare-id alternative has only p[1]; unconditionally reading
    # p[2] raised IndexError for it.
    if len(p) == 3:
        p[0] = p[2]
    else:
        p[0] = p[1]
    concatenar_gramatica('\n <TR><TD> OP_SESION ::= = id </TD> <TD> { Op_sesion.val = id } </TD></TR>')
def p_Sesion_mode(p):
''' Sesion_mode : t_mode Op_Sesion
| empty '''
if len(p) == 3:
p[0] = p[2]
concatenar_gramatica('\n <TR><TD> SESION_MODE ::= mode OP_SESION </TD> <TD> {sesion_mode.val = mode op_sesion.val} </TD></TR>')
else:
p[0] = p[1]
concatenar_gramatica('\n <TR><TD> SESION_MODE ::= EMPTY </TD> <TD> { sesion_mode.val = empty.val } </TD></TR>')
def p_CreateTB(p):
'CreateTB : t_table id par1 Columnas par2 Inherits '
concatenar_gramatica('\n <TR><TD> CREATETB ::= table id ( COLUMNAS ) INHERITS </TD> <TD> {createtb.inst = createtb(id,columnas.val,inherits.val)} </TD></TR>')
def p_Inherits(p):
''' Inherits : t_inherits par1 id par2
| empty '''
if len(p) == 5:
concatenar_gramatica('\n <TR><TD> INHERITS ::= inherits ( id ) </TD> <TD> { inherits.inst = inherits(id) } </TD></TR>')
else:
p[0] = p[1]
concatenar_gramatica('\n <TR><TD> INHERITS ::= EMPTY </TD> <TD> { inherits.inst = empty.val } </TD></TR>')
def p_Columnas(p):
''' Columnas : Columnas coma Columna
| Columna '''
if len(p) == 4:
concatenar_gramatica('\n <TR><TD> COLUMNAS ::= COLUMNAS , COLUMNA </TD> columnas.val = concatenar(columna.aux , columna.val) <TD> </TD></TR>')
else:
p[0] = p[1]
concatenar_gramatica('\n <TR><TD> COLUMNAS ::= COLUMNA </TD> <TD> columnas.aux = columna.val </TD></TR>')
def p_Columna(p):
''' Columna : id Tipo Cond_CreateTB
| Constraint'''
if len(p) == 4:
concatenar_gramatica('\n <TR><TD> COLUMNA ::= id TIPO COND_CREATETB </TD> <TD> {columna.val = cond_createtb.val} </TD></TR>')
else:
p[0] = p[1]
concatenar_gramatica('\n <TR><TD> COLUMNA ::= CONSTRAINT </TD> <TD> { columna.val = constraint.val} </TD></TR>')
def p_Cond_CreateTB(p):
''' Cond_CreateTB : t_default id Cond_CreateTB
| t_not t_null Cond_CreateTB
| t_null Cond_CreateTB
| t_constraint id Opc_Constraint Cond_CreateTB
| t_primary t_key Cond_CreateTB
| t_references id Cond_CreateTB
| empty'''
if p[1] == 'default':
concatenar_gramatica('\n <TR><TD> COND_CREATETB ::= default id COND_CREATETB </TD> <TD> { cond_createtb.val = id cond_createtb.val } </TD></TR>')
elif p[1] == 'not':
concatenar_gramatica('\n <TR><TD> COND_CREATETB ::= not null COND_CREATETB </TD> <TD> { cond_createtb.val = cond_createtb.val } </TD></TR>')
elif p[1] == 'null':
concatenar_gramatica('\n <TR><TD> COND_CREATETB ::= null COND_CREATETB </TD> { cond_createtb.val = cond_createtb.val } </TD></TR>')
elif p[1] == 'constraint':
concatenar_gramatica('\n <TR><TD> COND_CREATETB ::= constraint id OPC_CONSTRAINT COND_CREATETB </TD> { cond_createtb.val = id opc_constraint.val cond_createtb.val } <TD> </TD></TR>')
elif p[1] == 'primary':
concatenar_gramatica('\n <TR><TD> COND_CREATETB ::= primary key COND_CREATETB </TD> <TD> { cond_createtb.val = cond_createtb.val } </TD></TR>')
elif p[1] == 'references':
concatenar_gramatica('\n <TR><TD> COND_CREATETB ::= references id COND_CREATETB </TD> <TD> { cond_createtb.val = id cond_createtb.val } </TD></TR>')
else:
concatenar_gramatica('\n <TR><TD> COND_CREATETB ::= EMPTY </TD> <TD> { cond_createtb.val = empty.val } </TD></TR>')
def p_Opc_Constraint(p):
''' Opc_Constraint : t_unique
| t_check par1 EXP par2 '''
if p[1] == 'unique':
concatenar_gramatica('\n <TR><TD> OPC_CONSTRAINT ::= unique </TD> <TD> { opc_constraint.val = unique }</TD></TR>')
elif p[1] == 'check':
concatenar_gramatica('\n <TR><TD> OPC_CONSTRAINT ::= check ( EXP ) </TD> <TD> { opc_constraint.val = check exp.val } </TD></TR>')
def p_Constraint(p):
''' Constraint : t_unique par1 Lista_ID par2
| t_constraint id t_check par1 EXP par2
| t_check par1 EXP par2
| t_primary t_key par1 Lista_ID par2
| t_foreign t_key par1 Lista_ID par2 t_references id par1 Lista_ID par2
| empty '''
if p[1] == 'unique':
concatenar_gramatica('\n <TR><TD> CONSTRAINT ::= unique ( LISTA_ID ) </TD> <TD> {constraint.inst = unique(lista_id.list)}</TD></TR>')
elif p[1] == 'constraint':
concatenar_gramatica('\n <TR><TD> CONSTRAINT ::= constraint id check ( EXP ) </TD> <TD> {constraint.inst = constraint(id,check,exp.val)}</TD></TR>')
elif p[1] == 'check':
concatenar_gramatica('\n <TR><TD> CONSTRAINT ::= check ( EXP ) </TD> <TD> { constraint.inst = check(exp.val)} </TD></TR>')
elif p[1] == 'primary':
concatenar_gramatica('\n <TR><TD> CONSTRAINT ::= primary key ( LISTA_ID ) </TD> <TD> {constraint.inst = primary(lista_id.list)} </TD> </TR>')
elif p[1] == 'foreign':
concatenar_gramatica('\n <TR><TD> CONSTRAINT ::= foreign key ( LISTA_ID ) references id ( LISTA_ID ) </TD> <TD> { constraint.inst = foreign(lista_id.lista,id,lista_id.lista)} </TD></TR>')
else:
concatenar_gramatica('\n <TR><TD> CONSTRAINT ::= EMPTY </TD> <TD> { constraint.inst = empty.val } </TD></TR>')
def p_Tipo(p):
    ''' Tipo : t_smallint
             | t_integer
             | t_bigint
             | t_decimal
             | t_numeric
             | t_real
             | t_double t_precision
             | t_money
             | t_character t_varying par1 Valor par2
             | t_varchar par1 Valor par2
             | t_character par1 Valor par2
             | t_charn par1 Valor par2
             | t_text
             | t_boolean '''
    # Column type: only the leading keyword is kept; any (n) size argument is
    # dropped here (it is parsed but not propagated).
    p[0] = p[1]
    concatenar_gramatica('\n <TR><TD> TIPO ::= ' + str(p[1]) + '</TD> <TD> { tipo.type = ' + str(p[1]) + ' } </TD></TR>')
def p_Valor(p):
    ''' Valor : decimal
              | entero
              | string
              | char
              | t_true
              | t_false'''
    # Literal value: wrap in an Expression tagged with the token type.
    p[0] = Expression(p[1], p.slice[1].lineno, find_column(input, p.slice[1]), p.slice[1].type)
    concatenar_gramatica('\n <TR><TD> VALOR ::= ' + str(p[1]) + '</TD> <TD> { Valor.val = ' + str(p[1]) + '} </TD></TR>')
def p_Valor2(p):
    'Valor : id'
    # Identifier used as a value (no explicit type tag).
    p[0] = Expression(p[1], p.slice[1].lineno, find_column(input, p.slice[1]))
    concatenar_gramatica('\n <TR><TD> VALOR ::= id </TD> <TD> { valor.val = ' + str(p[1]) + ' } </TD></TR>')
def p_empty(p):
    'empty :'
    # Epsilon production; empty list so callers can append safely.
    p[0] = []
    concatenar_gramatica('\n <TR><TD> EMPTY ::= epsilon </TD> <TD> { } </TD></TR>')
# ----------------------------EXPRESIONES Y OPERACIONES---------------------------------------------------------------
def p_aritmeticas(p):
    '''EXP : EXP mas EXP
           | EXP menos EXP
           | EXP asterisco EXP
           | EXP div EXP
           | EXP pot EXP
           | EXP porcentaje EXP'''
    # Binary arithmetic: Expression(left, right, operator, line, column, kind).
    p[0] = Expression(p[1], p[3], p.slice[2].value, p.slice[2].lineno, find_column(input, p.slice[2]),'Aritmetica')
    concatenar_gramatica('\n <TR><TD> EXP ::= EXP' + str(p[2]) + ' EXP </TD> <TD> { Exp = Exp1.val ' + str(p[2]) + ' Exp2.val } </TD></TR>')
def p_parentesis(p):
    'EXP : par1 EXP par2'
    # Parenthesized expression: just unwrap.
    p[0] = p[2]
    concatenar_gramatica('\n <TR><TD> EXP ::= ( EXP ) </TD> <TD> { exp.val = exp1.val } </TD></TR>')
def p_relacionales(p):
    '''EXP : EXP mayor EXP
           | EXP mayori EXP
           | EXP menor EXP
           | EXP menori EXP
           | EXP igual EXP
           | EXP diferente EXP
           | EXP diferentede EXP'''
    # Binary comparison operators.
    p[0] = Expression(p[1], p[3], p.slice[2].value, p.slice[2].lineno, find_column(input, p.slice[2]), 'Relacional')
    concatenar_gramatica('\n <TR><TD> EXP ::= EXP' + str(p[2]) + ' EXP </TD> <TD> { Exp = Exp1.val ' + str(p[2]) + ' Exp2.val } </TD></TR>')
def p_logicos(p):
    '''EXP : EXP t_and EXP
           | EXP t_or EXP
    '''
    # Boolean AND / OR.
    p[0] = Expression(p[1], p[3], p.slice[2].value, p.slice[2].lineno, find_column(input, p.slice[2]), 'Logica')
    concatenar_gramatica('\n <TR><TD> EXP ::= EXP' + str(p[2]) + ' EXP </TD> <TD> { Exp = Exp1.val ' + str(p[2]) + ' Exp2.val } </TD></TR>')
def p_unario(p):
    '''EXP : mas EXP %prec umas
           | menos EXP %prec umenos
           | t_not EXP'''
    # Unary operators: +, - (via the umas/umenos %prec tokens) and NOT.
    if p[1] == 'not':
        p[0] = Expression(p.slice[1].value, p[2], p.slice[2].lineno, find_column(input, p.slice[2]), 'Unario')
        concatenar_gramatica('\n <TR><TD> EXP ::= not EXP </TD> <TD> { Exp = Exp1.val } </TD></TR>')
    else:
        p[0] = Expression(p.slice[1].value, p[2], p.slice[2].lineno, find_column(input, p.slice[2]), 'Unario')
        # BUG FIX: the report string referenced p[4], which does not exist in a
        # two-symbol production and raised IndexError on every unary +/-.
        concatenar_gramatica('\n <TR><TD> EXP ::= ' + str(p[1]) + 'EXP </TD> <TD> { exp = exp1.val } </TD></TR>')
def p_EXP_Valor(p):
'EXP : Valor'
p[0] = p[1]
concatenar_gramatica('\n <TR><TD> EXP ::= VALOR </TD> <TD> { exp.val = valor.val } </TD></TR>')
def p_EXP_Indices(p):
'''EXP : id punto id'''
p[0] = p[1]
concatenar_gramatica('\n <TR><TD> EXP ::= id . id </TD> <TD> { exp.val = id1.val . id2.val } </TD></TR>')
def p_EXP_IndicesAS(p):
'''EXP : EXP t_as EXP'''
p[0] = p[1]
concatenar_gramatica('\n <TR><TD> EXP ::= EXP as EXP </TD> <TD> { exp.val = exp1.val as exp2.val } </TD></TR>')
def p_exp_agregacion(p):
'''EXP : t_avg par1 EXP par2
| t_sum par1 EXP par2
| t_count par1 EXP par2
| t_max par1 EXP par2
| t_min par1 EXP par2'''
concatenar_gramatica('\n <TR><TD> EXP ::= ' + str(p[1]) + '( EXP ) </TD> <TD> { exp.val = ' + str(p[1]) + ' ( exp1.val ) } </TD></TR>')
def p_funciones_matematicas(p):
''' EXP : t_abs par1 EXP par2
| t_cbrt par1 EXP par2
| t_ceil par1 EXP par2
| t_ceiling par1 EXP par2
| t_degrees par1 EXP par2
| t_exp par1 EXP par2
| t_factorial par1 EXP par2
| t_floor par1 EXP par2
| t_ln par1 EXP par2
| t_log par1 EXP par2
| t_pi par1 par2
| t_radians par1 EXP par2
| t_round par1 EXP par2
| t_min_scale par1 EXP par2
| t_scale par1 EXP par2
| t_sign par1 EXP par2
| t_sqrt par1 EXP par2
| t_trim_scale par1 EXP par2
| t_trunc par1 EXP par2
| t_width_bucket par1 Lista_EXP par2
| t_random par1 par2
| t_setseed par1 EXP par2'''
concatenar_gramatica('\n <TR><TD> EXP ::= ' + str(p[1]) + '( EXP ) </TD> <TD> { exp.val = ' + str(p[1]) + ' ( exp1.val ) } </TD></TR>')
def p_funciones_matematicas2(p):
    ''' EXP : t_div par1 EXP coma EXP par2
            | t_gcd par1 EXP coma EXP par2
            | t_mod par1 EXP coma EXP par2
            | t_power par1 EXP coma EXP par2'''
    # Two-argument math functions: div, gcd, mod, power. Report-only for now.
    # BUG FIX: this function was also named p_funciones_matematicas, which
    # silently replaced the one-argument version defined above -- PLY discovers
    # rule handlers by function name, so those grammar rules were lost.
    # Renamed so both rule sets are registered.
    concatenar_gramatica('\n <TR><TD> EXP ::= ' + str(p[1]) + '( EXP , EXP) </TD> <TD> { exp.val = ' + str(p[1]) + ' ( exp1.val , exp2.val) } </TD></TR>')
def p_funciones_Trigonometricas(p):
''' EXP : t_acos par1 EXP par2
| t_acosd par1 EXP par2
| t_asin par1 EXP par2
| t_asind par1 EXP par2
| t_atan par1 EXP par2
| t_atand par1 EXP par2
| t_cos par1 EXP par2
| t_cosd par1 EXP par2
| t_cot par1 EXP par2
| t_cotd par1 EXP par2
| t_sin par1 EXP par2
| t_sind par1 EXP par2
| t_tan par1 EXP par2
| t_tand par1 EXP par2 '''
concatenar_gramatica('\n <TR><TD> EXP ::= ' + str(p[1]) + '( EXP ) </TD> <TD> { exp.val = ' + str(p[1]) + ' ( exp1.val )} </TD></TR>')
def p_funciones_Trigonometricas1(p):
''' EXP : t_atan2 par1 EXP coma EXP par2
| t_atan2d par1 EXP coma EXP par2 '''
concatenar_gramatica('\n <TR><TD> EXP ::= ' + str(p[1]) + '( EXP , EXP ) </TD> <TD> { exp.val = ' + str(p[1]) + ' ( exp1.val , exp2.val ) } </TD></TR>')
def p_funciones_String_Binarias(p):
''' EXP : t_length par1 id par2
| t_substring par1 char coma entero coma entero par2
| t_trim par1 char par2
| t_md5 par1 char par2
| t_sha256 par1 par2
| t_substr par1 par2
| t_get_byte par1 par2
| t_set_byte par1 par2
| t_convert par1 EXP t_as Tipo par2
| t_encode par1 par2
| t_decode par1 par2 '''
if p[1] == 'substring':
concatenar_gramatica('\n <TR><TD> EXP ::= substring ( char , integer , integer ) </TD> <TD> { exp.val = substring ( char, integer,integer } </TD></TR>')
elif p[1] == 'convert':
concatenar_gramatica('\n <TR><TD> EXP ::= convert ( EXP as TIPO ) </TD> <TD> { exp.val = conver( exp.val , Tipo.type )} </TD></TR>')
else:
concatenar_gramatica('\n <TR><TD> EXP ::= ' + str(p[1]) + '( EXP ) </TD> <TD> { exp.val = ' + str(p[1]) + ' ( exp1.val ) }</TD></TR>')
# --------------------------------------Listas Fundamentales--------------------------------------------
def p_Lista_ID(p):
    '''Lista_ID : Lista_ID coma id
                | id '''
    # Comma-separated identifier list; always yields a Python list.
    if len(p) == 4:
        p[0] = p[1] + [p[3]]
        concatenar_gramatica('\n <TR><TD> LISTA_ID ::= LISTA_ID , id </TD> <TD> { lista_id.val = concatenar (lista_id.aux, lista_id.val) } </TD></TR>')
    else:
        p[0] = [p[1]]
        concatenar_gramatica('\n <TR><TD> LISTA_ID ::= id </TD> <TD> { lista_id.aux = id } </TD></TR>')
def p_Lista_Enum(p):
    '''Lista_Enum : Lista_Enum coma char
                  | char '''
    # Comma-separated char literals for CREATE TYPE ... AS ENUM.
    # BUG FIX: removed a stray debug print(len(p)) that polluted stdout on
    # every reduction of this rule.
    if len(p) == 4:
        p[0] = p[1]+[p[3]]
        concatenar_gramatica('\n <TR><TD> LISTA_ENUM ::= LISTA_ENUM , char </TD> <TD> { lista_enum.lista = lista_enum.lista.add(lista_enum.aux) } </TD></TR>')
    else:
        p[0] = [p[1]]
        concatenar_gramatica('\n <TR><TD> LISTA_ENUM ::= char </TD> <TD> { lista_enum.lista = [char] } </TD></TR>')
def p_Lista_EXP(p):
    '''Lista_EXP : Lista_EXP coma EXP
                 | EXP '''
    # Comma-separated expression list; always yields a Python list.
    if len(p) == 4:
        p[0] = p[1]+[p[3]]
        concatenar_gramatica('\n <TR><TD> LISTA_EXP ::= LISTA_EXP , EXP </TD> <TD> { lista_exp.val = concatenar (lista_exp.aux , lista_exp.val) }</TD></TR>')
    else:
        # BUG FIX: the base case stored the bare expression (p[1]); the
        # recursive case then does p[1] + [p[3]], which requires a list.
        # Wrap it, mirroring p_Lista_ID / p_Lista_Enum.
        p[0] = [p[1]]
        concatenar_gramatica('\n <TR><TD> LISTA_EXP ::= EXP </TD> <TD> { lista_exp.aux = char } </TD></TR>')
def p_Lista_Alias(p):
    '''Lista_Alias : Lista_Alias coma Nombre_Alias
                   | Nombre_Alias '''
    # Comma-separated "table alias" pairs; always yields a Python list.
    if len(p) == 4:
        p[0] = p[1]+[p[3]]
        concatenar_gramatica('\n <TR><TD> LISTA_ALIAS ::= LISTA_ALIAS , Nombre_Alias </TD> <TD> { lista_alias.val = concatenar ( lista_alias.aux , lista_alias.val) }</TD></TR>')
    else:
        # BUG FIX: the base case stored the bare item; the recursive case does
        # p[1] + [p[3]] and needs a list. Wrap it, mirroring p_Lista_ID.
        p[0] = [p[1]]
        concatenar_gramatica('\n <TR><TD> LISTA_ALIAS ::= Nombre_Alias </TD> <TD> { lista_alias.aux = nombre_alias.val } </TD></TR>')
def p_Nombre_Alias(p):
    '''Nombre_Alias : id id'''
    # "table alias" pair; only the table name is propagated, the alias (p[2])
    # is currently discarded.
    p[0] = p[1]
    concatenar_gramatica('\n <TR><TD> NOMBRE_ALIAS ::= id id </TD> <TD> { nombre_alias.val = id1 id2}</TD></TR>')
def p_error(p):
    # Syntax error handler. p is None at unexpected end-of-input; otherwise
    # it is the offending token.
    if not p:
        print('end of file')
        ListaErrores.insertar(err.Nodo_Error("Sintactico", "Se esperaba mas pero llega fin de texto", input.count('\n'), len(input)))
        return
    ListaErrores.insertar(err.Nodo_Error("Sintactico", str(p.value),
                                         p.lineno, find_column(input, p)))
    # Panic-mode recovery: discard tokens until the next ';' (pyc) so parsing
    # can resume at the following statement.
    while True:
        tok = parser.token()
        if not tok or tok.type == 'pyc':
            break
def concatenar_gramatica(cadena):
    # Prepend an HTML row to the grammar report. Reductions happen bottom-up,
    # so prepending leaves the rows in roughly top-down derivation order.
    global reporteg
    reporteg = cadena + reporteg
def parse(input1, errores1):
    # Public entry point: parse *input1*, collecting errors into *errores1*.
    # Resets the module-level report and rebuilds the parser on every call.
    global input
    global ListaErrores
    global reporteg
    ListaErrores = errores1
    reporteg = ''
    input = input1
    global parser
    parser = yacc.yacc()
    # NOTE(review): errok() before parse() clears any stale error state;
    # confirm it is actually needed given the parser is rebuilt each call.
    parser.errok()
    # tracking=True makes lineno/column available on non-terminals too.
    par = parser.parse(input, tracking=True, lexer=lexer)
    return par
|
import Errores.Nodo_Error as err
from ply import lex
from AST.Sentencias import Raiz, Sentencia
import AST.SentenciasDDL as DDL
import ply.yacc as yacc
reservadas = {
'select': 't_select',
'distinct': 't_distinct',
'as': 't_as',
'from': 't_from',
'where': 't_where',
'having': 't_having',
'avg': 't_avg',
'min': 't_min',
'max': 't_max',
'sum': 't_sum',
'count': 't_count',
'insert': 't_insert',
'into': 't_into',
'values': 't_values',
'delete': 't_delete',
'update': 't_update',
'true': 't_true',
'false': 't_false',
'not': 't_not',
'and': 't_and',
'or': 't_or',
'smallint': 't_smallint',
'integer': 't_integer',
'bigint': 't_bigint',
'decimal': 't_decimal',
'numeric': 't_numeric',
'real': 't_real',
'double': 't_double',
'precision': 't_precision',
'money': 't_money',
'character': 't_character',
'varying': 't_varying',
'varchar': 't_varchar',
'char': 't_charn',
'text': 't_text',
'boolean': 't_boolean',
'bool': 't_bool',
'type': 't_type',
'enum': 't_enum',
'create': 't_create',
'replace': 't_replace',
'database': 't_database',
'if': 't_if',
'exists': 't_exists',
'owner': 't_owner',
'mode': 't_mode',
'show': 't_show',
'databases': 't_databases',
'like': 't_like',
'alter': 't_alter',
'rename': 't_rename',
'to': 't_to',
'current_user': 't_current_user',
'session_user': 't_session_user',
'drop': 't_drop',
'table': 't_table',
'delete': 't_delete',
'only': 't_only',
'using': 't_using',
'current': 't_current',
'of': 't_of',
'returning': 't_returning',
'as': 't_as',
'inherits': 't_inherits',
'primary': 't_primary',
'key': 't_key',
'references': 't_references',
'foreign': 't_foreign',
'null': 't_null',
'constraint': 't_constraint',
'unique': 't_unique',
'check': 't_check',
'add': 't_add',
'set': 't_set',
'rename': 't_rename',
'column': 't_column',
'inner': 't_inner',
'left': 't_left',
'right': 't_right',
'full': 't_full',
'outer': 't_outer',
'join': 't_join',
'natural': 't_natural',
'on': 't_on',
'abs': 't_abs',
'cbrt': 't_cbrt',
'ceil': 't_ceil',
'ceiling': 't_ceiling',
'degrees': 't_degrees',
'div': 't_div',
'exp': 't_exp',
'factorial': 't_factorial',
'floor': 't_floor',
'gcd': 't_gcd',
'ln': 't_ln',
'log': 't_log',
'mod': 't_mod',
'pi': 't_pi',
'power': 't_power',
'radians': 't_radians',
'round': 't_round',
'use': 't_use',
'default' : 't_default',
'acos' : 't_acos',
'acosd' : 't_acosd',
'asin' : 't_asin',
'asind' : 't_asind',
'atan' : 't_atan',
'atand' : 't_atand',
'atan2' : 't_atan2',
'atan2d' : 't_atan2d',
'cos' : 't_cos',
'cosd' : 't_cosd',
'cot' : 't_cot',
'cotd' : 't_cotd',
'sin' : 't_sin',
'sind' : 't_sind',
'tan' : 't_tan',
'tand' : 't_tand',
'sinh' : 't_sinh',
'cosh' : 't_cosh',
'tanh' : 't_tanh',
'asinh' : 't_asinh',
'acosh' : 't_acosh',
'atanh' : 't_atanh',
'min_scale' : 't_min_scale',
'scale' : 't_scale',
'sign' : 't_sign',
'sqrt' : 't_sqrt',
'trim_scale' : 't_trim_scale',
'trunc' : 't_trunc',
'width_bucket' : 't_width_bucket',
'random' : 't_random',
'setseed' : 't_setseed',
'length' : 't_length',
'substring' : 't_substring',
'trim' : 't_trim',
'md5' : 't_md5',
'sha256' : 't_sha256',
'substr' : 't_substr',
'get_byte' : 't_get_byte',
'set_byte' : 't_set_byte',
'convert' : 't_convert',
'encode' : 't_encode',
'decode' : 't_decode'
}
tokens = [
'par1',
'par2',
'cor1',
'cor2',
'asterisco',
'mas',
'menos',
'pyc',
'coma',
'div',
'punto',
'igual',
'menor',
'mayor',
'menori',
'mayori',
'diferente',
'porcentaje',
'diferentede',
'pot',
'bipunto',
'id',
'decimal',
'entero',
'char',
'string'
] + list(reservadas.values())
# Tokens
t_par1 = r'\('
t_par2 = r'\)'
t_cor1 = r'\['
t_cor2 = r'\]'
t_pyc = r';'
t_punto = r'\.'
t_coma = r'\,'
t_igual = r'\='
t_mas = r'\+'
t_menos = r'-'
t_asterisco = r'\*'
t_div = r'/'
t_mayor = r'>'
t_menor = r'<'
t_mayori = r'>='
t_menori = r'<='
t_diferente = r'!='
t_porcentaje = r'\%'
t_pot = r'\^'
t_bipunto = r'::'
t_diferentede = r'<>'
def t_decimal(t):
    r'\d+\.\d+'
    # DECIMAL literal: convert the lexeme to float.
    try:
        t.value = float(t.value)
    except ValueError:
        # BUG FIX: print was called with the value as a second argument
        # ("msg %d", x) instead of %-formatting it; also %d cannot format the
        # original string lexeme -- use %s. (float() on a \d+\.\d+ match is
        # not expected to raise in practice; kept as a defensive fallback.)
        print("El valor es muy grande %s" % t.value)
        t.value = 0
    return t
def t_entero(t):
    r'\d+'
    # INTEGER literal: convert the lexeme to int.
    try:
        t.value = int(t.value)
    except ValueError:
        # BUG FIX: print was called with the value as a second argument
        # ("msg %d", x) instead of %-formatting it; use %s for the raw lexeme.
        print("El valor de integer es muy grande %s" % t.value)
        t.value = 0
    return t
def t_char(t):
    r'\'.*?\''
    # Single-quoted literal; strip the surrounding quotes.
    t.value = t.value[1:-1]
    return t
def t_string(t):
    r'\".*?\"'
    # Double-quoted literal; strip the surrounding quotes.
    t.value = t.value[1:-1]
    return t
def t_id(t):
    r'[a-zA-Z_][a-zA-Z_0-9]*'
    # Identifier -- reclassified as a keyword token if it is reserved
    # (case-insensitive lookup).
    t.type = reservadas.get(t.value.lower(), 'id')  # Check for reserved words
    return t
def t_COMENTARIO_MULTILINEA(t):
    r'/\*(.|\n)*?\*/'
    # Block comment /* ... */: discard it but keep line numbering accurate.
    t.lexer.lineno += t.value.count('\n')
# Single-line comment: -- ...
def t_COMENTARIO_SIMPLE(t):
    r'--.*'
    # Discard the comment (returning None drops the token).
    # BUG FIX: the old pattern r'--.*\n' required a trailing newline, so a
    # '--' comment on the last line of the input produced a lexer error.
    # The newline (when present) is now left for t_newline, which keeps the
    # lineno accounting identical.
# Ignored characters (spaces and tabs)
t_ignore = " \t"
def t_newline(t):
    r'\n+'
    # Track line numbers for error reporting.
    t.lexer.lineno += t.value.count("\n")
def find_column(input, token):
    # 1-based column of *token* within its line: the distance from the most
    # recent newline before token.lexpos. rfind returns -1 when the token is
    # on the first line, which makes the arithmetic come out right.
    prev_newline = input.rfind('\n', 0, token.lexpos)
    return token.lexpos - prev_newline
def t_error(t):
    # Lexer error handler: record the offending character as a lexical error
    # in the global error list, echo it to stdout, and skip one character so
    # scanning can continue.
    ListaErrores.insertar(err.Nodo_Error("Lexico", "Caracter no valido '%s'" % t.value[0],
                                         t.lineno, find_column(input, t)))
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)
lexer = lex.lex()
precedence = (
('left', 'punto', 'bipunto'),
('left', 'coma'),
('right', 'igual'),
('left', 'cor1', 'cor2'),
('left', 'mas', 'menos'),
('left', 'asterisco', 'div', 'porcentaje'),
('left', 'pot'),
('right', 'umenos', 'umas'),
('left', 'par1', 'par2'),
# Between , in , like, ilike, simiar, is isnull notnull
('left', 't_or'),
('left', 't_and'),
('left', 'diferente'),
('left', 'mayor', 'menor', 'mayori', 'menori'),
('right', 't_not')
)
from AST.Expresiones import *
from AST.SentenciasDML import *
reporteg = ''
def p_sql(p):
'SQL : Sentencias_SQL'
p[0] = Raiz(ListaErrores, p[1])
def p_sql2(p):
'SQL : empty'
p[0] = Raiz(ListaErrores)
def p_Sentencias_SQL_Sentencia_SQL(p):
'Sentencias_SQL : Sentencias_SQL Sentencia_SQL'
p[0] = p[1] + [p[2]]
concatenar_gramatica('\n <TR><TD> SENTENCIAS_SQL ::= SENTENCIAS_SQL SENTENCIA_SQL </TD> <TD> { sentencias_sql.lista = sentencias_sql.lista.add(sentencia_sql.lista) } </TD></TR> ')
def p_Sentencias_SQL(p):
'Sentencias_SQL : Sentencia_SQL'
p[0] = [p[1]]
concatenar_gramatica('\n <TR><TD> SENTENCIAS_SQL ::= SENTENCIA_SQL </TD> <TD> { sentencias_sql.lista = [sentencia_sql] } </TD></TR>')
def p_Sentencia_SQL_DML(p):
'Sentencia_SQL : Sentencias_DML'
p[0] = Sentencia("SentenciaDML", [p[1]])
concatenar_gramatica('\n <TR><TD> SENTENCIA_SQL ::= SENTENCIAS_DML </TD> <TD> { sentencia_sql.inst = sentencias_dml.inst } </TD></TR>')
#def p_Sentencia_SQL_DML(p):
# 'Sentencia_SQL : EXP pyc'
# p[0] = Sentencia("EXP", [p[1]])
def p_Sentencia_SQL_DDL(p):
'Sentencia_SQL : Sentencias_DDL'
p[0] = Sentencia("SentenciaDDL", [p[1]])
concatenar_gramatica('\n <TR><TD> SENTENCIA_SQL ::= SENTENCIAS_DDL </TD> <TD> { sentencia_sql.inst = sentencias_dll.inst } </TD></TR>')
# -------------------------------------------------------------SENTENCIAS DML
def p_Sentencias_DML(p):
'''Sentencias_DML : t_select Lista_EXP Select_SQL Condiciones pyc
| t_select asterisco Select_SQL Condiciones pyc
| t_insert t_into id Insert_SQL pyc
| t_update id t_set Lista_EXP t_where EXP pyc
| t_delete t_from id Condiciones pyc
| t_use id pyc'''
if p[1] == 'select':
p[0] = Select(p[2], p[3], p[4], p.slice[2].lineno, find_column(input, p.slice[2]))
concatenar_gramatica('\n <TR><TD> SENTENCIAS_DML ::= select' + str(p[2]) + 'SELECT_SQL ; </TD><TD> { sentencias_dml.inst = select(lista_exp.lista, Select_SQL.val,Condiciones.val)} </TD></TR>')
elif p[1] == 'insert':
p[0] = Insert(p[3], p[4], p.slice[1].lineno, find_column(input, p.slice[1]))
concatenar_gramatica('\n <TR><TD> SENTENCIAS_DML ::= insert into id INSERT_SQL ; </TD> <TD> {sentencias_dml.inst = insert(id,Insert_SQL.inst)} </TD></TR>')
elif p[1] == 'update':
concatenar_gramatica('\n <TR><TD> SENTENCIAS_DML ::= update id set LISTA_EXP where EXP ; </TD> <TD> {sentencias_dml.inst = update(id, lista_exp.list, exp.val)} </TD></TR>')
elif p[1] == 'delete':
concatenar_gramatica('\n <TR><TD> SENTENCIAS_DML ::= delete from id CONDICIONES ; </TD> <TD> { sentencias_dml.inst = delete(id, Condiciones.val) } </TD></TR>')
else:
concatenar_gramatica('\n <TR><TD> SENTENCIAS_DML ::= use database id ; </TD> <TD> {sentencias_dml.inst = use(id)} </TD></TR>')
def p_Select_SQL(p):
    'Select_SQL : t_from Table_Expression'
    # FROM clause: pass the table expression through.
    p[0] = p[2]
    concatenar_gramatica('\n <TR><TD> SELECT_SQL ::= from TABLE_EXPRESSION CONDICIONES </TD> <TD> { select_sql.val = Table_Expression.val } </TD></TR>')
def p_Select2_SQL(p):
    'Select_SQL : empty'
    # SELECT without a FROM clause: no source tables.
    p[0] = []
    concatenar_gramatica('\n <TR><TD> SELECT_SQL ::= EMPTY </TD> <TD> { select_sql.val = empty.val }</TD></TR>')
def p_Table_Expression(p):
    '''Table_Expression : Alias_Tabla
                        | Subqueries'''
    # Pass-through: either a (possibly aliased) table list or a subquery.
    p[0] = p[1]
    concatenar_gramatica('\n <TR><TD> TABLE_EXPRESSION ::= ' + str(p[1]) + '</TD> <TD> { table_expression.val = ' + str(p[1]) + '.val } </TD></TR>')
def p_Alias_Tabla(p):
    '''Alias_Tabla : Lista_ID
                    | Lista_Alias'''
    # Pass-through of the underlying id/alias list.
    p[0] = p[1]
    concatenar_gramatica('\n <TR><TD> ALIAS_TABLA ::= ' + str(p[1]) + '</TD> <TD> { alias_tabla.val = ' + str(p[1]) + '.list } </TD></TR>')
def p_Subqueries(p):
    '''Subqueries : par1 t_select par2'''
    # NOTE(review): recognizes only '( select )' and builds no value
    # (p[0] stays None) -- looks like a placeholder, confirm.
    concatenar_gramatica('\n <TR><TD> SUBQUERIES ::= ( select )</TD> <TD> { subqueries.inst = select() } </TD></TR>')
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> INSERT <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
def p_Insert_SQL(p):
    'Insert_SQL : par1 Lista_ID par2 t_values par1 Lista_EXP par2'
    # INSERT with an explicit column list: only the VALUES expressions are
    # propagated; the column-name list (p[2]) is currently dropped.
    p[0] = p[6]
    concatenar_gramatica('\n <TR><TD> INSERT_SQL ::= ( LISTA_ID ) values ( LISTA_EXP ) </TD> <TD> { insert_sql.inst = insert1(lista_id.lista,lista_exp.lista)} </TD></TR>')
def p_Insert_SQL2(p):
    'Insert_SQL : t_values par1 Lista_EXP par2'
    # INSERT without a column list: propagate the VALUES expressions.
    p[0] = p[3]
    concatenar_gramatica('\n <TR><TD> INSERT_SQL ::= values ( LISTA_EXP ) </TD> <TD> { insert_sql.inst = insert1(lista_exp.lista)} </TD></TR>')
def p_Condiciones(p):
    '''Condiciones : t_where EXP
                    | empty'''
    # Optional WHERE clause: yield the parsed condition expression, or []
    # when the clause is absent.
    if len(p) == 3:
        # BUG FIX: previously assigned p[1] (the literal 'where' keyword)
        # instead of the parsed expression in p[2]; the report row itself
        # documents the intent: condiciones.val = exp.val.
        p[0] = p[2]
        concatenar_gramatica('\n <TR><TD> CONDICIONES ::= where EXP </TD> <TD> condiciones.val = exp.val </TD></TR>')
    else:
        p[0] = []
        concatenar_gramatica('\n <TR><TD> INSERT_SQL ::= EMPTY </TD> <TD> { insert_sql.val = empty.val }</TD></TR>')
# ---------------------------- Sentencias DDL y Enum Type --------------
def p_Sentencias_DDL(p):
    '''Sentencias_DDL : t_show t_databases pyc
                    | Enum_Type
                    | t_drop Drop pyc
                    | t_alter Alter pyc
                    | t_create Create pyc'''
    # For the Enum_Type alternative p[1] is already an AST object, so the
    # class-name check must come before any str method is called on p[1].
    if p[1].__class__.__name__ == 'CreateType':
        p[0] = p[1]
        concatenar_gramatica('\n <TR><TD> SENTENCIAS_DDL ::= ENUM_TYPE </TD> <TD> { sentencias_ddl.inst = enum_type.inst } </TD></TR>')
    elif p[1].upper() == 'SHOW':
        p[0] = DDL.ShowDatabases(p.slice[1].lineno, find_column(input, p.slice[1]))
        concatenar_gramatica('\n <TR><TD> SENTENCIAS_DDL ::= show databases ; </TD> <TD> { sentencias_ddl.inst = show() } </TD></TR>')
    elif p[1].upper() == 'CREATE':
        # CREATE/DROP/ALTER do not build AST nodes yet: p[0] is explicitly None.
        p[0] = None
        concatenar_gramatica('\n <TR><TD> SENTENCIAS_DDL ::= create CREATE ; </TD> <TD> { sentencias_ddl.inst = create.inst} </TD></TR>')
    elif p[1].upper() == 'DROP':
        p[0] = None
        concatenar_gramatica('\n <TR><TD> SENTENCIAS_DDL ::= drop Drop ; </TD> <TD> { sentencias_ddl.inst = drop.inst } </TD></TR>')
    elif p[1].upper() == 'ALTER':
        p[0] = None
        concatenar_gramatica('\n <TR><TD> SENTENCIAS_DDL ::= alter ALTER ; </TD> <TD> { sentencias_ddl.inst = alter.inst }</TD></TR>')
    else:
        p[0] = None
def p_Enum_Type(p):
    'Enum_Type : t_create t_type id t_as t_enum par1 Lista_Enum par2 pyc'
    # CREATE TYPE ... AS ENUM (...): build the CreateType node with a
    # lower-cased type name and the enum literal list.
    p[0] = DDL.CreateType(p[3].lower(), p[7], p.slice[1].lineno, find_column(input, p.slice[1]))
    concatenar_gramatica('\n <TR><TD> ENUM_TYPE ::= create type id as enum ( LISTA_ENUM ) ; </TD> <TD> { enum_type.inst = createType(id,lista_Enum.val) } </TD></TR>')
def p_Drop(p):
    '''Drop : t_database DropDB id
            | t_table id '''
    # DROP DATABASE / DROP TABLE: report-only, no AST node is built yet.
    if p[1] == 'database':
        concatenar_gramatica('\n <TR><TD> DROP ::= database DROPDB id </TD> <TD> { drop.inst = dropBD( id) } </TD></TR>')
    else:
        concatenar_gramatica('\n <TR><TD> DROP ::= table id </TD> <TD> {drop.inst = dropTb( id )} </TD></TR>')
def p_DropDB(p):
    '''DropDB : t_if t_exists
            | empty'''
    # Optional IF EXISTS modifier: report-only.
    if p[1] == 'if':
        concatenar_gramatica('\n <TR><TD> DROPDB ::= if exists </TD> <TD> { dropdb.val = if exists} </TD></TR>')
    else:
        concatenar_gramatica('\n <TR><TD> DROPDB ::= EMPTY </TD> <TD> { dropdb.val = empty.val } </TD></TR>')
def p_Alter(p):
    '''Alter : t_database id AlterDB
            | t_table id AlterTB '''
    # ALTER DATABASE / ALTER TABLE: report-only.
    if p[1] == 'database':
        concatenar_gramatica('\n <TR><TD> ALTER ::= database id ALTERDB </TD> <TD> alter.inst = alterDB( id,alterdb.inst ) </TD></TR>')
    else:
        concatenar_gramatica('\n <TR><TD> ALTER ::= table id ALTERTB </TD> <TD> alter.inst = altertb(id, altertb.inst) </TD></TR>')
def p_AlterDB(p):
    ''' AlterDB : t_rename t_to id
                | t_owner t_to SesionDB '''
    # RENAME TO / OWNER TO variants: report-only.
    if p[1] == 'rename':
        concatenar_gramatica('\n <TR><TD> ALTERDB ::= rename to id </TD> <TD> { alterdb.val = rename id } </TD></TR>')
    else:
        concatenar_gramatica('\n <TR><TD> ALTERDB ::= owner to SESIONDB </TD> <TD> { alterdb.val = owner sessiondb.val} </TD> </TR>')
def p_SesionDB(p):
    ''' SesionDB : id
                | t_current_user
                | t_session_user '''
    # Owner specifier for ALTER DATABASE ... OWNER TO: report-only.
    if p[1] == 'current_user':
        concatenar_gramatica('\n <TR><TD> SESSIONDB ::= current_user </TD> <TD> { sessiondb.val = current_user } </TD></TR>')
    elif p[1] == 'session_user':
        concatenar_gramatica('\n <TR><TD> SESSIONDB ::= session_user </TD> <TD> { sessiondb.val = session_user } </TD></TR>')
    else:
        concatenar_gramatica('\n <TR><TD> SESSIONDB ::= id </TD> <TD> { sessiondb.val = id } </TD></TR>')
def p_AlterTB(p):
    ''' AlterTB : t_add Add_Opc
                | t_drop Drop_Opc
                | t_alter t_column Alter_Column
                | t_rename t_column id t_to id '''
    # ALTER TABLE body: ADD / DROP / ALTER COLUMN / RENAME COLUMN. Report-only.
    if p[1] == 'add':
        concatenar_gramatica('\n <TR><TD> ALTERTB ::= add ADD_OPC </TD> <TD> { altertb.inst = add(add_Opc.val) } </TD></TR>')
    elif p[1] == 'drop':
        concatenar_gramatica('\n <TR><TD> ALTERTB ::= drop DROP_OPC </TD> <TD> { altertb.inst = drop(drop_opc.val) } </TD></TR>')
    elif p[1] == 'alter':
        concatenar_gramatica('\n <TR><TD> ALTERTB ::= alter column ALTER_COLUMN </TD> <TD> { altertb.inst = alter(alter_column.val) } </TD></TR>')
    elif p[1] == 'rename':
        concatenar_gramatica('\n <TR><TD> ALTERTB ::= rename column id to id </TD> <TD> { altertb.inst = rename(id1,id2) } </TD></TR>')
def p_Add_Opc(p):
    '''Add_Opc : t_column id Tipo
            | t_foreign t_key par1 id par2 t_references id
            | t_constraint id t_unique par1 id par2
            | t_check EXP '''
    # ALTER TABLE ... ADD variants: report-only.
    if p[1] == 'column':
        concatenar_gramatica('\n <TR><TD> ADD_OPC ::= column id TIPO </TD> <TD> { add_opc.inst = column(id, tipo.type) } </TD></TR>')
    elif p[1] == 'foreign':
        concatenar_gramatica('\n <TR><TD> ADD_OPC ::= foreign key ( id ) references id </TD> <TD> { add_opc.isnt = foreign(id1,id2)} </TD></TR>')
    elif p[1] == 'constraint':
        concatenar_gramatica('\n <TR><TD> ADD_OPC ::= constraint id unique ( id ) </TD> <TD> { add_opc.inst = constraint(id1,id2)} </TD></TR>')
    elif p[1] == 'check':
        concatenar_gramatica('\n <TR><TD> ADD_OPC ::= check EXP </TD> <TD> {add_opc.inst = check( exp.val )} </TD></TR>')
def p_Drop_Opc(p):
    ''' Drop_Opc : t_column id
                | t_constraint id '''
    # ALTER TABLE ... DROP variants: report-only.
    if p[1] == 'column':
        concatenar_gramatica('\n <TR><TD> DROP_OPC ::= column id TIPO </TD> <TD> {drop_opc.val = column,id }</TD></TR>')
    elif p[1] == 'constraint':
        concatenar_gramatica('\n <TR><TD> DROP_OPC ::= foreign key ( id ) references id </TD> <TD> { drop_opc.val = constraint,id} </TD></TR>')
def p_Alter_Column(p):
    ''' Alter_Column : id t_set t_not t_null
                    | Alter_Columns'''
    # Single SET NOT NULL form vs. comma-separated column alteration list.
    if len(p) == 5:
        concatenar_gramatica('\n <TR><TD> ALTER_COLUMN ::= id set not null </TD> <TD> { alter_column.val = id} </TD></TR>')
    else:
        concatenar_gramatica('\n <TR><TD> ALTER_COLUMN ::= ALTER_COLUMNS </TD> <TD> { alter_column.val = alter_columns.val } </TD></TR>')
def p_Alter_Columns(p):
    ''' Alter_Columns : Alter_Columns coma Alter_Column1
                    | Alter_Column1'''
    # Left-recursive list of column alterations: report-only (no value built).
    if len(p) == 4:
        concatenar_gramatica('\n <TR><TD> ALTER_COLUMNS ::= ALTER_COLUMNS , ALTER_COLUMN1 </TD> <TD> { alter_columns.lista.add(alter_column1.val) } </TD></TR>')
    else:
        concatenar_gramatica('\n <TR><TD> ALTER_COLUMNS ::= ALTER_COLUMN1 </TD> <TD> { alter_columns.lista = [alter_column.val]} </TD></TR>')
def p_Alter_Colum1(p):
    # NOTE: function name has a typo (Colum1); harmless since PLY only
    # requires the 'p_' prefix, the rule name comes from the docstring.
    '''Alter_Column1 : id t_type t_varchar par1 entero par2
                    | t_alter t_column id t_type t_varchar par1 entero par2'''
    if p[1] == 'alter':
        concatenar_gramatica('\n <TR><TD> ALTER_COLUMN1 ::= alter column id type varchar ( entero ) </TD> <TD> { alter_Column.inst = alter_column(id,varchar,entero) }</TD></TR>')
    else:
        concatenar_gramatica('\n <TR><TD> ALTER_COLUMN1 ::= id type varchar ( entero ) </TD> <TD> { alter_Column.inst = alter_Column(id,varchar,entero)} </TD></TR>')
def p_Create(p):
    'Create : CreateDB'
    # CREATE DATABASE branch: pass through.
    p[0] = p[1]
    concatenar_gramatica('\n <TR><TD> CREATE ::= CREATEDB </TD> <TD> { create.val = createDB.val } </TD></TR>')
def p_Create1(p):
    'Create : CreateTB '
    # CREATE TABLE branch: report-only (p[0] not assigned).
    concatenar_gramatica('\n <TR><TD> CREATE ::= CREATETB </TD> <TD> { create.val = createtb.val }</TD></TR>')
def p_CreateDB(p):
    '''CreateDB : t_database Op1_DB
                | t_or t_replace t_database Op1_DB'''
    # NOTE(review): both branches propagate p[1], which is the keyword text
    # ('database' or 'or'), not the Op1_DB value -- confirm intent.
    if len(p) == 3:
        p[0] = p[1]
        concatenar_gramatica('\n <TR><TD> CREATEDB ::= database OP1_DB </TD> <TD> { createdb.inst = op1_db.val } </TD></TR>')
    else:
        p[0] = p[1]
        concatenar_gramatica('\n <TR><TD> CREATEDB ::= or replace database OP1_DB </TD> <TD> { createdb.inst = replace(op1_db.val)} </TD></TR>')
def p_Op1_DB(p):
    ''' Op1_DB : t_if t_not t_exists id Sesion
            | id Sesion'''
    # Optional IF NOT EXISTS before the database name; p[0] is the first symbol.
    if p[1] == 'if':
        p[0] = p[1]
        concatenar_gramatica('\n <TR><TD> OP1_DB ::= if not exists id SESION </TD> <TD> { op1_db = id ,sesion.val} </TD></TR>')
    else:
        p[0] = p[1]
        concatenar_gramatica('\n <TR><TD> OP1_DB ::= id SESION </TD> <TD> { op1_db = id, sesion.val} </TD></TR>')
def p_Sesion(p):
    ''' Sesion : t_owner Op_Sesion Sesion_mode
            | t_mode Op_Sesion
            | empty '''
    # Optional OWNER / MODE suffix of CREATE DATABASE: report-only.
    if p[1] == 'owner':
        concatenar_gramatica('\n <TR><TD> SESION ::= owner OP_SESION SESION_MODE </TD> <TD> { session.val = owner Op_Sesion} </TD></TR>')
    elif p[1] == 'mode':
        concatenar_gramatica('\n <TR><TD> SESION ::= mode OP_SESION </TD> <TD> { session.val = mode Op_Sesion.val} </TD></TR>')
    else:
        concatenar_gramatica('\n <TR><TD> SESION ::= EMPTY </TD> <TD> { session.val = empty.val } </TD></TR>')
def p_Op_Sesion(p):
    ''' Op_Sesion : igual id
                | id '''
    # Owner/mode value, optionally preceded by '='.
    # BUG FIX: the second alternative ('id' alone) has only p[1]; the old
    # code unconditionally read p[2], raising IndexError for that production.
    p[0] = p[2] if len(p) == 3 else p[1]
    concatenar_gramatica('\n <TR><TD> OP_SESION ::= = id </TD> <TD> { Op_sesion.val = id } </TD></TR>')
def p_Sesion_mode(p):
    ''' Sesion_mode : t_mode Op_Sesion
                    | empty '''
    # Optional MODE suffix after OWNER.
    if len(p) == 3:
        p[0] = p[2]
        concatenar_gramatica('\n <TR><TD> SESION_MODE ::= mode OP_SESION </TD> <TD> {sesion_mode.val = mode op_sesion.val} </TD></TR>')
    else:
        p[0] = p[1]
        concatenar_gramatica('\n <TR><TD> SESION_MODE ::= EMPTY </TD> <TD> { sesion_mode.val = empty.val } </TD></TR>')
def p_CreateTB(p):
    'CreateTB : t_table id par1 Columnas par2 Inherits '
    # CREATE TABLE: report-only (p[0] not assigned).
    concatenar_gramatica('\n <TR><TD> CREATETB ::= table id ( COLUMNAS ) INHERITS </TD> <TD> {createtb.inst = createtb(id,columnas.val,inherits.val)} </TD></TR>')
def p_Inherits(p):
    ''' Inherits : t_inherits par1 id par2
                | empty '''
    # Optional INHERITS ( id ) clause.
    if len(p) == 5:
        concatenar_gramatica('\n <TR><TD> INHERITS ::= inherits ( id ) </TD> <TD> { inherits.inst = inherits(id) } </TD></TR>')
    else:
        p[0] = p[1]
        concatenar_gramatica('\n <TR><TD> INHERITS ::= EMPTY </TD> <TD> { inherits.inst = empty.val } </TD></TR>')
def p_Columnas(p):
    ''' Columnas : Columnas coma Columna
                | Columna '''
    # NOTE(review): the multi-column branch never assigns p[0], so a list of
    # columns reduces to None while a single column passes through -- confirm.
    if len(p) == 4:
        concatenar_gramatica('\n <TR><TD> COLUMNAS ::= COLUMNAS , COLUMNA </TD> columnas.val = concatenar(columna.aux , columna.val) <TD> </TD></TR>')
    else:
        p[0] = p[1]
        concatenar_gramatica('\n <TR><TD> COLUMNAS ::= COLUMNA </TD> <TD> columnas.aux = columna.val </TD></TR>')
def p_Columna(p):
    ''' Columna : id Tipo Cond_CreateTB
                | Constraint'''
    # A column definition (name + type + modifiers) or a table-level constraint.
    if len(p) == 4:
        concatenar_gramatica('\n <TR><TD> COLUMNA ::= id TIPO COND_CREATETB </TD> <TD> {columna.val = cond_createtb.val} </TD></TR>')
    else:
        p[0] = p[1]
        concatenar_gramatica('\n <TR><TD> COLUMNA ::= CONSTRAINT </TD> <TD> { columna.val = constraint.val} </TD></TR>')
def p_Cond_CreateTB(p):
    ''' Cond_CreateTB : t_default id Cond_CreateTB
                    | t_not t_null Cond_CreateTB
                    | t_null Cond_CreateTB
                    | t_constraint id Opc_Constraint Cond_CreateTB
                    | t_primary t_key Cond_CreateTB
                    | t_references id Cond_CreateTB
                    | empty'''
    # Right-recursive chain of column modifiers: report-only.
    if p[1] == 'default':
        concatenar_gramatica('\n <TR><TD> COND_CREATETB ::= default id COND_CREATETB </TD> <TD> { cond_createtb.val = id cond_createtb.val } </TD></TR>')
    elif p[1] == 'not':
        concatenar_gramatica('\n <TR><TD> COND_CREATETB ::= not null COND_CREATETB </TD> <TD> { cond_createtb.val = cond_createtb.val } </TD></TR>')
    elif p[1] == 'null':
        concatenar_gramatica('\n <TR><TD> COND_CREATETB ::= null COND_CREATETB </TD> { cond_createtb.val = cond_createtb.val } </TD></TR>')
    elif p[1] == 'constraint':
        concatenar_gramatica('\n <TR><TD> COND_CREATETB ::= constraint id OPC_CONSTRAINT COND_CREATETB </TD> { cond_createtb.val = id opc_constraint.val cond_createtb.val } <TD> </TD></TR>')
    elif p[1] == 'primary':
        concatenar_gramatica('\n <TR><TD> COND_CREATETB ::= primary key COND_CREATETB </TD> <TD> { cond_createtb.val = cond_createtb.val } </TD></TR>')
    elif p[1] == 'references':
        concatenar_gramatica('\n <TR><TD> COND_CREATETB ::= references id COND_CREATETB </TD> <TD> { cond_createtb.val = id cond_createtb.val } </TD></TR>')
    else:
        concatenar_gramatica('\n <TR><TD> COND_CREATETB ::= EMPTY </TD> <TD> { cond_createtb.val = empty.val } </TD></TR>')
def p_Opc_Constraint(p):
    ''' Opc_Constraint : t_unique
                    | t_check par1 EXP par2 '''
    # Named-constraint body: UNIQUE or CHECK(expr). Report-only.
    if p[1] == 'unique':
        concatenar_gramatica('\n <TR><TD> OPC_CONSTRAINT ::= unique </TD> <TD> { opc_constraint.val = unique }</TD></TR>')
    elif p[1] == 'check':
        concatenar_gramatica('\n <TR><TD> OPC_CONSTRAINT ::= check ( EXP ) </TD> <TD> { opc_constraint.val = check exp.val } </TD></TR>')
def p_Constraint(p):
    ''' Constraint : t_unique par1 Lista_ID par2
                | t_constraint id t_check par1 EXP par2
                | t_check par1 EXP par2
                | t_primary t_key par1 Lista_ID par2
                | t_foreign t_key par1 Lista_ID par2 t_references id par1 Lista_ID par2
                | empty '''
    # Table-level constraints: report-only.
    if p[1] == 'unique':
        concatenar_gramatica('\n <TR><TD> CONSTRAINT ::= unique ( LISTA_ID ) </TD> <TD> {constraint.inst = unique(lista_id.list)}</TD></TR>')
    elif p[1] == 'constraint':
        concatenar_gramatica('\n <TR><TD> CONSTRAINT ::= constraint id check ( EXP ) </TD> <TD> {constraint.inst = constraint(id,check,exp.val)}</TD></TR>')
    elif p[1] == 'check':
        concatenar_gramatica('\n <TR><TD> CONSTRAINT ::= check ( EXP ) </TD> <TD> { constraint.inst = check(exp.val)} </TD></TR>')
    elif p[1] == 'primary':
        concatenar_gramatica('\n <TR><TD> CONSTRAINT ::= primary key ( LISTA_ID ) </TD> <TD> {constraint.inst = primary(lista_id.list)} </TD> </TR>')
    elif p[1] == 'foreign':
        concatenar_gramatica('\n <TR><TD> CONSTRAINT ::= foreign key ( LISTA_ID ) references id ( LISTA_ID ) </TD> <TD> { constraint.inst = foreign(lista_id.lista,id,lista_id.lista)} </TD></TR>')
    else:
        concatenar_gramatica('\n <TR><TD> CONSTRAINT ::= EMPTY </TD> <TD> { constraint.inst = empty.val } </TD></TR>')
def p_Tipo(p):
    ''' Tipo : t_smallint
            | t_integer
            | t_bigint
            | t_decimal
            | t_numeric
            | t_real
            | t_double t_precision
            | t_money
            | t_character t_varying par1 Valor par2
            | t_varchar par1 Valor par2
            | t_character par1 Valor par2
            | t_charn par1 Valor par2
            | t_text
            | t_boolean '''
    # Column data types: propagate the type keyword; any size argument in
    # parentheses (Valor) is currently dropped.
    p[0] = p[1]
    concatenar_gramatica('\n <TR><TD> TIPO ::= ' + str(p[1]) + '</TD> <TD> { tipo.type = ' + str(p[1]) + ' } </TD></TR>')
def p_Valor(p):
    ''' Valor : decimal
            | entero
            | string
            | char
            | t_true
            | t_false'''
    # Literal value: wrap in an Expression, tagged with the token type.
    p[0] = Expression(p[1], p.slice[1].lineno, find_column(input, p.slice[1]), p.slice[1].type)
    concatenar_gramatica('\n <TR><TD> VALOR ::= ' + str(p[1]) + '</TD> <TD> { Valor.val = ' + str(p[1]) + '} </TD></TR>')
def p_Valor2(p):
    'Valor : id'
    # Identifier value: Expression without the extra type tag.
    p[0] = Expression(p[1], p.slice[1].lineno, find_column(input, p.slice[1]))
    concatenar_gramatica('\n <TR><TD> VALOR ::= id </TD> <TD> { valor.val = ' + str(p[1]) + ' } </TD></TR>')
def p_empty(p):
    'empty :'
    # Epsilon production: its value is an empty list.
    p[0] = []
    concatenar_gramatica('\n <TR><TD> EMPTY ::= epsilon </TD> <TD> { } </TD></TR>')
# ----------------------------EXPRESIONES Y OPERACIONES---------------------------------------------------------------
def p_aritmeticas(p):
    '''EXP : EXP mas EXP
        | EXP menos EXP
        | EXP asterisco EXP
        | EXP div EXP
        | EXP pot EXP
        | EXP porcentaje EXP'''
    # Binary arithmetic: build an Expression tagged 'Aritmetica' with the
    # operator token text and the operand subtrees.
    p[0] = Expression(p[1], p[3], p.slice[2].value, p.slice[2].lineno, find_column(input, p.slice[2]),'Aritmetica')
    concatenar_gramatica('\n <TR><TD> EXP ::= EXP' + str(p[2]) + ' EXP </TD> <TD> { Exp = Exp1.val ' + str(p[2]) + ' Exp2.val } </TD></TR>')
def p_parentesis(p):
    'EXP : par1 EXP par2'
    # Parenthesized expression: pass the inner expression through.
    p[0] = p[2]
    concatenar_gramatica('\n <TR><TD> EXP ::= ( EXP ) </TD> <TD> { exp.val = exp1.val } </TD></TR>')
def p_relacionales(p):
    '''EXP : EXP mayor EXP
        | EXP mayori EXP
        | EXP menor EXP
        | EXP menori EXP
        | EXP igual EXP
        | EXP diferente EXP
        | EXP diferentede EXP'''
    # Binary comparison: Expression tagged 'Relacional'.
    p[0] = Expression(p[1], p[3], p.slice[2].value, p.slice[2].lineno, find_column(input, p.slice[2]), 'Relacional')
    concatenar_gramatica('\n <TR><TD> EXP ::= EXP' + str(p[2]) + ' EXP </TD> <TD> { Exp = Exp1.val ' + str(p[2]) + ' Exp2.val } </TD></TR>')
def p_logicos(p):
    '''EXP : EXP t_and EXP
        | EXP t_or EXP
    '''
    # Binary AND/OR: Expression tagged 'Logica'.
    p[0] = Expression(p[1], p[3], p.slice[2].value, p.slice[2].lineno, find_column(input, p.slice[2]), 'Logica')
    concatenar_gramatica('\n <TR><TD> EXP ::= EXP' + str(p[2]) + ' EXP </TD> <TD> { Exp = Exp1.val ' + str(p[2]) + ' Exp2.val } </TD></TR>')
def p_unario(p):
    '''EXP : mas EXP %prec umas
            | menos EXP %prec umenos
            | t_not EXP'''
    # Unary operators: NOT, unary plus and unary minus (via %prec).
    if p[1] == 'not':
        p[0] = Expression(p.slice[1].value, p[2], p.slice[2].lineno, find_column(input, p.slice[2]), 'Unario')
        concatenar_gramatica('\n <TR><TD> EXP ::= not EXP </TD> <TD> { Exp = Exp1.val } </TD></TR>')
    else:
        p[0] = Expression(p.slice[1].value, p[2], p.slice[2].lineno, find_column(input, p.slice[2]), 'Unario')
        # BUG FIX: the report line used str(p[4]), but these productions have
        # only two symbols, so every unary +/- raised IndexError. The %prec
        # name is derived from the operator token type instead.
        prec_name = 'umas' if p.slice[1].type == 'mas' else 'umenos'
        concatenar_gramatica('\n <TR><TD> EXP ::= ' + str(p[1]) + 'EXP %prec' + prec_name + '</TD> <TD> { exp = exp1.val } </TD></TR>')
def p_EXP_Valor(p):
    'EXP : Valor'
    # Literal or identifier as an expression.
    p[0] = p[1]
    concatenar_gramatica('\n <TR><TD> EXP ::= VALOR </TD> <TD> { exp.val = valor.val } </TD></TR>')
def p_EXP_Indices(p):
    '''EXP : id punto id'''
    # NOTE(review): only the table part p[1] is kept; the column id p[3]
    # is dropped -- confirm intent.
    p[0] = p[1]
    concatenar_gramatica('\n <TR><TD> EXP ::= id . id </TD> <TD> { exp.val = id1.val . id2.val } </TD></TR>')
def p_EXP_IndicesAS(p):
    '''EXP : EXP t_as EXP'''
    # NOTE(review): the alias expression p[3] is dropped -- confirm intent.
    p[0] = p[1]
    concatenar_gramatica('\n <TR><TD> EXP ::= EXP as EXP </TD> <TD> { exp.val = exp1.val as exp2.val } </TD></TR>')
def p_exp_agregacion(p):
    '''EXP : t_avg par1 EXP par2
        | t_sum par1 EXP par2
        | t_count par1 EXP par2
        | t_max par1 EXP par2
        | t_min par1 EXP par2'''
    # Aggregate function calls: report-only (p[0] not assigned).
    concatenar_gramatica('\n <TR><TD> EXP ::= ' + str(p[1]) + '( EXP ) </TD> <TD> { exp.val = ' + str(p[1]) + ' ( exp1.val ) } </TD></TR>')
def p_funciones_matematicas(p):
    # Unary math function calls: report-only (p[0] not assigned).
    # NOTE(review): another function later in this module was originally
    # defined with this exact name; in Python a later def silently replaces
    # this one, which would drop all of these productions -- ensure the two
    # function names differ.
    ''' EXP : t_abs par1 EXP par2
            | t_cbrt par1 EXP par2
            | t_ceil par1 EXP par2
            | t_ceiling par1 EXP par2
            | t_degrees par1 EXP par2
            | t_exp par1 EXP par2
            | t_factorial par1 EXP par2
            | t_floor par1 EXP par2
            | t_ln par1 EXP par2
            | t_log par1 EXP par2
            | t_pi par1 par2
            | t_radians par1 EXP par2
            | t_round par1 EXP par2
            | t_min_scale par1 EXP par2
            | t_scale par1 EXP par2
            | t_sign par1 EXP par2
            | t_sqrt par1 EXP par2
            | t_trim_scale par1 EXP par2
            | t_trunc par1 EXP par2
            | t_width_bucket par1 Lista_EXP par2
            | t_random par1 par2
            | t_setseed par1 EXP par2'''
    concatenar_gramatica('\n <TR><TD> EXP ::= ' + str(p[1]) + '( EXP ) </TD> <TD> { exp.val = ' + str(p[1]) + ' ( exp1.val ) } </TD></TR>')
def p_funciones_matematicas_bin(p):
    # Two-argument math functions: report-only (p[0] not assigned).
    # BUG FIX: this function was also named p_funciones_matematicas, so it
    # overwrote the earlier definition of that name at import time and PLY
    # never registered the unary math productions (t_abs, t_sqrt, ...).
    # Renaming it (PLY only requires the 'p_' prefix) restores both rule sets.
    ''' EXP : t_div par1 EXP coma EXP par2
            | t_gcd par1 EXP coma EXP par2
            | t_mod par1 EXP coma EXP par2
            | t_power par1 EXP coma EXP par2'''
    concatenar_gramatica('\n <TR><TD> EXP ::= ' + str(p[1]) + '( EXP , EXP) </TD> <TD> { exp.val = ' + str(p[1]) + ' ( exp1.val , exp2.val) } </TD></TR>')
def p_funciones_Trigonometricas(p):
    ''' EXP : t_acos par1 EXP par2
            | t_acosd par1 EXP par2
            | t_asin par1 EXP par2
            | t_asind par1 EXP par2
            | t_atan par1 EXP par2
            | t_atand par1 EXP par2
            | t_cos par1 EXP par2
            | t_cosd par1 EXP par2
            | t_cot par1 EXP par2
            | t_cotd par1 EXP par2
            | t_sin par1 EXP par2
            | t_sind par1 EXP par2
            | t_tan par1 EXP par2
            | t_tand par1 EXP par2 '''
    # One-argument trigonometric functions: report-only (p[0] not assigned).
    concatenar_gramatica('\n <TR><TD> EXP ::= ' + str(p[1]) + '( EXP ) </TD> <TD> { exp.val = ' + str(p[1]) + ' ( exp1.val )} </TD></TR>')
def p_funciones_Trigonometricas1(p):
    ''' EXP : t_atan2 par1 EXP coma EXP par2
            | t_atan2d par1 EXP coma EXP par2 '''
    # Two-argument trigonometric functions: report-only.
    concatenar_gramatica('\n <TR><TD> EXP ::= ' + str(p[1]) + '( EXP , EXP ) </TD> <TD> { exp.val = ' + str(p[1]) + ' ( exp1.val , exp2.val ) } </TD></TR>')
def p_funciones_String_Binarias(p):
    ''' EXP : t_length par1 id par2
            | t_substring par1 char coma entero coma entero par2
            | t_trim par1 char par2
            | t_md5 par1 char par2
            | t_sha256 par1 par2
            | t_substr par1 par2
            | t_get_byte par1 par2
            | t_set_byte par1 par2
            | t_convert par1 EXP t_as Tipo par2
            | t_encode par1 par2
            | t_decode par1 par2 '''
    # String/binary functions: report-only; substring and convert get
    # specialized report rows, everything else a generic one.
    if p[1] == 'substring':
        concatenar_gramatica('\n <TR><TD> EXP ::= substring ( char , integer , integer ) </TD> <TD> { exp.val = substring ( char, integer,integer } </TD></TR>')
    elif p[1] == 'convert':
        concatenar_gramatica('\n <TR><TD> EXP ::= convert ( EXP as TIPO ) </TD> <TD> { exp.val = conver( exp.val , Tipo.type )} </TD></TR>')
    else:
        concatenar_gramatica('\n <TR><TD> EXP ::= ' + str(p[1]) + '( EXP ) </TD> <TD> { exp.val = ' + str(p[1]) + ' ( exp1.val ) }</TD></TR>')
# --------------------------------------Listas Fundamentales--------------------------------------------
def p_Lista_ID(p):
    '''Lista_ID : Lista_ID coma id
                | id '''
    # Left-recursive identifier list, accumulated into a plain Python list.
    if len(p) == 2:
        # Base case: start a fresh one-element list.
        p[0] = [p[1]]
        concatenar_gramatica('\n <TR><TD> LISTA_ID ::= id </TD> <TD> { lista_id.aux = id } </TD></TR>')
    else:
        # Recursive case: extend the accumulated list with the new id.
        p[0] = p[1] + [p[3]]
        concatenar_gramatica('\n <TR><TD> LISTA_ID ::= LISTA_ID , id </TD> <TD> { lista_id.val = concatenar (lista_id.aux, lista_id.val) } </TD></TR>')
def p_Lista_Enum(p):
    '''Lista_Enum : Lista_Enum coma char
                | char '''
    # Left-recursive list of enum character literals.
    # BUG FIX: removed a leftover debug statement `print(len(p))` that
    # printed to stdout on every reduction of this rule.
    if len(p) == 4:
        p[0] = p[1]+[p[3]]
        concatenar_gramatica('\n <TR><TD> LISTA_ENUM ::= LISTA_ENUM , char </TD> <TD> { lista_enum.lista = lista_enum.lista.add(lista_enum.aux) } </TD></TR>')
    else:
        p[0] = [p[1]]
        concatenar_gramatica('\n <TR><TD> LISTA_ENUM ::= char </TD> <TD> { lista_enum.lista = [char] } </TD></TR>')
def p_Lista_EXP(p):
    '''Lista_EXP : Lista_EXP coma EXP
                | EXP '''
    # Left-recursive expression list.
    if len(p) == 4:
        p[0] = p[1]+[p[3]]
        concatenar_gramatica('\n <TR><TD> LISTA_EXP ::= LISTA_EXP , EXP </TD> <TD> { lista_exp.val = concatenar (lista_exp.aux , lista_exp.val) }</TD></TR>')
    else:
        # BUG FIX: the single-element case returned the bare EXP instead of a
        # one-element list, unlike p_Lista_ID and p_Lista_Enum; consumers of
        # Lista_EXP (Select, Insert) then received a node where the grammar
        # report promises a 'lista'. Wrap it for consistency.
        p[0] = [p[1]]
        concatenar_gramatica('\n <TR><TD> LISTA_EXP ::= EXP </TD> <TD> { lista_exp.aux = char } </TD></TR>')
def p_Lista_Alias(p):
    '''Lista_Alias : Lista_Alias coma Nombre_Alias
                | Nombre_Alias '''
    # Left-recursive list of aliased table names.
    if len(p) == 4:
        p[0] = p[1]+[p[3]]
        concatenar_gramatica('\n <TR><TD> LISTA_ALIAS ::= LISTA_ALIAS , Nombre_Alias </TD> <TD> { lista_alias.val = concatenar ( lista_alias.aux , lista_alias.val) }</TD></TR>')
    else:
        # BUG FIX: the single-element case returned the bare alias instead of
        # a one-element list, so the recursive branch's `p[1]+[p[3]]` would
        # fail (or misbehave) on the second element. Wrap it, matching
        # p_Lista_ID / p_Lista_Enum.
        p[0] = [p[1]]
        concatenar_gramatica('\n <TR><TD> LISTA_ALIAS ::= Nombre_Alias </TD> <TD> { lista_alias.aux = nombre_alias.val } </TD></TR>')
def p_Nombre_Alias(p):
    '''Nombre_Alias : id id'''
    # Table name followed by its alias; only the table name p[1] is kept,
    # the alias p[2] is currently dropped.
    p[0] = p[1]
    concatenar_gramatica('\n <TR><TD> NOMBRE_ALIAS ::= id id </TD> <TD> { nombre_alias.val = id1 id2}</TD></TR>')
def p_error(p):
    # PLY syntax-error handler: record the error and resynchronize.
    if not p:
        # p is None when EOF is reached while more tokens were expected.
        print('end of file')
        ListaErrores.insertar(err.Nodo_Error("Sintactico", "Se esperaba mas pero llega fin de texto", input.count('\n'), len(input)))
        return
    ListaErrores.insertar(err.Nodo_Error("Sintactico", str(p.value),
                                         p.lineno, find_column(input, p)))
    # Panic-mode recovery: discard tokens until the next ';' (pyc) or EOF.
    while True:
        tok = parser.token()
        if not tok or tok.type == 'pyc':
            break
def concatenar_gramatica(cadena):
    # Prepend one HTML <TR> row to the global grammar report. Rows are
    # prepended, so the finished report lists reductions in reverse order.
    global reporteg
    reporteg = cadena + reporteg
def parse(input1, errores1):
    # Entry point: parse the SQL text `input1`, collecting errors into
    # `errores1`. Side effects: rebuilds the global PLY parser, resets the
    # global grammar report `reporteg`, and stores the input text globally
    # so error handlers can compute columns.
    global input
    global ListaErrores
    global reporteg
    ListaErrores = errores1
    reporteg = ''
    input = input1
    global parser
    parser = yacc.yacc()
    # NOTE(review): errok() on a freshly built parser, before parse(), looks
    # like a no-op -- confirm intent.
    parser.errok()
    par = parser.parse(input, tracking=True, lexer=lexer)
    return par
|
en
| 0.229756
|
# Tokens # se remueven comillas # se remueven comillas # Check for reserved words # Comentario simple // ... # Caracteres ignorados # Between , in , like, ilike, simiar, is isnull notnull #def p_Sentencia_SQL_DML(p): # 'Sentencia_SQL : EXP pyc' # p[0] = Sentencia("EXP", [p[1]]) # -------------------------------------------------------------SENTENCIAS DML Sentencias_DML : t_select Lista_EXP Select_SQL Condiciones pyc | t_select asterisco Select_SQL Condiciones pyc | t_insert t_into id Insert_SQL pyc | t_update id t_set Lista_EXP t_where EXP pyc | t_delete t_from id Condiciones pyc | t_use id pyc Table_Expression : Alias_Tabla | Subqueries Alias_Tabla : Lista_ID | Lista_Alias Subqueries : par1 t_select par2 # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> INSERT <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< Condiciones : t_where EXP | empty # ---------------------------- Sentencias DDL y Enum Type -------------- Sentencias_DDL : t_show t_databases pyc | Enum_Type | t_drop Drop pyc | t_alter Alter pyc | t_create Create pyc Drop : t_database DropDB id | t_table id DropDB : t_if t_exists | empty Alter : t_database id AlterDB | t_table id AlterTB AlterDB : t_rename t_to id | t_owner t_to SesionDB SesionDB : id | t_current_user | t_session_user AlterTB : t_add Add_Opc | t_drop Drop_Opc | t_alter t_column Alter_Column | t_rename t_column id t_to id Add_Opc : t_column id Tipo | t_foreign t_key par1 id par2 t_references id | t_constraint id t_unique par1 id par2 | t_check EXP Drop_Opc : t_column id | t_constraint id Alter_Column : id t_set t_not t_null | Alter_Columns Alter_Columns : Alter_Columns coma Alter_Column1 | Alter_Column1 Alter_Column1 : id t_type t_varchar par1 entero par2 | t_alter t_column id t_type t_varchar par1 entero par2 CreateDB : t_database Op1_DB | t_or t_replace t_database Op1_DB Op1_DB : t_if t_not t_exists id Sesion | id Sesion Sesion : t_owner Op_Sesion Sesion_mode | t_mode Op_Sesion | empty Op_Sesion : igual id | id Sesion_mode : t_mode Op_Sesion | empty Inherits : 
t_inherits par1 id par2 | empty Columnas : Columnas coma Columna | Columna Columna : id Tipo Cond_CreateTB | Constraint Cond_CreateTB : t_default id Cond_CreateTB | t_not t_null Cond_CreateTB | t_null Cond_CreateTB | t_constraint id Opc_Constraint Cond_CreateTB | t_primary t_key Cond_CreateTB | t_references id Cond_CreateTB | empty Opc_Constraint : t_unique | t_check par1 EXP par2 Constraint : t_unique par1 Lista_ID par2 | t_constraint id t_check par1 EXP par2 | t_check par1 EXP par2 | t_primary t_key par1 Lista_ID par2 | t_foreign t_key par1 Lista_ID par2 t_references id par1 Lista_ID par2 | empty Tipo : t_smallint | t_integer | t_bigint | t_decimal | t_numeric | t_real | t_double t_precision | t_money | t_character t_varying par1 Valor par2 | t_varchar par1 Valor par2 | t_character par1 Valor par2 | t_charn par1 Valor par2 | t_text | t_boolean Valor : decimal | entero | string | char | t_true | t_false # ----------------------------EXPRESIONES Y OPERACIONES--------------------------------------------------------------- EXP : EXP mas EXP | EXP menos EXP | EXP asterisco EXP | EXP div EXP | EXP pot EXP | EXP porcentaje EXP EXP : EXP mayor EXP | EXP mayori EXP | EXP menor EXP | EXP menori EXP | EXP igual EXP | EXP diferente EXP | EXP diferentede EXP EXP : EXP t_and EXP | EXP t_or EXP EXP : mas EXP %prec umas | menos EXP %prec umenos | t_not EXP EXP : id punto id EXP : EXP t_as EXP EXP : t_avg par1 EXP par2 | t_sum par1 EXP par2 | t_count par1 EXP par2 | t_max par1 EXP par2 | t_min par1 EXP par2 EXP : t_abs par1 EXP par2 | t_cbrt par1 EXP par2 | t_ceil par1 EXP par2 | t_ceiling par1 EXP par2 | t_degrees par1 EXP par2 | t_exp par1 EXP par2 | t_factorial par1 EXP par2 | t_floor par1 EXP par2 | t_ln par1 EXP par2 | t_log par1 EXP par2 | t_pi par1 par2 | t_radians par1 EXP par2 | t_round par1 EXP par2 | t_min_scale par1 EXP par2 | t_scale par1 EXP par2 | t_sign par1 EXP par2 | t_sqrt par1 EXP par2 | t_trim_scale par1 EXP par2 | t_trunc par1 EXP par2 | t_width_bucket par1 
Lista_EXP par2 | t_random par1 par2 | t_setseed par1 EXP par2 EXP : t_div par1 EXP coma EXP par2 | t_gcd par1 EXP coma EXP par2 | t_mod par1 EXP coma EXP par2 | t_power par1 EXP coma EXP par2 EXP : t_acos par1 EXP par2 | t_acosd par1 EXP par2 | t_asin par1 EXP par2 | t_asind par1 EXP par2 | t_atan par1 EXP par2 | t_atand par1 EXP par2 | t_cos par1 EXP par2 | t_cosd par1 EXP par2 | t_cot par1 EXP par2 | t_cotd par1 EXP par2 | t_sin par1 EXP par2 | t_sind par1 EXP par2 | t_tan par1 EXP par2 | t_tand par1 EXP par2 EXP : t_atan2 par1 EXP coma EXP par2 | t_atan2d par1 EXP coma EXP par2 EXP : t_length par1 id par2 | t_substring par1 char coma entero coma entero par2 | t_trim par1 char par2 | t_md5 par1 char par2 | t_sha256 par1 par2 | t_substr par1 par2 | t_get_byte par1 par2 | t_set_byte par1 par2 | t_convert par1 EXP t_as Tipo par2 | t_encode par1 par2 | t_decode par1 par2 # --------------------------------------Listas Fundamentales-------------------------------------------- Lista_ID : Lista_ID coma id | id Lista_Enum : Lista_Enum coma char | char Lista_EXP : Lista_EXP coma EXP | EXP Lista_Alias : Lista_Alias coma Nombre_Alias | Nombre_Alias Nombre_Alias : id id
| 1.98308
| 2
|
src/licensedcode/models.py
|
chetanya-shrimali/scancode-toolkit
| 0
|
6625699
|
<gh_stars>0
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
import codecs
from collections import Counter
from collections import defaultdict
from collections import namedtuple
from collections import OrderedDict
from itertools import chain
from operator import itemgetter
from os.path import exists
from os.path import join
from commoncode.fileutils import file_base_name
from commoncode.fileutils import file_name
from commoncode.fileutils import file_iter
from textcode.analysis import text_lines
from licensedcode import MIN_MATCH_LENGTH
from licensedcode import MIN_MATCH_HIGH_LENGTH
from licensedcode import licenses_data_dir
from licensedcode import rules_data_dir
from licensedcode import saneyaml
from licensedcode.tokenize import rule_tokenizer
from licensedcode.tokenize import query_tokenizer
from commoncode import fileutils
"""
Reference License and license Rule structures persisted as a combo of a YAML
data file and one or more text files containing license or notice texts.
"""
# Set to True to print detailed representations of objects when tracing
TRACE_REPR = False
class License(object):
    """
    A license consists of these files, where <key> is the license key:
    - <key>.yml : the license data in YAML
    - <key>.LICENSE: the license text

    Instances resolve their file paths from a source directory and the key,
    and load any existing YAML data at construction time.
    """
    # we do not really need slots but they help keep the attributes in check
    __slots__ = (
        'key',
        'src_dir',
        'is_deprecated',
        'short_name',
        'name',
        'category',
        'owner',
        'homepage_url',
        'notes',
        'is_exception',
        'next_version',
        'is_or_later',
        'base_license',
        'spdx_license_key',
        'text_urls',
        'osi_url',
        'faq_url',
        'other_urls',
        'data_file',
        'text_file',
        'minimum_coverage',
        'standard_notice',
    )
def __init__(self, key=None, src_dir=licenses_data_dir):
"""
Initialize a License for a `key` and data stored in the `src_dir`
directory. Key is a lower-case unique ascii string.
"""
# unique key: lower case ASCII characters, digits, underscore and dots.
self.key = key or ''
self.src_dir = src_dir
# if this is a deprecated license, add also notes explaining why
self.is_deprecated = False
# commonly used short name, often abbreviated.
self.short_name = ''
# full name.
self.name = ''
# Permissive, Copyleft, etc
self.category = ''
self.owner = ''
self.homepage_url = ''
self.notes = ''
# if this is a license exception, the license key this exception applies to
self.is_exception = False
# FIXME: this is WAY too complicated and likely not needed
# license key for the next version of this license if any
self.next_version = ''
# True if this license allows later versions to be used
self.is_or_later = False
# If is_or_later is True, license key for the not "or later" variant if any
self.base_license = ''
# SPDX key for SPDX licenses
self.spdx_license_key = ''
# Various URLs for info
self.text_urls = []
self.osi_url = ''
self.faq_url = ''
self.other_urls = []
self.minimum_coverage = 0
self.standard_notice = ''
# data file paths and known extensions
self.data_file = ''
self.text_file = ''
if self.src_dir:
self.set_file_paths()
if exists(self.data_file):
self.load(src_dir)
def __repr__(self, *args, **kwargs):
return 'License(key="{}")'.format(self.key)
def set_file_paths(self):
self.data_file = join(self.src_dir, self.key + '.yml')
self.text_file = join(self.src_dir, self.key + '.LICENSE')
def relocate(self, target_dir, new_key=None):
"""
Return a copy of this license object relocated to a new `src_dir`.
The data and license text files are persisted in the new `src_dir`.
"""
if not target_dir or target_dir == self.src_dir:
raise ValueError(
'Cannot relocate a License to empty directory or same directory.')
if new_key:
key = new_key
else:
key = self.key
newl = License(key, target_dir)
# copy attributes
excluded_attrs = ('key', 'src_dir', 'data_file', 'text_file',)
attrs = [a for a in self.__slots__ if a not in excluded_attrs]
for name in attrs:
setattr(newl, name, getattr(self, name))
# save it all to files
if self.text:
fileutils.copyfile(self.text_file, newl.text_file)
newl.dump()
return newl
def update(self, mapping):
for k, v in mapping.items():
setattr(self, k, v)
def __copy__(self):
oldl = self.to_dict()
newl = License(key=self.key)
newl.update(oldl)
return newl
@property
def text(self):
"""
License text, re-loaded on demand.
"""
return self._read_text(self.text_file)
def to_dict(self):
"""
Return an OrderedDict of license data (excluding texts).
Fields with empty values are not included.
"""
data = OrderedDict()
data['key'] = self.key
if self.short_name:
data['short_name'] = self.short_name
if self.name:
data['name'] = self.name
if self.is_deprecated:
data['is_deprecated'] = self.is_deprecated
if self.category:
data['category'] = self.category
if self.owner:
data['owner'] = self.owner
if self.homepage_url:
data['homepage_url'] = self.homepage_url
if self.notes:
data['notes'] = self.notes
if self.is_exception:
data['is_exception'] = self.is_exception
if self.next_version:
data['next_version'] = self.next_version
if self.is_or_later:
data['is_or_later'] = self.is_or_later
if self.base_license:
data['base_license'] = self.base_license
if self.spdx_license_key:
data['spdx_license_key'] = self.spdx_license_key
if self.text_urls:
data['text_urls'] = self.text_urls
if self.osi_url:
data['osi_url'] = self.osi_url
if self.faq_url:
data['faq_url'] = self.faq_url
if self.other_urls:
data['other_urls'] = self.other_urls
if self.minimum_coverage:
data['minimum_coverage'] = int(self.minimum_coverage)
if self.standard_notice:
data['standard_notice'] = self.standard_notice
return data
def dump(self):
"""
Dump a representation of self as multiple files named
this way:
- <key>.yml : the license data in YAML
- <key>.LICENSE: the license text
"""
as_yaml = saneyaml.dump(self.to_dict())
self._write(self.data_file, as_yaml)
if self.text:
self._write(self.text_file, self.text)
def _write(self, f, d):
with codecs.open(f, 'wb', encoding='utf-8') as of:
of.write(d)
def load(self, src_dir):
"""
Populate license data from a YAML file stored in of src_dir.
Does not load text files.
Unknown fields are ignored and not bound to the License object.
"""
try:
with codecs.open(self.data_file, encoding='utf-8') as f:
data = saneyaml.load(f.read())
except Exception, e:
# this is a rare case: fail loudly
print()
print('#############################')
print('INVALID LICENSE YAML FILE:', self.data_file)
print('#############################')
print(e)
print('#############################')
raise
numeric_keys = ('minimum_coverage',)
for k, v in data.items():
if k in numeric_keys:
v = int(v)
if k == 'key':
assert self.key == v, 'Inconsistent YAML key and file names for %r' % self.key
setattr(self, k, v)
def _read_text(self, location):
if not exists(location):
text = ''
else:
with codecs.open(location, encoding='utf-8') as f:
text = f.read()
return text
@staticmethod
def validate(licenses, verbose=False, no_dupe_urls=False):
"""
Check that licenses are valid. `licenses` is a mapping of key ->
License. Return dictionaries of infos, errors and warnings mapping a
license key to validation issue messages. Print messages if verbose is
True.
"""
infos = defaultdict(list)
warnings = defaultdict(list)
errors = defaultdict(list)
# used for global dedupe of texts
by_spdx_key = defaultdict(list)
by_text = defaultdict(list)
for key, lic in licenses.items():
err = errors[key].append
warn = warnings[key].append
info = infos[key].append
if not lic.short_name:
warn('No short name')
if not lic.name:
warn('No name')
if not lic.category:
warn('No category')
if not lic.owner:
warn('No owner')
if lic.next_version and lic.next_version not in licenses:
err('License next version is unknown')
if (lic.is_or_later and
lic.base_license and
lic.base_license not in licenses):
err('Base license for an "or later" license is unknown')
# URLS dedupe and consistency
if no_dupe_urls:
if lic.text_urls and not all(lic.text_urls):
warn('Some empty license text_urls')
if lic.other_urls and not all(lic.other_urls):
warn('Some empty license other_urls')
# redundant URLs used multiple times
if lic.homepage_url:
if lic.homepage_url in lic.text_urls:
warn('Homepage URL also in text_urls')
if lic.homepage_url in lic.other_urls:
warn('Homepage URL also in other_urls')
if lic.homepage_url == lic.faq_url:
warn('Homepage URL same as faq_url')
if lic.homepage_url == lic.osi_url:
warn('Homepage URL same as osi_url')
if lic.osi_url or lic.faq_url:
if lic.osi_url == lic.faq_url:
warn('osi_url same as faq_url')
all_licenses = lic.text_urls + lic.other_urls
for url in lic.osi_url, lic.faq_url, lic.homepage_url:
if url: all_licenses.append(url)
if not len(all_licenses) == len(set(all_licenses)):
warn('Some duplicated URLs')
# local text consistency
text = lic.text
license_qtokens = tuple(query_tokenizer(text, lower=True))
license_rtokens = tuple(rule_tokenizer(text, lower=True))
if license_qtokens != license_rtokens:
info('License text contains rule templated region with {{}}')
if not license_qtokens:
info('No license text')
else:
# for global dedupe
by_text[license_qtokens].append(key + ': TEXT')
# SPDX consistency
if lic.spdx_license_key:
by_spdx_key[lic.spdx_license_key].append(key)
# global SPDX consistency
multiple_spdx_keys_used = {k: v for k, v in by_spdx_key.items() if len(v) > 1}
if multiple_spdx_keys_used:
for k, lkeys in multiple_spdx_keys_used.items():
infos['GLOBAL'].append('SPDX key: ' + k + ' used in multiple licenses: ' + ', '.join(sorted(lkeys)))
# global text dedupe
multiple_texts = {k: v for k, v in by_text.items() if len(v) > 1}
if multiple_texts:
for k, msgs in multiple_texts.items():
errors['GLOBAL'].append('Duplicate texts in multiple licenses:' + ', '.join(sorted(msgs)))
errors = {k: v for k, v in errors.items() if v}
warnings = {k: v for k, v in warnings.items() if v}
infos = {k: v for k, v in infos.items() if v}
if verbose:
print('Licenses validation errors:')
for key, msgs in sorted(errors.items()):
print('ERRORS for:', key, ':', '\n'.join(msgs))
print('Licenses validation warnings:')
for key, msgs in sorted(warnings.items()):
print('WARNINGS for:', key, ':', '\n'.join(msgs))
print('Licenses validation infos:')
for key, msgs in sorted(infos.items()):
print('INFOS for:', key, ':', '\n'.join(msgs))
return errors, warnings, infos
def load_licenses(licenses_data_dir=licenses_data_dir, with_deprecated=False):
    """
    Return a mapping of key -> License object loaded from the license data
    files found in `licenses_data_dir`. Deprecated licenses are skipped
    unless `with_deprecated` is True.
    """
    licenses = {}
    data_files = (f for f in file_iter(licenses_data_dir) if f.endswith('.yml'))
    for data_file in data_files:
        key = file_base_name(data_file)
        license_obj = License(key, licenses_data_dir)
        if license_obj.is_deprecated and not with_deprecated:
            continue
        licenses[key] = license_obj
    return licenses
def get_rules(licenses_data_dir=licenses_data_dir, rules_data_dir=rules_data_dir):
    """
    Return an iterable of license detection rules: rules built from the
    license texts chained with rules loaded from rule files. Raise a
    MissingLicenses exception if a rule references unknown license keys.
    """
    from licensedcode.cache import get_licenses_db

    licenses_db = get_licenses_db(licenses_data_dir=licenses_data_dir)
    file_rules = list(load_rules(rules_data_dir=rules_data_dir))
    # fail early if any rule points to a license key we do not know
    check_rules_integrity(file_rules, licenses_db)
    license_rules = build_rules_from_licenses(licenses_db)
    return chain(license_rules, file_rules)
class MissingLicenses(Exception):
    """Raised when detection rules reference unknown license keys."""
    pass
def check_rules_integrity(rules, licenses):
    """
    Given a lists of rules, check that all the rule license keys
    reference a known license from a mapping of licenses (key->license).
    Raise a MissingLicenses exception with a message containing the list
    of rule files without a corresponding license.
    """
    # map a rule data file path -> set of unknown license keys it references
    invalid_rules = defaultdict(set)
    for rule in rules:
        unknown_keys = [key for key in rule.licenses if key not in licenses]
        if unknown_keys:
            invalid_rules[rule.data_file].update(unknown_keys)

    if invalid_rules:
        # use items() rather than the Python2-only iteritems() so this
        # works on both Python 2 and 3
        invalid_rules = (
            ' '.join(keys) + '\n' +
            'file://' + data_file + '\n' +
            'file://' + data_file.replace('.yml', '.RULE') + '\n'
            for data_file, keys in invalid_rules.items() if keys)
        msg = 'Rules referencing missing licenses:\n' + '\n'.join(sorted(invalid_rules))
        raise MissingLicenses(msg)
def build_rules_from_licenses(licenses):
    """
    Yield a Rule for each license text of a `licenses` mapping of
    key -> license object. Licenses whose text file does not exist on
    disk are skipped.
    """
    # use items() rather than the Python2-only iteritems() so this works
    # on both Python 2 and 3
    for license_key, license_obj in licenses.items():
        text_file = join(license_obj.src_dir, license_obj.text_file)
        minimum_coverage = license_obj.minimum_coverage
        if exists(text_file):
            yield Rule(text_file=text_file, licenses=[license_key],
                       minimum_coverage=minimum_coverage, is_license=True)
def load_rules(rules_data_dir=rules_data_dir):
    """
    Yield Rule objects loaded from the rule files in `rules_data_dir`.
    After all rules are yielded, raise an Exception if orphaned files or
    rule files with case-colliding names are found in the directory.
    """
    # TODO: OPTIMIZE: create a graph of rules to account for containment and similarity clusters?
    # TODO: we should assign the rule id at that stage
    seen_files = set()
    processed_files = set()
    lower_case_files = set()
    case_problems = set()
    for data_file in file_iter(rules_data_dir):
        if data_file.endswith('.yml'):
            base_name = file_base_name(data_file)
            rule_file = join(rules_data_dir, base_name + '.RULE')
            yield Rule(data_file=data_file, text_file=rule_file)

            # accumulate sets to ensures we do not have illegal names or extra
            # orphaned files
            data_lower = data_file.lower()
            if data_lower in lower_case_files:
                case_problems.add(data_lower)
            else:
                lower_case_files.add(data_lower)

            rule_lower = rule_file.lower()
            if rule_lower in lower_case_files:
                case_problems.add(rule_lower)
            else:
                lower_case_files.add(rule_lower)

            processed_files.update([data_file, rule_file])

        if not data_file.endswith('~'):
            seen_files.add(data_file)

    unknown_files = seen_files - processed_files
    if unknown_files or case_problems:
        # Fixes over the original:
        # - msg is initialized so the case_problems-only path cannot raise
        #   NameError on the += below
        # - distinct variable names so the orphan file list is not clobbered
        #   when both problems occur
        # - "casein" typo corrected to "case in"
        msg = ''
        if unknown_files:
            orphan_files = '\n'.join(sorted(unknown_files))
            msg = 'Orphaned files in rule directory: %(rules_data_dir)r\n%(orphan_files)s'

        if case_problems:
            dupe_files = '\n'.join(sorted(case_problems))
            msg += '\nRule files with non-unique name ignoring case in rule directory: %(rules_data_dir)r\n%(dupe_files)s'

        raise Exception(msg % locals())
# Token-count thresholds for matching a rule: high_len/low_len/length are
# token counts for the rule, `small` flags short rules needing special
# handling, and min_high/min_len are minimum matched-token requirements.
Thresholds = namedtuple('Thresholds', ['high_len', 'low_len', 'length', 'small', 'min_high', 'min_len'])
class Rule(object):
    """
    A detection rule object is a text to use for detection and corresponding
    detected licenses and metadata. A rule text can contain variable parts
    marked with double curly braces {{ }}.
    """
    __slots__ = (
        'rid', 'identifier',
        'licenses', 'license_choice', 'license', 'licensing_identifier',
        'false_positive',
        'notes',
        'data_file', 'text_file', '_text',
        'length', 'low_length', 'high_length', '_thresholds',
        'length_unique', 'low_unique', 'high_unique', '_thresholds_unique',
        'minimum_coverage', 'relevance', 'has_stored_relevance',
        'is_license'
    )

    def __init__(self, data_file=None, text_file=None, licenses=None,
                 license_choice=False, notes=None, minimum_coverage=0,
                 is_license=False, _text=None):
        ###########
        # FIXME: !!! TWO RULES MAY DIFFER BECAUSE THEY ARE UPDATED BY INDEXING
        ###########

        # optional rule id int typically assigned at indexing time
        self.rid = None

        if not text_file:
            # testing only: synthetic identifier derived from the text length
            assert _text
            self.identifier = '_tst_' + str(len(_text))
        else:
            self.identifier = file_name(text_file)

        # list of valid license keys
        self.licenses = licenses or []
        # True if the rule is for a choice of all licenses. default to False
        self.license_choice = license_choice
        # License expression
        # TODO: implement me.
        self.license = ''

        # is this rule text a false positive when matched? (filtered out) FIXME: this
        # should be unified with the relevance: a false positive match is a a match
        # with a relevance of zero
        self.false_positive = False

        # is this rule text only to be matched with a minimum coverage?
        self.minimum_coverage = minimum_coverage

        # optional, free text
        self.notes = notes

        # what is the relevance of a match to this rule text? a float between 0 and
        # 100 where 100 means highly relevant and 0 means not relevant at all.
        # For instance a match to the "gpl" or the "cpol" words have a fairly low
        # relevance as they are a weak indication of an actual license and could be
        # a false positive. In some cases, this may even be used to discard obvious
        # false positive matches automatically.
        self.relevance = 100
        self.has_stored_relevance = False

        # set to True if the rule is built from a .LICENSE full text
        self.is_license = is_license

        # path to the YAML data file for this rule
        self.data_file = data_file
        if data_file:
            try:
                self.load()
            except Exception as e:
                # str(e) instead of the Python2-only e.message attribute
                message = 'While loading: %(data_file)r' % locals() + str(e)
                print(message)
                raise Exception(message)

        # licensing identifier: TODO: replace with a license expression
        self.licensing_identifier = tuple(self.licenses) + (license_choice,)

        # path to the rule text file
        self.text_file = text_file

        # for testing only, when we do not use a file
        self._text = _text

        # These attributes are computed upon text loading or setting the thresholds
        ###########################################################################
        # length in number of token strings
        self.length = 0

        # lengths in token ids, including high/low token counts, set in indexing
        self.high_length = 0
        self.low_length = 0
        self._thresholds = None

        # lengths in token ids, including high/low token counts, set in indexing
        self.high_unique = 0
        self.low_unique = 0
        self.length_unique = 0
        self._thresholds_unique = None

    def tokens(self, lower=True):
        """
        Return an iterable of token strings for this rule. Length is recomputed as a
        side effect. Tokens inside double curly braces (eg. {{ignored}}) are skipped
        and ignored.
        """
        length = 0
        text = self.text()
        text = text.strip()

        # FIXME: this is weird:
        # We tag this rule as being a bare URL if it starts with a scheme and
        # is on one line: this is used to determine a matching approach
        if text.startswith(('http://', 'https://', 'ftp://')) and '\n' not in text[:1000]:
            self.minimum_coverage = 100

        for token in rule_tokenizer(self.text(), lower=lower):
            length += 1
            yield token

        self.length = length
        self.compute_relevance()

    def text(self):
        """
        Return the rule text loaded from its file.
        """
        # used for test only
        if self._text:
            return self._text
        elif self.text_file and exists(self.text_file):
            # IMPORTANT: use the same process as query text loading for symmetry
            lines = text_lines(self.text_file, demarkup=False)
            return ' '.join(lines)
        else:
            raise Exception('Inconsistent rule text for:', self.identifier)

    def __repr__(self):
        idf = self.identifier
        if TRACE_REPR:
            text = self.text()
        else:
            text = self.text()
            if text:
                text = text[:20] + '...'
        keys = self.licenses
        choice = self.license_choice
        fp = self.false_positive
        minimum_coverage = self.minimum_coverage
        return 'Rule(%(idf)r, lics=%(keys)r, fp=%(fp)r, minimum_coverage=%(minimum_coverage)r, %(text)r)' % locals()

    def same_licensing(self, other):
        """
        Return True if the other rule has a the same licensing as this rule.
        """
        # TODO: include license expressions
        return self.licensing_identifier == other.licensing_identifier

    def licensing_contains(self, other):
        """
        Return True if the other rule licensing is contained in this rule licensing.
        """
        # TODO: include license expressions
        return set(self.licensing_identifier).issuperset(other.licensing_identifier)

    def negative(self):
        """
        Return True if this Rule does not point to real licenses and is
        therefore a "negative" rule denoting that a match to this rule should be
        ignored.
        """
        return not self.licenses and not self.false_positive

    def small(self):
        """
        Is this a small rule? It needs special handling for detection.
        """
        SMALL_RULE = 15
        return self.length < SMALL_RULE or self.minimum_coverage == 100

    def thresholds(self):
        """
        Return a Thresholds tuple considering the occurrence of all tokens.
        Note: may also raise self.minimum_coverage for very short rules as a
        side effect.
        """
        if not self._thresholds:
            min_high = min([self.high_length, MIN_MATCH_HIGH_LENGTH])
            min_len = MIN_MATCH_LENGTH

            # note: we cascade ifs from largest to smallest lengths
            # FIXME: this is not efficient
            if self.length < 30:
                min_len = self.length // 2

            if self.length < 10:
                min_high = self.high_length
                min_len = self.length
                self.minimum_coverage = 80

            if self.length < 3:
                min_high = self.high_length
                min_len = self.length
                self.minimum_coverage = 100

            if self.minimum_coverage == 100:
                min_high = self.high_length
                min_len = self.length

            self._thresholds = Thresholds(
                self.high_length, self.low_length, self.length,
                self.small(), min_high, min_len
            )
        return self._thresholds

    def thresholds_unique(self):
        """
        Return a Thresholds tuple considering the occurrence of only unique tokens.
        """
        if not self._thresholds_unique:
            highu = (int(self.high_unique // 2)) or self.high_unique
            min_high = min([highu, MIN_MATCH_HIGH_LENGTH])
            min_len = MIN_MATCH_LENGTH
            # note: we cascade IFs from largest to smallest lengths
            if self.length < 20:
                min_high = self.high_unique
                min_len = min_high

            if self.length < 10:
                min_high = self.high_unique
                if self.length_unique < 2:
                    min_len = self.length_unique
                else:
                    min_len = self.length_unique - 1

            if self.length < 5:
                min_high = self.high_unique
                min_len = self.length_unique

            if self.minimum_coverage == 100:
                min_high = self.high_unique
                min_len = self.length_unique

            self._thresholds_unique = Thresholds(
                self.high_unique, self.low_unique, self.length_unique,
                self.small(), min_high, min_len)
        return self._thresholds_unique

    def to_dict(self):
        """
        Return an OrderedDict of self, excluding texts. Used for serialization.
        Empty values are not included.
        """
        data = OrderedDict()
        if self.licenses:
            data['licenses'] = self.licenses
        if self.license_choice:
            data['license_choice'] = self.license_choice
        if self.license:
            data['license'] = self.license
        if self.false_positive:
            data['false_positive'] = self.false_positive
        if self.has_stored_relevance:
            data['relevance'] = self.relevance
        if self.minimum_coverage:
            data['minimum_coverage'] = self.minimum_coverage
        if self.notes:
            # fixed: was `self.note`, an AttributeError since the slot is `notes`
            data['notes'] = self.notes
        return data

    def dump(self):
        """
        Dump a representation of self to tgt_dir as two files:
         - a .yml for the rule data in YAML block format
         - a .RULE: the rule text as a UTF-8 file
        """
        if self.data_file:
            as_yaml = saneyaml.dump(self.to_dict())
            with codecs.open(self.data_file, 'wb', encoding='utf-8') as df:
                df.write(as_yaml)
            with codecs.open(self.text_file, 'wb', encoding='utf-8') as tf:
                tf.write(self.text())

    def load(self, load_notes=False):
        """
        Load self from a .RULE YAML file stored in self.data_file.
        Does not load the rule text file.
        Unknown fields are ignored and not bound to the Rule object.
        """
        try:
            with codecs.open(self.data_file, encoding='utf-8') as f:
                data = saneyaml.load(f.read())
        # use `as e` (valid Python 2.6+ and 3.x) instead of the
        # Python2-only `except Exception, e:` syntax
        except Exception as e:
            print('#############################')
            print('INVALID LICENSE RULE FILE:', self.data_file)
            print('#############################')
            print(e)
            print('#############################')
            # this is a rare case, but yes we abruptly stop.
            raise e

        self.licenses = data.get('licenses', [])
        self.license_choice = data.get('license_choice', False)
        self.license = data.get('license')
        self.false_positive = data.get('false_positive', False)
        relevance = data.get('relevance')
        if relevance is not None:
            # Keep track if we have a stored relevance of not.
            self.has_stored_relevance = True
            self.relevance = float(relevance)
        self.minimum_coverage = float(data.get('minimum_coverage', 0))

        # these are purely informational and not used at run time
        if load_notes:
            notes = data.get('notes')
            if notes:
                self.notes = notes.strip()
        return self

    def compute_relevance(self):
        """
        Compute and set the `relevance` attribute for this rule. The
        relevance is a float between 0 and 100 where 100 means highly
        relevant and 0 means not relevant at all.

        It is either pre-defined in the rule YAML data file with the
        relevance attribute or computed here using this approach:

        - a rule of length up to 20 receives 5 relevance points per token (so a rule
          of length 1 has a 5 relevance and a rule of length 20 has a 100 relevance)
        - a rule of length over 20 has a 100 relevance
        - a false positive or a negative rule has a relevance of zero.
        """
        if self.has_stored_relevance:
            return

        # case for false positive: they do not have licenses and their matches are
        # never returned. Relevance is zero.
        if self.false_positive:
            self.relevance = 0
            return

        # case for negative rules with no license (and are not an FP)
        # they do not have licenses and their matches are never returned
        if self.negative():
            self.relevance = 0
            return

        length = self.length
        if length >= 20:
            # general case
            self.relevance = 100
        else:
            self.relevance = length * 5
def _print_rule_stats():
    """
    Print rules statistics: the most common and the smallest rule lengths,
    for both all-token and high-token lengths.
    """
    from licensedcode.cache import get_index
    idx = get_index()
    rules = idx.rules_by_rid
    sizes = Counter(r.length for r in rules)
    print('Top 15 lengths: ', sizes.most_common(15))
    # use items() rather than the Python2-only iteritems() so this works
    # on both Python 2 and 3
    print('15 smallest lengths: ', sorted(sizes.items(),
                                          key=itemgetter(0))[:15])

    high_sizes = Counter(r.high_length for r in rules)
    print('Top 15 high lengths: ', high_sizes.most_common(15))
    print('15 smallest high lengths: ', sorted(high_sizes.items(),
                                               key=itemgetter(0))[:15])
|
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
import codecs
from collections import Counter
from collections import defaultdict
from collections import namedtuple
from collections import OrderedDict
from itertools import chain
from operator import itemgetter
from os.path import exists
from os.path import join
from commoncode.fileutils import file_base_name
from commoncode.fileutils import file_name
from commoncode.fileutils import file_iter
from textcode.analysis import text_lines
from licensedcode import MIN_MATCH_LENGTH
from licensedcode import MIN_MATCH_HIGH_LENGTH
from licensedcode import licenses_data_dir
from licensedcode import rules_data_dir
from licensedcode import saneyaml
from licensedcode.tokenize import rule_tokenizer
from licensedcode.tokenize import query_tokenizer
from commoncode import fileutils
"""
Reference License and license Rule structures persisted as a combo of a YAML
data file and one or more text files containing license or notice texts.
"""
# Set to True to print detailed representations of objects when tracing
TRACE_REPR = False
class License(object):
"""
A license consists of these files, where <key> is the license key:
- <key>.yml : the license data in YAML
- <key>.LICENSE: the license text
"""
# we do not really need slots but they help keep the attributes in check
__slots__ = (
'key',
'src_dir',
'is_deprecated',
'short_name',
'name',
'category',
'owner',
'homepage_url',
'notes',
'is_exception',
'next_version',
'is_or_later',
'base_license',
'spdx_license_key',
'text_urls',
'osi_url',
'faq_url',
'other_urls',
'data_file',
'text_file',
'minimum_coverage',
'standard_notice',
)
def __init__(self, key=None, src_dir=licenses_data_dir):
"""
Initialize a License for a `key` and data stored in the `src_dir`
directory. Key is a lower-case unique ascii string.
"""
# unique key: lower case ASCII characters, digits, underscore and dots.
self.key = key or ''
self.src_dir = src_dir
# if this is a deprecated license, add also notes explaining why
self.is_deprecated = False
# commonly used short name, often abbreviated.
self.short_name = ''
# full name.
self.name = ''
# Permissive, Copyleft, etc
self.category = ''
self.owner = ''
self.homepage_url = ''
self.notes = ''
# if this is a license exception, the license key this exception applies to
self.is_exception = False
# FIXME: this is WAY too complicated and likely not needed
# license key for the next version of this license if any
self.next_version = ''
# True if this license allows later versions to be used
self.is_or_later = False
# If is_or_later is True, license key for the not "or later" variant if any
self.base_license = ''
# SPDX key for SPDX licenses
self.spdx_license_key = ''
# Various URLs for info
self.text_urls = []
self.osi_url = ''
self.faq_url = ''
self.other_urls = []
self.minimum_coverage = 0
self.standard_notice = ''
# data file paths and known extensions
self.data_file = ''
self.text_file = ''
if self.src_dir:
self.set_file_paths()
if exists(self.data_file):
self.load(src_dir)
def __repr__(self, *args, **kwargs):
return 'License(key="{}")'.format(self.key)
def set_file_paths(self):
self.data_file = join(self.src_dir, self.key + '.yml')
self.text_file = join(self.src_dir, self.key + '.LICENSE')
def relocate(self, target_dir, new_key=None):
"""
Return a copy of this license object relocated to a new `src_dir`.
The data and license text files are persisted in the new `src_dir`.
"""
if not target_dir or target_dir == self.src_dir:
raise ValueError(
'Cannot relocate a License to empty directory or same directory.')
if new_key:
key = new_key
else:
key = self.key
newl = License(key, target_dir)
# copy attributes
excluded_attrs = ('key', 'src_dir', 'data_file', 'text_file',)
attrs = [a for a in self.__slots__ if a not in excluded_attrs]
for name in attrs:
setattr(newl, name, getattr(self, name))
# save it all to files
if self.text:
fileutils.copyfile(self.text_file, newl.text_file)
newl.dump()
return newl
def update(self, mapping):
for k, v in mapping.items():
setattr(self, k, v)
def __copy__(self):
oldl = self.to_dict()
newl = License(key=self.key)
newl.update(oldl)
return newl
@property
def text(self):
"""
License text, re-loaded on demand.
"""
return self._read_text(self.text_file)
def to_dict(self):
"""
Return an OrderedDict of license data (excluding texts).
Fields with empty values are not included.
"""
data = OrderedDict()
data['key'] = self.key
if self.short_name:
data['short_name'] = self.short_name
if self.name:
data['name'] = self.name
if self.is_deprecated:
data['is_deprecated'] = self.is_deprecated
if self.category:
data['category'] = self.category
if self.owner:
data['owner'] = self.owner
if self.homepage_url:
data['homepage_url'] = self.homepage_url
if self.notes:
data['notes'] = self.notes
if self.is_exception:
data['is_exception'] = self.is_exception
if self.next_version:
data['next_version'] = self.next_version
if self.is_or_later:
data['is_or_later'] = self.is_or_later
if self.base_license:
data['base_license'] = self.base_license
if self.spdx_license_key:
data['spdx_license_key'] = self.spdx_license_key
if self.text_urls:
data['text_urls'] = self.text_urls
if self.osi_url:
data['osi_url'] = self.osi_url
if self.faq_url:
data['faq_url'] = self.faq_url
if self.other_urls:
data['other_urls'] = self.other_urls
if self.minimum_coverage:
data['minimum_coverage'] = int(self.minimum_coverage)
if self.standard_notice:
data['standard_notice'] = self.standard_notice
return data
def dump(self):
"""
Dump a representation of self as multiple files named
this way:
- <key>.yml : the license data in YAML
- <key>.LICENSE: the license text
"""
as_yaml = saneyaml.dump(self.to_dict())
self._write(self.data_file, as_yaml)
if self.text:
self._write(self.text_file, self.text)
def _write(self, f, d):
with codecs.open(f, 'wb', encoding='utf-8') as of:
of.write(d)
def load(self, src_dir):
"""
Populate license data from a YAML file stored in of src_dir.
Does not load text files.
Unknown fields are ignored and not bound to the License object.
"""
try:
with codecs.open(self.data_file, encoding='utf-8') as f:
data = saneyaml.load(f.read())
except Exception, e:
# this is a rare case: fail loudly
print()
print('#############################')
print('INVALID LICENSE YAML FILE:', self.data_file)
print('#############################')
print(e)
print('#############################')
raise
numeric_keys = ('minimum_coverage',)
for k, v in data.items():
if k in numeric_keys:
v = int(v)
if k == 'key':
assert self.key == v, 'Inconsistent YAML key and file names for %r' % self.key
setattr(self, k, v)
def _read_text(self, location):
if not exists(location):
text = ''
else:
with codecs.open(location, encoding='utf-8') as f:
text = f.read()
return text
@staticmethod
def validate(licenses, verbose=False, no_dupe_urls=False):
    """
    Check that licenses are valid. `licenses` is a mapping of key ->
    License. Return dictionaries of infos, errors and warnings mapping a
    license key to validation issue messages. Print messages if verbose is
    True.
    """
    infos = defaultdict(list)
    warnings = defaultdict(list)
    errors = defaultdict(list)
    # used for global dedupe of texts
    by_spdx_key = defaultdict(list)
    by_text = defaultdict(list)
    for key, lic in licenses.items():
        # bind the per-license message appenders once for brevity below
        err = errors[key].append
        warn = warnings[key].append
        info = infos[key].append
        # required descriptive fields: missing ones are warnings, not errors
        if not lic.short_name:
            warn('No short name')
        if not lic.name:
            warn('No name')
        if not lic.category:
            warn('No category')
        if not lic.owner:
            warn('No owner')
        # cross-license references must resolve within the `licenses` mapping
        if lic.next_version and lic.next_version not in licenses:
            err('License next version is unknown')
        if (lic.is_or_later and
            lic.base_license and
            lic.base_license not in licenses):
            err('Base license for an "or later" license is unknown')
        # URLS dedupe and consistency
        if no_dupe_urls:
            if lic.text_urls and not all(lic.text_urls):
                warn('Some empty license text_urls')
            if lic.other_urls and not all(lic.other_urls):
                warn('Some empty license other_urls')
            # redundant URLs used multiple times
            if lic.homepage_url:
                if lic.homepage_url in lic.text_urls:
                    warn('Homepage URL also in text_urls')
                if lic.homepage_url in lic.other_urls:
                    warn('Homepage URL also in other_urls')
                if lic.homepage_url == lic.faq_url:
                    warn('Homepage URL same as faq_url')
                if lic.homepage_url == lic.osi_url:
                    warn('Homepage URL same as osi_url')
            if lic.osi_url or lic.faq_url:
                if lic.osi_url == lic.faq_url:
                    warn('osi_url same as faq_url')
            # NOTE(review): despite the name, this list collects URLs (not
            # licenses) so duplicates across all URL fields can be detected
            all_licenses = lic.text_urls + lic.other_urls
            for url in lic.osi_url, lic.faq_url, lic.homepage_url:
                if url: all_licenses.append(url)
            if not len(all_licenses) == len(set(all_licenses)):
                warn('Some duplicated URLs')
        # local text consistency
        text = lic.text
        # a text that tokenizes differently as a query vs. as a rule contains
        # {{}} templated regions
        license_qtokens = tuple(query_tokenizer(text, lower=True))
        license_rtokens = tuple(rule_tokenizer(text, lower=True))
        if license_qtokens != license_rtokens:
            info('License text contains rule templated region with {{}}')
        if not license_qtokens:
            info('No license text')
        else:
            # for global dedupe
            by_text[license_qtokens].append(key + ': TEXT')
        # SPDX consistency
        if lic.spdx_license_key:
            by_spdx_key[lic.spdx_license_key].append(key)
    # global SPDX consistency: an SPDX key reused across licenses is only info
    multiple_spdx_keys_used = {k: v for k, v in by_spdx_key.items() if len(v) > 1}
    if multiple_spdx_keys_used:
        for k, lkeys in multiple_spdx_keys_used.items():
            infos['GLOBAL'].append('SPDX key: ' + k + ' used in multiple licenses: ' + ', '.join(sorted(lkeys)))
    # global text dedupe: identical token streams across licenses is an error
    multiple_texts = {k: v for k, v in by_text.items() if len(v) > 1}
    if multiple_texts:
        for k, msgs in multiple_texts.items():
            errors['GLOBAL'].append('Duplicate texts in multiple licenses:' + ', '.join(sorted(msgs)))
    # keep only keys that accumulated at least one message
    errors = {k: v for k, v in errors.items() if v}
    warnings = {k: v for k, v in warnings.items() if v}
    infos = {k: v for k, v in infos.items() if v}
    if verbose:
        print('Licenses validation errors:')
        for key, msgs in sorted(errors.items()):
            print('ERRORS for:', key, ':', '\n'.join(msgs))
        print('Licenses validation warnings:')
        for key, msgs in sorted(warnings.items()):
            print('WARNINGS for:', key, ':', '\n'.join(msgs))
        print('Licenses validation infos:')
        for key, msgs in sorted(infos.items()):
            print('INFOS for:', key, ':', '\n'.join(msgs))
    return errors, warnings, infos
def load_licenses(licenses_data_dir=licenses_data_dir, with_deprecated=False):
    """
    Return a mapping of key -> license objects, loaded from license files.
    """
    licenses = {}
    yaml_files = (df for df in file_iter(licenses_data_dir) if df.endswith('.yml'))
    for data_file in yaml_files:
        key = file_base_name(data_file)
        lic = License(key, licenses_data_dir)
        # deprecated licenses are skipped unless explicitly requested
        if lic.is_deprecated and not with_deprecated:
            continue
        licenses[key] = lic
    return licenses
def get_rules(licenses_data_dir=licenses_data_dir, rules_data_dir=rules_data_dir):
    """
    Return an iterable of license detection rules: rules built from license
    texts chained with rules loaded from rule files. Raise a MissingLicenses
    exception if a rule references unknown license keys.
    """
    from licensedcode.cache import get_licenses_db
    licenses = get_licenses_db(licenses_data_dir=licenses_data_dir)
    # materialize rules first: they are iterated twice (integrity check, chain)
    rules = list(load_rules(rules_data_dir=rules_data_dir))
    check_rules_integrity(rules, licenses)
    licenses_as_rules = build_rules_from_licenses(licenses)
    return chain(licenses_as_rules, rules)
class MissingLicenses(Exception):
    """Raised when rules reference license keys that do not exist."""
    pass
def check_rules_integrity(rules, licenses):
    """
    Given a list of rules, check that all the rule license keys
    reference a known license from a mapping of licenses (key->license).
    Raise a MissingLicenses exception with a message containing the list
    of rule files without a corresponding license.
    """
    invalid_rules = defaultdict(set)
    for rule in rules:
        unknown_keys = [key for key in rule.licenses if key not in licenses]
        if unknown_keys:
            invalid_rules[rule.data_file].update(unknown_keys)
    if invalid_rules:
        # build one message chunk per offending rule, with file:// URLs to
        # both the YAML data file and its companion .RULE text file
        # FIX: use items() instead of the Python-2-only iteritems()
        invalid_rules = (
            ' '.join(keys) + '\n' +
            'file://' + data_file + '\n' +
            'file://' + data_file.replace('.yml', '.RULE') + '\n'
            for data_file, keys in invalid_rules.items() if keys)
        msg = 'Rules referencing missing licenses:\n' + '\n'.join(sorted(invalid_rules))
        raise MissingLicenses(msg)
def build_rules_from_licenses(licenses):
    """
    Yield a Rule built from each license text of a `licenses` mapping of
    license key -> license object. Licenses whose text file does not exist
    yield no rule.
    """
    # FIX: use items() instead of the Python-2-only iteritems()
    for license_key, license_obj in licenses.items():
        text_file = join(license_obj.src_dir, license_obj.text_file)
        minimum_coverage = license_obj.minimum_coverage
        if exists(text_file):
            yield Rule(text_file=text_file, licenses=[license_key],
                       minimum_coverage=minimum_coverage, is_license=True)
def load_rules(rules_data_dir=rules_data_dir):
    """
    Yield rules loaded from rule files found in `rules_data_dir`.
    Raise an Exception if orphaned files or rule file names that are
    not unique ignoring case are found.
    """
    # TODO: OPTIMIZE: create a graph of rules to account for containment and similarity clusters?
    # TODO: we should assign the rule id at that stage
    seen_files = set()
    processed_files = set()
    lower_case_files = set()
    case_problems = set()
    for data_file in file_iter(rules_data_dir):
        if data_file.endswith('.yml'):
            base_name = file_base_name(data_file)
            rule_file = join(rules_data_dir, base_name + '.RULE')
            yield Rule(data_file=data_file, text_file=rule_file)
            # accumulate sets to ensure we do not have illegal names or extra
            # orphaned files
            data_lower = data_file.lower()
            if data_lower in lower_case_files:
                case_problems.add(data_lower)
            else:
                lower_case_files.add(data_lower)
            rule_lower = rule_file.lower()
            if rule_lower in lower_case_files:
                case_problems.add(rule_lower)
            else:
                lower_case_files.add(rule_lower)
            processed_files.update([data_file, rule_file])
        if not data_file.endswith('~'):
            seen_files.add(data_file)
    unknown_files = seen_files - processed_files
    if unknown_files or case_problems:
        # FIX: initialize msg so we do not hit a NameError when there are
        # case problems but no orphaned files
        msg = ''
        if unknown_files:
            files = '\n'.join(sorted(unknown_files))
            msg = 'Orphaned files in rule directory: %(rules_data_dir)r\n%(files)s'
        if case_problems:
            files = '\n'.join(sorted(case_problems))
            # FIX: message typo "casein" -> "case in"
            msg += '\nRule files with non-unique name ignoring case in rule directory: %(rules_data_dir)r\n%(files)s'
        raise Exception(msg % locals())
# Token-count limits used to qualify a match to a rule: high_len/low_len/length
# are the rule's token counts, `small` flags short rules needing special
# handling, and min_high/min_len are the minimum matched counts required.
Thresholds = namedtuple('Thresholds', ['high_len', 'low_len', 'length', 'small', 'min_high', 'min_len'])
class Rule(object):
    """
    A detection rule object is a text to use for detection and corresponding
    detected licenses and metadata. A rule text can contain variable parts
    marked with double curly braces {{ }}.
    """

    # we do not really need slots but they help keep the attributes in check
    __slots__ = (
        'rid', 'identifier',
        'licenses', 'license_choice', 'license', 'licensing_identifier',
        'false_positive',
        'notes',
        'data_file', 'text_file', '_text',
        'length', 'low_length', 'high_length', '_thresholds',
        'length_unique', 'low_unique', 'high_unique', '_thresholds_unique',
        'minimum_coverage', 'relevance', 'has_stored_relevance',
        'is_license'
    )

    def __init__(self, data_file=None, text_file=None, licenses=None,
                 license_choice=False, notes=None, minimum_coverage=0,
                 is_license=False, _text=None):
        """
        Initialize a Rule from a YAML `data_file` and a rule `text_file`,
        or (for tests only) from a `_text` string.
        """
        ###########
        # FIXME: !!! TWO RULES MAY DIFFER BECAUSE THEY ARE UPDATED BY INDEXING
        ###########
        # optional rule id int typically assigned at indexing time
        self.rid = None
        if not text_file:
            assert _text
            self.identifier = '_tst_' + str(len(_text))
        else:
            self.identifier = file_name(text_file)
        # list of valid license keys
        self.licenses = licenses or []
        # True if the rule is for a choice of all licenses. default to False
        self.license_choice = license_choice
        # License expression
        # TODO: implement me.
        self.license = ''
        # is this rule text a false positive when matched? (filtered out) FIXME: this
        # should be unified with the relevance: a false positive match is a match
        # with a relevance of zero
        self.false_positive = False
        # is this rule text only to be matched with a minimum coverage?
        self.minimum_coverage = minimum_coverage
        # optional, free text
        self.notes = notes
        # what is the relevance of a match to this rule text? a float between 0 and
        # 100 where 100 means highly relevant and 0 means not relevant at all.
        # For instance a match to the "gpl" or the "cpol" words have a fairly low
        # relevance as they are a weak indication of an actual license and could be
        # a false positive. In some cases, this may even be used to discard obvious
        # false positive matches automatically.
        self.relevance = 100
        self.has_stored_relevance = False
        # set to True if the rule is built from a .LICENSE full text
        self.is_license = is_license
        # path to the YAML data file for this rule
        self.data_file = data_file
        if data_file:
            try:
                self.load()
            except Exception as e:
                # FIX: use str(e): not all exceptions have a .message attribute
                message = 'While loading: %(data_file)r' % locals() + str(e)
                print(message)
                raise Exception(message)
        # licensing identifier: TODO: replace with a license expression
        self.licensing_identifier = tuple(self.licenses) + (license_choice,)
        # path to the rule text file
        self.text_file = text_file
        # for testing only, when we do not use a file
        self._text = _text
        # These attributes are computed upon text loading or setting the thresholds
        ###########################################################################
        # length in number of token strings
        self.length = 0
        # lengths in token ids, including high/low token counts, set in indexing
        self.high_length = 0
        self.low_length = 0
        self._thresholds = None
        # lengths in token ids, including high/low token counts, set in indexing
        self.high_unique = 0
        self.low_unique = 0
        self.length_unique = 0
        self._thresholds_unique = None

    def tokens(self, lower=True):
        """
        Return an iterable of token strings for this rule. Length is recomputed as a
        side effect. Tokens inside double curly braces (eg. {{ignored}}) are skipped
        and ignored.
        """
        length = 0
        text = self.text()
        text = text.strip()
        # FIXME: this is weird:
        # We tag this rule as being a bare URL if it starts with a scheme and is
        # on one line: this is used to determine a matching approach
        if text.startswith(('http://', 'https://', 'ftp://')) and '\n' not in text[:1000]:
            self.minimum_coverage = 100
        for token in rule_tokenizer(self.text(), lower=lower):
            length += 1
            yield token
        self.length = length
        self.compute_relevance()

    def text(self):
        """
        Return the rule text loaded from its file.
        """
        # used for test only
        if self._text:
            return self._text
        elif self.text_file and exists(self.text_file):
            # IMPORTANT: use the same process as query text loading for symmetry
            lines = text_lines(self.text_file, demarkup=False)
            return ' '.join(lines)
        else:
            raise Exception('Inconsistent rule text for:', self.identifier)

    def __repr__(self):
        """Return a debug representation; rule text is truncated unless tracing."""
        idf = self.identifier
        ird = self.rid
        if TRACE_REPR:
            text = self.text()
        else:
            text = self.text()
            if text:
                text = text[:20] + '...'
        keys = self.licenses
        choice = self.license_choice
        fp = self.false_positive
        minimum_coverage = self.minimum_coverage
        return 'Rule(%(idf)r, lics=%(keys)r, fp=%(fp)r, minimum_coverage=%(minimum_coverage)r, %(text)r)' % locals()

    def same_licensing(self, other):
        """
        Return True if the other rule has the same licensing as this rule.
        """
        # TODO: include license expressions
        return self.licensing_identifier == other.licensing_identifier

    def licensing_contains(self, other):
        """
        Return True if the other rule licensing is contained in this rule licensing.
        """
        # TODO: include license expressions
        return set(self.licensing_identifier).issuperset(other.licensing_identifier)

    def negative(self):
        """
        Return True if this Rule does not point to real licenses and is
        therefore a "negative" rule denoting that a match to this rule should be
        ignored.
        """
        return not self.licenses and not self.false_positive

    def small(self):
        """
        Is this a small rule? It needs special handling for detection.
        """
        SMALL_RULE = 15
        return self.length < SMALL_RULE or self.minimum_coverage == 100

    def thresholds(self):
        """
        Return a Thresholds tuple considering the occurrence of all tokens.
        """
        if not self._thresholds:
            min_high = min([self.high_length, MIN_MATCH_HIGH_LENGTH])
            min_len = MIN_MATCH_LENGTH
            # note: we cascade ifs from largest to smallest lengths
            # FIXME: this is not efficient
            if self.length < 30:
                min_len = self.length // 2
            if self.length < 10:
                min_high = self.high_length
                min_len = self.length
                self.minimum_coverage = 80
            if self.length < 3:
                min_high = self.high_length
                min_len = self.length
                self.minimum_coverage = 100
            if self.minimum_coverage == 100:
                min_high = self.high_length
                min_len = self.length
            self._thresholds = Thresholds(
                self.high_length, self.low_length, self.length,
                self.small(), min_high, min_len
            )
        return self._thresholds

    def thresholds_unique(self):
        """
        Return a Thresholds tuple considering the occurrence of only unique tokens.
        """
        if not self._thresholds_unique:
            highu = (int(self.high_unique // 2)) or self.high_unique
            min_high = min([highu, MIN_MATCH_HIGH_LENGTH])
            min_len = MIN_MATCH_LENGTH
            # note: we cascade IFs from largest to smallest lengths
            if self.length < 20:
                min_high = self.high_unique
                min_len = min_high
            if self.length < 10:
                min_high = self.high_unique
                if self.length_unique < 2:
                    min_len = self.length_unique
                else:
                    min_len = self.length_unique - 1
            if self.length < 5:
                min_high = self.high_unique
                min_len = self.length_unique
            if self.minimum_coverage == 100:
                min_high = self.high_unique
                min_len = self.length_unique
            self._thresholds_unique = Thresholds(
                self.high_unique, self.low_unique, self.length_unique,
                self.small(), min_high, min_len)
        return self._thresholds_unique

    def to_dict(self):
        """
        Return an OrderedDict of self, excluding texts. Used for serialization.
        Empty values are not included.
        """
        data = OrderedDict()
        if self.licenses:
            data['licenses'] = self.licenses
        if self.license_choice:
            data['license_choice'] = self.license_choice
        if self.license:
            data['license'] = self.license
        if self.false_positive:
            data['false_positive'] = self.false_positive
        if self.has_stored_relevance:
            data['relevance'] = self.relevance
        if self.minimum_coverage:
            data['minimum_coverage'] = self.minimum_coverage
        if self.notes:
            # FIX: was `self.note`, which does not exist (__slots__ defines
            # `notes`) and raised AttributeError whenever notes were set
            data['notes'] = self.notes
        return data

    def dump(self):
        """
        Dump a representation of self to tgt_dir as two files:
        - a .yml for the rule data in YAML block format
        - a .RULE: the rule text as a UTF-8 file
        """
        if self.data_file:
            as_yaml = saneyaml.dump(self.to_dict())
            with codecs.open(self.data_file, 'wb', encoding='utf-8') as df:
                df.write(as_yaml)
            with codecs.open(self.text_file, 'wb', encoding='utf-8') as tf:
                tf.write(self.text())

    def load(self, load_notes=False):
        """
        Load self from a .RULE YAML file stored in self.data_file.
        Does not load the rule text file.
        Unknown fields are ignored and not bound to the Rule object.
        """
        try:
            with codecs.open(self.data_file, encoding='utf-8') as f:
                data = saneyaml.load(f.read())
        # FIX: use the `except ... as` form: the old comma form is
        # Python-2-only syntax
        except Exception as e:
            print('#############################')
            print('INVALID LICENSE RULE FILE:', self.data_file)
            print('#############################')
            print(e)
            print('#############################')
            # this is a rare case, but yes we abruptly stop.
            raise e
        self.licenses = data.get('licenses', [])
        self.license_choice = data.get('license_choice', False)
        self.license = data.get('license')
        self.false_positive = data.get('false_positive', False)
        relevance = data.get('relevance')
        if relevance is not None:
            # Keep track if we have a stored relevance or not.
            self.has_stored_relevance = True
            self.relevance = float(relevance)
        self.minimum_coverage = float(data.get('minimum_coverage', 0))
        # these are purely informational and not used at run time
        if load_notes:
            notes = data.get('notes')
            if notes:
                self.notes = notes.strip()
        return self

    def compute_relevance(self):
        """
        Compute and set the `relevance` attribute for this rule. The
        relevance is a float between 0 and 100 where 100 means highly
        relevant and 0 means not relevant at all.

        It is either pre-defined in the rule YAML data file with the
        relevance attribute or computed here using this approach:

        - a rule of length up to 20 receives 5 relevance points per token (so a rule
          of length 1 has a 5 relevance and a rule of length 20 has a 100 relevance)
        - a rule of length over 20 has a 100 relevance
        - a false positive or a negative rule has a relevance of zero.

        For instance a match to the "gpl" or the "cpol" words have a fairly low
        relevance as they are a weak indication of an actual license and could be a
        false positive and should therefore be assigned a low relevance. In contrast
        a match to most or all of the apache-2.0 license text is highly relevant. The
        Rule relevance is used as the basis to compute a match score.
        """
        if self.has_stored_relevance:
            return
        # case for false positive: they do not have licenses and their matches are
        # never returned. Relevance is zero.
        if self.false_positive:
            self.relevance = 0
            return
        # case for negative rules with no license (and are not an FP)
        # they do not have licenses and their matches are never returned
        if self.negative():
            self.relevance = 0
            return
        length = self.length
        if length >= 20:
            # general case
            self.relevance = 100
        else:
            self.relevance = length * 5
def _print_rule_stats():
    """
    Print rules statistics: most common and smallest rule lengths, for both
    all tokens and high tokens, from the cached index.
    """
    from licensedcode.cache import get_index
    idx = get_index()
    rules = idx.rules_by_rid
    sizes = Counter(r.length for r in rules)
    print('Top 15 lengths: ', sizes.most_common(15))
    # FIX: use items() instead of the Python-2-only iteritems()
    print('15 smallest lengths: ', sorted(sizes.items(),
                                          key=itemgetter(0))[:15])
    high_sizes = Counter(r.high_length for r in rules)
    print('Top 15 high lengths: ', high_sizes.most_common(15))
    print('15 smallest high lengths: ', sorted(high_sizes.items(),
                                               key=itemgetter(0))[:15])
|
en
| 0.850379
|
# # Copyright (c) 2017 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. # ScanCode is a trademark of nexB Inc. # # You may not use this software except in compliance with the License. # You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. # # When you publish or redistribute any data created with ScanCode or any ScanCode # derivative work, you must accompany this data with the following acknowledgment: # # Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, either express or implied. No content created from # ScanCode should be considered or used as legal advice. Consult an Attorney # for any legal advice. # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. Reference License and license Rule structures persisted as a combo of a YAML data file and one or more text files containing license or notice texts. # Set to True to print detailed representations of objects when tracing A license consists of these files, where <key> is the license key: - <key>.yml : the license data in YAML - <key>.LICENSE: the license text # we do not really need slots but they help keep the attributes in check Initialize a License for a `key` and data stored in the `src_dir` directory. Key is a lower-case unique ascii string. # unique key: lower case ASCII characters, digits, underscore and dots. 
# if this is a deprecated license, add also notes explaining why # commonly used short name, often abbreviated. # full name. # Permissive, Copyleft, etc # if this is a license exception, the license key this exception applies to # FIXME: this is WAY too complicated and likely not needed # license key for the next version of this license if any # True if this license allows later versions to be used # If is_or_later is True, license key for the not "or later" variant if any # SPDX key for SPDX licenses # Various URLs for info # data file paths and known extensions Return a copy of this license object relocated to a new `src_dir`. The data and license text files are persisted in the new `src_dir`. # copy attributes # save it all to files License text, re-loaded on demand. Return an OrderedDict of license data (excluding texts). Fields with empty values are not included. Dump a representation of self as multiple files named this way: - <key>.yml : the license data in YAML - <key>.LICENSE: the license text Populate license data from a YAML file stored in of src_dir. Does not load text files. Unknown fields are ignored and not bound to the License object. # this is a rare case: fail loudly ############################') ############################') ############################') Check that licenses are valid. `licenses` is a mapping of key -> License. Return dictionaries of infos, errors and warnings mapping a license key to validation issue messages. Print messages if verbose is True. # used for global dedupe of texts # URLS dedupe and consistency # redundant URLs used multiple times # local text consistency # for global dedupe # SPDX consistency # global SPDX consistency # global text dedupe Return a mapping of key -> license objects, loaded from license files. Return a mapping of key->license and an iterable of license detection rules loaded from licenses and rules files. Raise a MissingLicenses exceptions if a rule references unknown license keys. 
Given a lists of rules, check that all the rule license keys reference a known license from a mapping of licenses (key->license). Raise a MissingLicense exception with a message containing the list of rule files without a corresponding license. Return an iterable of rules built from each license text from a `licenses` iterable of license objects. Use the reference list if `licenses` is not provided. Load the reference license list from disk if `licenses` is not provided. Return an iterable of rules loaded from rule files. # TODO: OPTIMIZE: create a graph of rules to account for containment and similarity clusters? # TODO: we should assign the rule id at that stage # accumulate sets to ensures we do not have illegal names or extra # orphaned files A detection rule object is a text to use for detection and corresponding detected licenses and metadata. A rule text can contain variable parts marked with double curly braces {{ }}. ########### # FIXME: !!! TWO RULES MAY DIFFER BECAUSE THEY ARE UPDATED BY INDEXING ########### # optional rule id int typically assigned at indexing time # list of valid license keys # True if the rule is for a choice of all licenses. default to False # License expression # TODO: implement me. # is this rule text a false positive when matched? (filtered out) FIXME: this # should be unified with the relevance: a false positive match is a a match # with a relevance of zero # is this rule text only to be matched with a minimum coverage? # optional, free text # what is the relevance of a match to this rule text? a float between 0 and # 100 where 100 means highly relevant and 0 menas not relevant at all. # For instance a match to the "gpl" or the "cpol" words have a fairly low # relevance as they are a weak indication of an actual license and could be # a false positive. In somce cases, this may even be used to discard obvious # false positive matches automatically. 
# set to True if the rule is built from a .LICENSE full text # path to the YAML data file for this rule # licensing identifier: TODO: replace with a license expression # path to the rule text file # for testing only, when we do not use a file # These attributes are computed upon text loading or setting the thresholds ########################################################################### # length in number of token strings # lengths in token ids, including high/low token counts, set in indexing # lengths in token ids, including high/low token counts, set in indexing Return an iterable of token strings for this rule. Length is recomputed as a side effect. Tokens inside double curly braces (eg. {{ignored}}) are skipped and ignored. # FIXME: this is weird: # We tag this rule as being a bare URL if it starts with a scheme and is on one line: this is used to determine a matching approach Return the rule text loaded from its file. # used for test only # IMPORTANT: use the same process as query text loading for symmetry Return True if the other rule has a the same licensing as this rule. # TODO: include license expressions Return True if the other rule licensing is contained in this rule licensing. # TODO: include license expressions Return True if this Rule does not point to real licenses and is therefore a "negative" rule denoting that a match to this rule should be ignored. Is this a small rule? It needs special handling for detection. Return a Thresholds tuple considering the occurrence of all tokens. # note: we cascade ifs from largest to smallest lengths # FIXME: this is not efficient Return a Thresholds tuple considering the occurrence of only unique tokens. # note: we cascade IFs from largest to smallest lengths Return an OrderedDict of self, excluding texts. Used for serialization. Empty values are not included. 
Dump a representation of self to tgt_dir as two files: - a .yml for the rule data in YAML block format - a .RULE: the rule text as a UTF-8 file Load self from a .RULE YAML file stored in self.data_file. Does not load the rule text file. Unknown fields are ignored and not bound to the Rule object. ############################') ############################') ############################') # this is a rare case, but yes we abruptly stop. # Keep track if we have a stored relevance of not. # these are purely informational and not used at run time Compute and set the `relevance` attribute for this rule. The relevance is a float between 0 and 100 where 100 means highly relevant and 0 means not relevant at all. It is either pre-defined in the rule YAML data file with the relevance attribute or computed here using this approach: - a rule of length up to 20 receives 5 relevance points per token (so a rule of length 1 has a 5 relevance and a rule of length 20 has a 100 relevance) - a rule of length over 20 has a 100 relevance - a false positive or a negative rule has a relevance of zero. For instance a match to the "gpl" or the "cpol" words have a fairly low relevance as they are a weak indication of an actual license and could be a false positive and should therefore be assigned a low relevance. In contrast a match to most or all of the apache-2.0 license text is highly relevant. The Rule relevance is used as the basis to compute a match score. # case for false positive: they do not have licenses and their matches are # never returned. Relevance is zero. # case for negative rules with no license (and are not an FP) # they do not have licenses and their matches are never returned # general case Print rules statistics.
| 1.216173
| 1
|
lib/plugins/generator/replay.py
|
muh-bazm/eventgen
| 3
|
6625700
|
<reponame>muh-bazm/eventgen<gh_stars>1-10
# TODO Add timestamp detection for common timestamp format
from __future__ import division
from generatorplugin import GeneratorPlugin
import os
import logging
import datetime, time
import math
import re
from eventgentoken import Token
from eventgenoutput import Output
class ReplayGenerator(GeneratorPlugin):
    # Replays a sample file's events in their recorded chronological order,
    # reproducing the inter-event timing recorded in the sample timestamps.
    # Not queueable: replay keeps per-instance ordering state (see the note
    # in __init__ about threadsafe mutation of these structures).
    queueable = False
    # parsed sample events to replay (set from sample.sampleDict in __init__)
    _rpevents = None
    # index of the event currently being replayed
    _currentevent = None
    # list of observed inter-event intervals, used to average a wrap-around delay
    _times = None
    # accumulated sleep time as a timedelta
    _timeSinceSleep = None
    # timestamp of the last replayed event, or None before the first event
    _lastts = None
def __init__(self, sample):
    """
    Build a replay generator for `sample`: load the sample file, drop events
    without a parseable timestamp, ensure chronological order, and set up
    backfill.
    """
    GeneratorPlugin.__init__(self, sample)
    self._sample = sample
    # Logger already setup by config, just get an instance
    logger = logging.getLogger('eventgen')
    from eventgenconfig import EventgenAdapter
    adapter = EventgenAdapter(logger, {'module': 'ReplayGenerator', 'sample': sample.name})
    globals()['logger'] = adapter
    from eventgenconfig import Config
    globals()['c'] = Config()
    self._currentevent = 0
    self._timeSinceSleep = datetime.timedelta()
    self._times = [ ]
    s = self._sample
    # Load sample from a file, using cache if possible, from superclass GeneratorPlugin
    s.loadSample()
    self._rpevents = s.sampleDict
    self._currentevent = 0
    # 8/18/15 CS Because this is not a queueable plugin, we can in a threadsafe way modify these data structures at init
    # Iterate through events and remove any events which do not match a configured timestamp,
    # log it and then continue on
    for e in self._rpevents:
        try:
            s.getTSFromEvent(e[s.timeField])
        except ValueError:
            # NOTE(review): rebinds self._rpevents while the loop iterates the
            # original list object; the loop itself is unaffected, but events
            # sharing the same '_raw' are all removed together -- confirm intended
            self._rpevents = [x for x in self._rpevents if x['_raw'] != e['_raw']]
    # Quick check to see if we're sorted in time order, if not reverse
    if len(self._rpevents) > 1:
        ts1 = s.getTSFromEvent(self._rpevents[0][s.timeField])
        ts2 = s.getTSFromEvent(self._rpevents[1][s.timeField])
        td = ts2 - ts1
        x = 2
        # Make sure we're not all zero
        while td.days == 0 and td.seconds == 0 and td.microseconds == 0 and x < len(self._rpevents):
            ts2 = s.getTSFromEvent(self._rpevents[x][s.timeField])
            td = ts2 - ts1
            x += 1
        self.logger.debug("Testing timestamps ts1: %s ts2: %s" % (ts1.strftime('%Y-%m-%d %H:%M:%S'), ts2.strftime('%Y-%m-%d %H:%M:%S')))
        if td.days < 0:
            self.logger.debug("Timestamp order seems to be reverse chronological, reversing")
            self._rpevents.reverse()
    try:
        self.setupBackfill()
    except ValueError as e:
        self.logger.error("Exception during backfill for sample '%s': '%s'" % (s.name, str(e)))
def gen(self, count, earliest, latest):
# 9/8/15 CS Check to make sure we have events to replay
if len(self._rpevents) == 0:
# Return insanely large sleep time
return 10000
# For shortness sake, we're going to call the sample s
s = self._sample
logger.debug("Generating sample '%s' in app '%s'" % (s.name, s.app))
startTime = datetime.datetime.now()
# If we are replaying then we need to set the current sampleLines to the event
# we're currently on
self.sampleDict = [ self._rpevents[self._currentevent] ]
# 9/2/2015 Commenting out, can't find a use for this anymore.
# self.setOutputMetadata(self.sampleDict[0])
logger.debugv("Finding timestamp to compute interval for events")
if self._lastts == None:
self._lastts = s.getTSFromEvent(self._rpevents[self._currentevent][s.timeField])
if (self._currentevent+1) < len(self._rpevents):
nextts = s.getTSFromEvent(self._rpevents[self._currentevent+1][s.timeField])
else:
logger.debugv("At end of _rpevents")
# At the end of the buffer, we sould wait the average amount of time at the end
# return 0
try:
avgtimes = sum(list(self._times)) / len(self._times) / s.timeMultiple
except ZeroDivisionError:
avgtimes = 1
interval = datetime.timedelta(seconds=int(math.modf(avgtimes)[1]), microseconds=int(round(math.modf(avgtimes)[0] * 1000000, 0)))
nextts = self._lastts + interval
logger.debugv("Setting nextts to '%s' with avgtimes '%d' and interval '%s'" % (nextts, avgtimes, interval))
self._times = [ ]
logger.debugv('Computing timeDiff nextts: "%s" lastts: "%s"' % (nextts, self._lastts))
timeDiff = nextts - self._lastts
if timeDiff.days >= 0 and timeDiff.seconds >= 0 and timeDiff.microseconds >= 0:
partialInterval = float("%d.%06d" % (timeDiff.seconds, timeDiff.microseconds))
else:
partialInterval = 0
if s.timeMultiple > 0:
partialInterval *= s.timeMultiple
logger.debugv("Setting partialInterval for replay mode with timeMultiple %s: %s %s" % (s.timeMultiple, timeDiff, partialInterval))
self._lastts = nextts
for x in range(len(self.sampleDict)):
event = self.sampleDict[x]['_raw']
# Maintain state for every token in a given event
# Hash contains keys for each file name which is assigned a list of values
# picked from a random line in that file
mvhash = { }
## Iterate tokens
for token in s.tokens:
token.mvhash = mvhash
event = token.replace(event, et=s.earliestTime(), lt=s.latestTime(), s=s)
if token.replacementType == 'timestamp' and s.timeField != '_raw':
# 9/4/15 CS Found this change from 9/29/14 where I fixed a bug with timestamp
# replacement. Not sure why I set to this value to none other than I would
# want to always use the timestamp from the timeField. Unfortunately
# what happens is that what if we have multiple timestamps configured for
# the sample (which happens with autotimestamp feature now) and we set
# this to none and future timestamps don't match. In this case, I believe
# by commenting this out the first timestamp to be replaced for the sample
# will win and every other replacement will use that cached time.
# s.timestamp = None
token.replace(self.sampleDict[x][s.timeField], et=s.earliestTime(), lt=s.latestTime(), s=s)
if(s.hostToken):
# clear the host mvhash every time, because we need to re-randomize it
s.hostToken.mvhash = {}
host = self.sampleDict[x]['host']
if (s.hostToken):
host = s.hostToken.replace(host, s=s)
l = [ { '_raw': event,
'index': self.sampleDict[x]['index'],
'host': host,
'hostRegex': s.hostRegex,
'source': self.sampleDict[x]['source'],
'sourcetype': self.sampleDict[x]['sourcetype'],
'_time': int(time.mktime(s.timestamp.timetuple())) } ]
self._out.bulksend(l)
s.timestamp = None
# If we roll over the max number of lines, roll over the counter and start over
if (self._currentevent+1) >= len(self._rpevents):
logger.debug("At end of the sample file, starting replay from the top")
self._currentevent = 0
self._lastts = None
else:
self._currentevent += 1
# Track time we were running and time we need to sleep
endTime = datetime.datetime.now()
timeDiff = endTime - startTime
self._timeSinceSleep += timeDiff
if partialInterval > 0:
timeDiffFrac = "%d.%06d" % (self._timeSinceSleep.seconds, self._timeSinceSleep.microseconds)
logger.debug("Generation of sample '%s' in app '%s' completed in %s seconds. Sleeping for %f seconds" \
% (s.name, s.app, timeDiffFrac, partialInterval) )
self._timeSinceSleep = datetime.timedelta()
# Add for average sleep time calculation when we're at the end of the events
self._times.append(partialInterval)
self._out.flush(endOfInterval=True)
return partialInterval
def load():
    """Entry point for eventgen's plugin loader: return the plugin class."""
    return ReplayGenerator
|
# TODO Add timestamp detection for common timestamp format
from __future__ import division
from generatorplugin import GeneratorPlugin
import os
import logging
import datetime, time
import math
import re
from eventgentoken import Token
from eventgenoutput import Output
class ReplayGenerator(GeneratorPlugin):
queueable = False
_rpevents = None
_currentevent = None
_times = None
_timeSinceSleep = None
_lastts = None
def __init__(self, sample):
GeneratorPlugin.__init__(self, sample)
self._sample = sample
# Logger already setup by config, just get an instance
logger = logging.getLogger('eventgen')
from eventgenconfig import EventgenAdapter
adapter = EventgenAdapter(logger, {'module': 'ReplayGenerator', 'sample': sample.name})
globals()['logger'] = adapter
from eventgenconfig import Config
globals()['c'] = Config()
self._currentevent = 0
self._timeSinceSleep = datetime.timedelta()
self._times = [ ]
s = self._sample
# Load sample from a file, using cache if possible, from superclass GeneratorPlugin
s.loadSample()
self._rpevents = s.sampleDict
self._currentevent = 0
# 8/18/15 CS Because this is not a queueable plugin, we can in a threadsafe way modify these data structures at init
# Iterate through events and remove any events which do not match a configured timestamp,
# log it and then continue on
for e in self._rpevents:
try:
s.getTSFromEvent(e[s.timeField])
except ValueError:
self._rpevents = [x for x in self._rpevents if x['_raw'] != e['_raw']]
# Quick check to see if we're sorted in time order, if not reverse
if len(self._rpevents) > 1:
ts1 = s.getTSFromEvent(self._rpevents[0][s.timeField])
ts2 = s.getTSFromEvent(self._rpevents[1][s.timeField])
td = ts2 - ts1
x = 2
# Make sure we're not all zero
while td.days == 0 and td.seconds == 0 and td.microseconds == 0 and x < len(self._rpevents):
ts2 = s.getTSFromEvent(self._rpevents[x][s.timeField])
td = ts2 - ts1
x += 1
self.logger.debug("Testing timestamps ts1: %s ts2: %s" % (ts1.strftime('%Y-%m-%d %H:%M:%S'), ts2.strftime('%Y-%m-%d %H:%M:%S')))
if td.days < 0:
self.logger.debug("Timestamp order seems to be reverse chronological, reversing")
self._rpevents.reverse()
try:
self.setupBackfill()
except ValueError as e:
self.logger.error("Exception during backfill for sample '%s': '%s'" % (s.name, str(e)))
def gen(self, count, earliest, latest):
# 9/8/15 CS Check to make sure we have events to replay
if len(self._rpevents) == 0:
# Return insanely large sleep time
return 10000
# For shortness sake, we're going to call the sample s
s = self._sample
logger.debug("Generating sample '%s' in app '%s'" % (s.name, s.app))
startTime = datetime.datetime.now()
# If we are replaying then we need to set the current sampleLines to the event
# we're currently on
self.sampleDict = [ self._rpevents[self._currentevent] ]
# 9/2/2015 Commenting out, can't find a use for this anymore.
# self.setOutputMetadata(self.sampleDict[0])
logger.debugv("Finding timestamp to compute interval for events")
if self._lastts == None:
self._lastts = s.getTSFromEvent(self._rpevents[self._currentevent][s.timeField])
if (self._currentevent+1) < len(self._rpevents):
nextts = s.getTSFromEvent(self._rpevents[self._currentevent+1][s.timeField])
else:
logger.debugv("At end of _rpevents")
# At the end of the buffer, we sould wait the average amount of time at the end
# return 0
try:
avgtimes = sum(list(self._times)) / len(self._times) / s.timeMultiple
except ZeroDivisionError:
avgtimes = 1
interval = datetime.timedelta(seconds=int(math.modf(avgtimes)[1]), microseconds=int(round(math.modf(avgtimes)[0] * 1000000, 0)))
nextts = self._lastts + interval
logger.debugv("Setting nextts to '%s' with avgtimes '%d' and interval '%s'" % (nextts, avgtimes, interval))
self._times = [ ]
logger.debugv('Computing timeDiff nextts: "%s" lastts: "%s"' % (nextts, self._lastts))
timeDiff = nextts - self._lastts
if timeDiff.days >= 0 and timeDiff.seconds >= 0 and timeDiff.microseconds >= 0:
partialInterval = float("%d.%06d" % (timeDiff.seconds, timeDiff.microseconds))
else:
partialInterval = 0
if s.timeMultiple > 0:
partialInterval *= s.timeMultiple
logger.debugv("Setting partialInterval for replay mode with timeMultiple %s: %s %s" % (s.timeMultiple, timeDiff, partialInterval))
self._lastts = nextts
for x in range(len(self.sampleDict)):
event = self.sampleDict[x]['_raw']
# Maintain state for every token in a given event
# Hash contains keys for each file name which is assigned a list of values
# picked from a random line in that file
mvhash = { }
## Iterate tokens
for token in s.tokens:
token.mvhash = mvhash
event = token.replace(event, et=s.earliestTime(), lt=s.latestTime(), s=s)
if token.replacementType == 'timestamp' and s.timeField != '_raw':
# 9/4/15 CS Found this change from 9/29/14 where I fixed a bug with timestamp
# replacement. Not sure why I set to this value to none other than I would
# want to always use the timestamp from the timeField. Unfortunately
# what happens is that what if we have multiple timestamps configured for
# the sample (which happens with autotimestamp feature now) and we set
# this to none and future timestamps don't match. In this case, I believe
# by commenting this out the first timestamp to be replaced for the sample
# will win and every other replacement will use that cached time.
# s.timestamp = None
token.replace(self.sampleDict[x][s.timeField], et=s.earliestTime(), lt=s.latestTime(), s=s)
if(s.hostToken):
# clear the host mvhash every time, because we need to re-randomize it
s.hostToken.mvhash = {}
host = self.sampleDict[x]['host']
if (s.hostToken):
host = s.hostToken.replace(host, s=s)
l = [ { '_raw': event,
'index': self.sampleDict[x]['index'],
'host': host,
'hostRegex': s.hostRegex,
'source': self.sampleDict[x]['source'],
'sourcetype': self.sampleDict[x]['sourcetype'],
'_time': int(time.mktime(s.timestamp.timetuple())) } ]
self._out.bulksend(l)
s.timestamp = None
# If we roll over the max number of lines, roll over the counter and start over
if (self._currentevent+1) >= len(self._rpevents):
logger.debug("At end of the sample file, starting replay from the top")
self._currentevent = 0
self._lastts = None
else:
self._currentevent += 1
# Track time we were running and time we need to sleep
endTime = datetime.datetime.now()
timeDiff = endTime - startTime
self._timeSinceSleep += timeDiff
if partialInterval > 0:
timeDiffFrac = "%d.%06d" % (self._timeSinceSleep.seconds, self._timeSinceSleep.microseconds)
logger.debug("Generation of sample '%s' in app '%s' completed in %s seconds. Sleeping for %f seconds" \
% (s.name, s.app, timeDiffFrac, partialInterval) )
self._timeSinceSleep = datetime.timedelta()
# Add for average sleep time calculation when we're at the end of the events
self._times.append(partialInterval)
self._out.flush(endOfInterval=True)
return partialInterval
def load():
return ReplayGenerator
|
en
| 0.918742
|
# TODO Add timestamp detection for common timestamp format # Logger already setup by config, just get an instance # Load sample from a file, using cache if possible, from superclass GeneratorPlugin # 8/18/15 CS Because this is not a queueable plugin, we can in a threadsafe way modify these data structures at init # Iterate through events and remove any events which do not match a configured timestamp, # log it and then continue on # Quick check to see if we're sorted in time order, if not reverse # Make sure we're not all zero # 9/8/15 CS Check to make sure we have events to replay # Return insanely large sleep time # For shortness sake, we're going to call the sample s # If we are replaying then we need to set the current sampleLines to the event # we're currently on # 9/2/2015 Commenting out, can't find a use for this anymore. # self.setOutputMetadata(self.sampleDict[0]) # At the end of the buffer, we sould wait the average amount of time at the end # return 0 # Maintain state for every token in a given event # Hash contains keys for each file name which is assigned a list of values # picked from a random line in that file ## Iterate tokens # 9/4/15 CS Found this change from 9/29/14 where I fixed a bug with timestamp # replacement. Not sure why I set to this value to none other than I would # want to always use the timestamp from the timeField. Unfortunately # what happens is that what if we have multiple timestamps configured for # the sample (which happens with autotimestamp feature now) and we set # this to none and future timestamps don't match. In this case, I believe # by commenting this out the first timestamp to be replaced for the sample # will win and every other replacement will use that cached time. 
# s.timestamp = None # clear the host mvhash every time, because we need to re-randomize it # If we roll over the max number of lines, roll over the counter and start over # Track time we were running and time we need to sleep # Add for average sleep time calculation when we're at the end of the events
| 2.267199
| 2
|
src/genie/libs/parser/iosxe/tests/ShowIpv6Neighbors/cli/equal/golden_output2_expected.py
|
balmasea/genieparser
| 204
|
6625701
|
# Golden expected parse of `show ipv6 neighbors` (IOS-XE):
# interface name -> neighbor IPv6 address -> {age, ip, link_layer_address,
# neighbor_state}.  Ages and states are kept as strings, exactly as shown
# in the CLI output ("-" age marks a static/interface-local entry).
expected_output = {
    "interface": {
        "GigabitEthernet3": {
            "interface": "GigabitEthernet3",
            "neighbors": {
                "2001:db8:888c:1::2": {
                    "age": "0",
                    "ip": "2001:db8:888c:1::2",
                    "link_layer_address": "fa16.3eff.1b7b",
                    "neighbor_state": "REACH",
                },
                "2001:db8:c8d1:1::11": {
                    "age": "-",
                    "ip": "2001:db8:c8d1:1::11",
                    "link_layer_address": "bbbb.beff.bcbc",
                    "neighbor_state": "REACH",
                },
                "FE80::F816:3EFF:FEFF:1B7B": {
                    "age": "0",
                    "ip": "FE80::F816:3EFF:FEFF:1B7B",
                    "link_layer_address": "fa16.3eff.1b7b",
                    "neighbor_state": "REACH",
                },
            },
        },
        "GigabitEthernet5": {
            "interface": "GigabitEthernet5",
            "neighbors": {
                "2001:db8:c8d1:1::3": {
                    "age": "0",
                    "ip": "2001:db8:c8d1:1::3",
                    "link_layer_address": "5e01.c0ff.0209",
                    "neighbor_state": "REACH",
                },
                "FE80::5C01:C0FF:FEFF:209": {
                    "age": "1",
                    "ip": "FE80::5C01:C0FF:FEFF:209",
                    "link_layer_address": "5e01.c0ff.0209",
                    "neighbor_state": "STALE",
                },
            },
        },
    }
}
|
expected_output = {
"interface": {
"GigabitEthernet3": {
"interface": "GigabitEthernet3",
"neighbors": {
"2001:db8:888c:1::2": {
"age": "0",
"ip": "2001:db8:888c:1::2",
"link_layer_address": "fa16.3eff.1b7b",
"neighbor_state": "REACH",
},
"2001:db8:c8d1:1::11": {
"age": "-",
"ip": "2001:db8:c8d1:1::11",
"link_layer_address": "bbbb.beff.bcbc",
"neighbor_state": "REACH",
},
"FE80::F816:3EFF:FEFF:1B7B": {
"age": "0",
"ip": "FE80::F816:3EFF:FEFF:1B7B",
"link_layer_address": "fa16.3eff.1b7b",
"neighbor_state": "REACH",
},
},
},
"GigabitEthernet5": {
"interface": "GigabitEthernet5",
"neighbors": {
"2001:db8:c8d1:1::3": {
"age": "0",
"ip": "2001:db8:c8d1:1::3",
"link_layer_address": "5e01.c0ff.0209",
"neighbor_state": "REACH",
},
"FE80::5C01:C0FF:FEFF:209": {
"age": "1",
"ip": "FE80::5C01:C0FF:FEFF:209",
"link_layer_address": "5e01.c0ff.0209",
"neighbor_state": "STALE",
},
},
},
}
}
|
none
| 1
| 1.443934
| 1
|
|
experiments/evaluate.py
|
mwydmuch/napkinXC
| 36
|
6625702
|
<filename>experiments/evaluate.py
#!/usr/bin/env python
import sys
import os
napkinxc_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../python")
sys.path.append(napkinxc_path)
from napkinxc.measures import *
def load_true_file(filepath):
    """Read ground-truth labels from an XMLC-format file.

    Each line starts with a comma-separated label list followed by features;
    a first line of exactly three space-separated fields is treated as the
    header ("examples features labels") and skipped.  Returns a list of
    integer-label lists, one per example.
    """
    labels = []
    with open(filepath) as handle:
        for line_no, line in enumerate(handle):
            if line_no == 0 and len(line.split(' ')) == 3:
                continue  # header line
            label_part = line.strip().split(' ', 1)[0]
            labels.append([int(tok) for tok in label_part.split(',') if ':' not in tok])
    return labels
def load_pred_file(filepath):
    """Read predictions, one example per line.

    Each token is either "label:score" (parsed to an (int, float) tuple) or a
    bare "label" (parsed to an int).  Returns a list of token lists.
    """
    with open(filepath) as handle:
        Y = []

        def convert_y(y):
            y = y.split(':')
            if len(y) == 2:
                return (int(y[0]), float(y[1]))
            else:
                # BUGFIX: was `int(y)` on the split *list*, which raised
                # TypeError for any score-less label token.
                return int(y[0])

        for line in handle:
            Y.append([convert_y(y) for y in line.strip().split(' ')])
        return Y
def load_inv_ps_file(filepath):
    """Read inverse propensity scores: one float per line."""
    with open(filepath) as handle:
        return [float(line.strip()) for line in handle]
if __name__ == "__main__":
    # Usage: evaluate.py TRUE_FILE PRED_FILE [INV_PS_FILE]
    if len(sys.argv) < 3:
        print("Requires true and prediction files as arguments!")
        exit(1)

    true = load_true_file(sys.argv[1])
    pred = load_pred_file(sys.argv[2])
    inv_ps = load_inv_ps_file(sys.argv[3]) if len(sys.argv) > 3 else None

    precision = 6   # decimal places printed per metric
    max_k = 5       # report metric@1 .. metric@max_k

    measures = {
        #"HL": {"func": hamming_loss, "inv_ps": False},
        "P": {"func": precision_at_k, "inv_ps": False},
        "R": {"func": recall_at_k, "inv_ps": False},
        "nDCG": {"func": ndcg_at_k, "inv_ps": False},
        "PSP": {"func": psprecision_at_k, "inv_ps": True},
        "PSR": {"func": psrecall_at_k, "inv_ps": True},
        "PSnDCG": {"func": psndcg_at_k, "inv_ps": True},
    }

    for name, spec in measures.items():
        # Propensity-scored measures take the inverse propensities when given.
        if spec["inv_ps"] and inv_ps is not None:
            scores = spec["func"](true, pred, inv_ps, k=max_k)
        else:
            scores = spec["func"](true, pred, k=max_k)
        if scores is not None:
            for k in range(max_k):
                print(("{}@{}: {:." + str(precision) + "f}").format(name, k + 1, scores[k]))
|
<filename>experiments/evaluate.py
#!/usr/bin/env python
import sys
import os
napkinxc_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../python")
sys.path.append(napkinxc_path)
from napkinxc.measures import *
def load_true_file(filepath):
with open(filepath) as file:
Y = []
for i, line in enumerate(file):
if i == 0 and len(line.split(' ')) == 3:
continue
Y.append([int(y) for y in line.strip().split(' ', 1)[0].split(',') if ':' not in y])
return Y
def load_pred_file(filepath):
with open(filepath) as file:
Y = []
def convert_y(y):
y = y.split(':')
if len(y) == 2:
return (int(y[0]), float(y[1]))
else:
return int(y)
for line in file:
Y.append([convert_y(y) for y in line.strip().split(' ')])
return Y
def load_inv_ps_file(filepath):
with open(filepath) as file:
v = []
for line in file:
v.append(float(line.strip()))
return v
if __name__ == "__main__":
if len(sys.argv) < 3:
print("Requires true and prediction files as arguments!")
exit(1)
true = load_true_file(sys.argv[1])
pred = load_pred_file(sys.argv[2])
inv_ps = None
if len(sys.argv) > 3:
inv_ps = load_inv_ps_file(sys.argv[3])
precision = 6
max_k = 5
measures = {
#"HL": {"func": hamming_loss, "inv_ps": False},
"P": {"func": precision_at_k, "inv_ps": False},
"R": {"func": recall_at_k, "inv_ps": False},
"nDCG": {"func": ndcg_at_k, "inv_ps": False},
"PSP": {"func": psprecision_at_k, "inv_ps": True},
"PSR": {"func": psrecall_at_k, "inv_ps": True},
"PSnDCG": {"func": psndcg_at_k, "inv_ps": True},
}
for m, v in measures.items():
r = None
if v["inv_ps"] and inv_ps is not None:
r = v["func"](true, pred, inv_ps, k=max_k)
else:
r = v["func"](true, pred, k=max_k)
if r is not None:
for k in range(max_k):
print(("{}@{}: {:." + str(precision) + "f}").format(m, k + 1, r[k]))
|
en
| 0.310364
|
#!/usr/bin/env python #"HL": {"func": hamming_loss, "inv_ps": False},
| 2.304459
| 2
|
adhoc/histogram_area_optim2_repl_good.py
|
MrCsabaToth/IK
| 0
|
6625703
|
# Debug walkthrough of the stack-based "largest rectangle in histogram"
# algorithm, restricted to the sub-range [l, r] of the histogram.
histogram = [6, 2, 5, 4, 5, 1, 6]
histogram = [1, 1, 2, 1, 1]  # the second assignment is the one actually used
l = 1
r = 3

print("start")

stack = []       # indices of bars with non-decreasing heights
max_area = 0
index = l
while index <= r:
    print("s0", stack)
    if not stack or histogram[stack[-1]] <= histogram[index]:
        # Current bar is at least as tall as the stack top: keep extending.
        stack.append(index)
        index += 1
        print("s1", stack)
    else:
        # Current bar is shorter: pop and score the rectangle it bounds.
        top_of_stack = stack.pop()
        width = (index - stack[-1] - 1) if stack else index - l
        max_area = max(max_area, histogram[top_of_stack] * width)
        print("s2", stack, max_area)

# Drain the stack: every remaining bar extends to the right edge.
while stack:
    print("s3", stack)
    top_of_stack = stack.pop()
    print("t", top_of_stack, index)
    width = (index - stack[-1] - 1) if stack else index - l
    max_area = max(max_area, histogram[top_of_stack] * width)
    print("s4", stack, max_area)

print("finish", max_area)
|
histogram = [6, 2, 5, 4, 5, 1, 6]
histogram = [1, 1, 2, 1, 1]
l = 1
r = 3
print("start")
stack = list()
max_area = 0
index = l
while index <= r:
print("s0", stack)
if (not stack) or (histogram[stack[-1]] <= histogram[index]):
stack.append(index)
index += 1
print("s1", stack)
else:
top_of_stack = stack.pop()
area = (histogram[top_of_stack] * ((index - stack[-1] - 1) if stack else index -l))
max_area = max(max_area, area)
print("s2", stack, max_area)
while stack:
print("s3", stack)
top_of_stack = stack.pop()
print("t", top_of_stack, index)
area = (histogram[top_of_stack] * ((index - stack[-1] - 1) if stack else index -l))
max_area = max(max_area, area)
print("s4", stack, max_area)
print("finish", max_area)
|
none
| 1
| 3.500255
| 4
|
|
nncf/tensorflow/accuracy_aware_training/runner.py
|
GreenWaves-Technologies/nncf
| 136
|
6625704
|
<gh_stars>100-1000
"""
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os.path as osp
from nncf.common.utils.logger import logger as nncf_logger
from nncf.common.accuracy_aware_training.runner import BaseAccuracyAwareTrainingRunner
from nncf.common.accuracy_aware_training.runner import BaseAdaptiveCompressionLevelTrainingRunner
from nncf.common.utils.helpers import configure_accuracy_aware_paths
class TFAccuracyAwareTrainingRunner(BaseAccuracyAwareTrainingRunner):
    """
    The Training Runner implementation for TensorFlow training code.
    """

    def initialize_training_loop_fns(self, train_epoch_fn, validate_fn, configure_optimizers_fn=None,
                                     dump_checkpoint_fn=None, tensorboard_writer=None, log_dir=None):
        # Delegate bookkeeping to the base class, then derive the
        # accuracy-aware log/checkpoint directories.
        super().initialize_training_loop_fns(train_epoch_fn, validate_fn, configure_optimizers_fn, dump_checkpoint_fn,
                                             tensorboard_writer=tensorboard_writer, log_dir=log_dir)
        if self._log_dir is None:
            self._log_dir = 'runs'
        self._log_dir = configure_accuracy_aware_paths(self._log_dir)
        self._checkpoint_save_dir = self._log_dir

    def retrieve_uncompressed_model_accuracy(self, model):
        # The reference (uncompressed) metric must be attached to the model
        # before the accuracy-aware loop starts.
        if not hasattr(model, 'original_model_accuracy'):
            raise RuntimeError('Original model does not contain the pre-calculated reference metric value')
        self.uncompressed_model_accuracy = model.original_model_accuracy

    def train_epoch(self, model, compression_controller):
        compression_controller.scheduler.epoch_step()
        # assuming that epoch number is only used for logging in train_fn:
        self._train_epoch_fn(compression_controller,
                             model,
                             epoch=self.cumulative_epoch_count)
        self.training_epoch_count += 1
        self.cumulative_epoch_count += 1

    def validate(self, model):
        self.current_val_metric_value = self._validate_fn(model, epoch=self.cumulative_epoch_count)
        # XOR of metric direction and comparison result: true when the new
        # value is an improvement for this metric's direction.
        is_best = (not self.is_higher_metric_better) != (self.current_val_metric_value > self.best_val_metric_value)
        if is_best:
            self.best_val_metric_value = self.current_val_metric_value
        self.add_tensorboard_scalar('val/accuracy_aware/metric_value',
                                    data=self.current_val_metric_value, step=self.cumulative_epoch_count)
        return self.current_val_metric_value

    def reset_training(self):
        # NOTE(review): resets best metric to 0 — presumes higher-is-better
        # metrics; confirm behavior for lower-is-better metrics.
        self.training_epoch_count = 0
        self.best_val_metric_value = 0

    def dump_statistics(self, model, compression_controller):
        statistics = compression_controller.statistics()
        if self.verbose:
            nncf_logger.info(statistics.to_str())
        self.dump_checkpoint(model, compression_controller)

    def dump_checkpoint(self, model, compression_controller):
        # Always refresh the "last" checkpoint; additionally snapshot a
        # "best" checkpoint when the current metric equals the best seen.
        last_path = osp.join(self._checkpoint_save_dir, 'acc_aware_checkpoint_last.pb')
        model.save_weights(last_path)
        if self.best_val_metric_value == self.current_val_metric_value:
            best_path = osp.join(self._checkpoint_save_dir, 'acc_aware_checkpoint_best.ckpt')
            self._best_checkpoint = best_path
            model.save_weights(best_path)

    def add_tensorboard_scalar(self, key, data, step):
        if self.verbose and self._tensorboard_writer is not None:
            self._tensorboard_writer({key: data}, step)

    def load_best_checkpoint(self, model):
        resuming_checkpoint_path = self._best_checkpoint
        nncf_logger.info('Loading the best checkpoint found during training '
                         '{}...'.format(resuming_checkpoint_path))
        model.load_weights(resuming_checkpoint_path)

    def configure_optimizers(self):
        # Optimizer setup is owned by the user's TF training code.
        pass

    def update_learning_rate(self):
        # LR scheduling is owned by the user's TF training code.
        pass
class TFAdaptiveCompressionLevelTrainingRunner(TFAccuracyAwareTrainingRunner,
                                               BaseAdaptiveCompressionLevelTrainingRunner):
    """Adaptive-compression-level training runner for TensorFlow: tracks the
    best checkpoint per target compression rate and restores the highest-rate
    checkpoint that still meets the accuracy budget."""

    def __init__(self, accuracy_aware_params, verbose=True,
                 minimal_compression_rate=0.05, maximal_compression_rate=0.95, dump_checkpoints=True):
        TFAccuracyAwareTrainingRunner.__init__(self, accuracy_aware_params,
                                               verbose, dump_checkpoints)
        BaseAdaptiveCompressionLevelTrainingRunner.__init__(self, accuracy_aware_params,
                                                            verbose,
                                                            minimal_compression_rate,
                                                            maximal_compression_rate,
                                                            dump_checkpoints)

    def update_training_history(self, compression_rate, best_metric_value):
        # Record (rate, accuracy budget); budget >= 0 means the accuracy
        # degradation criterion was satisfied at this rate.
        best_accuracy_budget = best_metric_value - self.minimal_tolerable_accuracy
        self._compressed_training_history.append((compression_rate, best_accuracy_budget))

    def dump_checkpoint(self, model, compression_controller):
        checkpoint_path = osp.join(self._checkpoint_save_dir, 'acc_aware_checkpoint_last.pb')
        model.save_weights(checkpoint_path)
        if self.best_val_metric_value == self.current_val_metric_value:
            best_path = osp.join(self._checkpoint_save_dir,
                                 'acc_aware_checkpoint_best_compression_rate_'
                                 '{comp_rate:.3f}.ckpt'.format(comp_rate=self.compression_rate_target))
            self._best_checkpoints[self.compression_rate_target] = best_path
            model.save_weights(best_path)

    def load_best_checkpoint(self, model):
        # load checkpoint with highest compression rate and positive acc budget
        possible_checkpoint_rates = [comp_rate for (comp_rate, acc_budget) in self._compressed_training_history
                                     if acc_budget >= 0]
        if not possible_checkpoint_rates:
            nncf_logger.warning('Could not produce a compressed model satisfying the set accuracy '
                                'degradation criterion during training. Increasing the number of training '
                                'epochs')
            # BUGFIX: previously fell through to sorted([])[-1] and raised
            # IndexError; keep the model's current weights instead.
            return
        best_checkpoint_compression_rate = sorted(possible_checkpoint_rates)[-1]
        resuming_checkpoint_path = self._best_checkpoints[best_checkpoint_compression_rate]
        nncf_logger.info('Loading the best checkpoint found during training '
                         '{}...'.format(resuming_checkpoint_path))
        model.load_weights(resuming_checkpoint_path)

    @property
    def compressed_training_history(self):
        return self._compressed_training_history
|
"""
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os.path as osp
from nncf.common.utils.logger import logger as nncf_logger
from nncf.common.accuracy_aware_training.runner import BaseAccuracyAwareTrainingRunner
from nncf.common.accuracy_aware_training.runner import BaseAdaptiveCompressionLevelTrainingRunner
from nncf.common.utils.helpers import configure_accuracy_aware_paths
class TFAccuracyAwareTrainingRunner(BaseAccuracyAwareTrainingRunner):
"""
The Training Runner implementation for TensorFlow training code.
"""
def initialize_training_loop_fns(self, train_epoch_fn, validate_fn, configure_optimizers_fn=None,
dump_checkpoint_fn=None, tensorboard_writer=None, log_dir=None):
super().initialize_training_loop_fns(train_epoch_fn, validate_fn, configure_optimizers_fn, dump_checkpoint_fn,
tensorboard_writer=tensorboard_writer, log_dir=log_dir)
self._log_dir = self._log_dir if self._log_dir is not None \
else 'runs'
self._log_dir = configure_accuracy_aware_paths(self._log_dir)
self._checkpoint_save_dir = self._log_dir
def retrieve_uncompressed_model_accuracy(self, model):
if not hasattr(model, 'original_model_accuracy'):
raise RuntimeError('Original model does not contain the pre-calculated reference metric value')
self.uncompressed_model_accuracy = model.original_model_accuracy
def train_epoch(self, model, compression_controller):
compression_controller.scheduler.epoch_step()
# assuming that epoch number is only used for logging in train_fn:
self._train_epoch_fn(compression_controller,
model,
epoch=self.cumulative_epoch_count)
self.training_epoch_count += 1
self.cumulative_epoch_count += 1
def validate(self, model):
self.current_val_metric_value = self._validate_fn(model, epoch=self.cumulative_epoch_count)
is_best = (not self.is_higher_metric_better) != (self.current_val_metric_value > self.best_val_metric_value)
if is_best:
self.best_val_metric_value = self.current_val_metric_value
self.add_tensorboard_scalar('val/accuracy_aware/metric_value',
data=self.current_val_metric_value, step=self.cumulative_epoch_count)
return self.current_val_metric_value
def reset_training(self):
self.training_epoch_count = 0
self.best_val_metric_value = 0
def dump_statistics(self, model, compression_controller):
statistics = compression_controller.statistics()
if self.verbose:
nncf_logger.info(statistics.to_str())
self.dump_checkpoint(model, compression_controller)
def dump_checkpoint(self, model, compression_controller):
checkpoint_path = osp.join(self._checkpoint_save_dir, 'acc_aware_checkpoint_last.pb')
model.save_weights(checkpoint_path)
if self.best_val_metric_value == self.current_val_metric_value:
best_checkpoint_filename = 'acc_aware_checkpoint_best.ckpt'
best_path = osp.join(self._checkpoint_save_dir, best_checkpoint_filename)
self._best_checkpoint = best_path
model.save_weights(best_path)
def add_tensorboard_scalar(self, key, data, step):
if self.verbose and self._tensorboard_writer is not None:
self._tensorboard_writer({key: data}, step)
def load_best_checkpoint(self, model):
resuming_checkpoint_path = self._best_checkpoint
nncf_logger.info('Loading the best checkpoint found during training '
'{}...'.format(resuming_checkpoint_path))
model.load_weights(resuming_checkpoint_path)
def configure_optimizers(self):
pass
def update_learning_rate(self):
pass
class TFAdaptiveCompressionLevelTrainingRunner(TFAccuracyAwareTrainingRunner,
                                               BaseAdaptiveCompressionLevelTrainingRunner):
    """Accuracy-aware TF training runner that additionally explores model
    compression rates, keeping one best checkpoint per visited rate."""

    def __init__(self, accuracy_aware_params, verbose=True,
                 minimal_compression_rate=0.05, maximal_compression_rate=0.95, dump_checkpoints=True):
        TFAccuracyAwareTrainingRunner.__init__(self, accuracy_aware_params,
                                               verbose, dump_checkpoints)
        BaseAdaptiveCompressionLevelTrainingRunner.__init__(self, accuracy_aware_params,
                                                            verbose,
                                                            minimal_compression_rate,
                                                            maximal_compression_rate,
                                                            dump_checkpoints)

    def update_training_history(self, compression_rate, best_metric_value):
        # Accuracy budget = how far above (>=0) or below (<0) the tolerable
        # accuracy the best metric was at this compression rate.
        best_accuracy_budget = best_metric_value - self.minimal_tolerable_accuracy
        self._compressed_training_history.append((compression_rate, best_accuracy_budget))

    def dump_checkpoint(self, model, compression_controller):
        """Save the last checkpoint; when the current metric equals the best
        seen so far, also save a checkpoint keyed by the current rate."""
        checkpoint_path = osp.join(self._checkpoint_save_dir, 'acc_aware_checkpoint_last.pb')
        model.save_weights(checkpoint_path)
        if self.best_val_metric_value == self.current_val_metric_value:
            best_path = osp.join(self._checkpoint_save_dir,
                                 'acc_aware_checkpoint_best_compression_rate_'
                                 '{comp_rate:.3f}.ckpt'.format(comp_rate=self.compression_rate_target))
            self._best_checkpoints[self.compression_rate_target] = best_path
            model.save_weights(best_path)

    def load_best_checkpoint(self, model):
        """Load the checkpoint with the highest compression rate whose
        accuracy budget is non-negative; no-op (with a warning) if none."""
        possible_checkpoint_rates = [comp_rate for (comp_rate, acc_budget) in self._compressed_training_history
                                     if acc_budget >= 0]
        if not possible_checkpoint_rates:
            nncf_logger.warning('Could not produce a compressed model satisfying the set accuracy '
                                'degradation criterion during training. Increasing the number of training '
                                'epochs')
            # Bug fix: the original fell through to sorted([])[-1] and raised
            # IndexError here; leave the model untouched instead.
            return
        best_checkpoint_compression_rate = sorted(possible_checkpoint_rates)[-1]
        resuming_checkpoint_path = self._best_checkpoints[best_checkpoint_compression_rate]
        nncf_logger.info('Loading the best checkpoint found during training '
                         '{}...'.format(resuming_checkpoint_path))
        model.load_weights(resuming_checkpoint_path)

    @property
    def compressed_training_history(self):
        """List of (compression_rate, accuracy_budget) tuples collected so far."""
        return self._compressed_training_history
|
en
| 0.861283
|
Copyright (c) 2022 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. The Training Runner implementation for TensorFlow training code. # assuming that epoch number is only used for logging in train_fn: # load checkpoint with highest compression rate and positive acc budget
| 1.757285
| 2
|
tools/TopicFastMerge/topic_model_fastmerge.py
|
StevenLOL/Familia
| 2,753
|
6625705
|
#coding=utf-8
# Copyright (c) 2017, Baidu.com, Inc. All Rights Reserved
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
from collections import defaultdict as ddict
import operator
class TopicModelFastMerge(object):
    """
    Topic de-duplication for very large topic models. Computing pairwise
    similarities between all topics would be prohibitively slow, so a fast
    Simhash-based scheme is used instead. It works in two stages: a
    Simhash-based pre-clustering of topics, followed by a Weighted Jaccard
    Similarity analysis of the topics inside each cluster.

    NOTE(review): this is Python 2 code (``xrange``, ``dict.itervalues``).

    Attributes:
        _num_topics: number of topics in the model
        _word_topic_file: path to the word-topic model file
    """
    def __init__(self, model_dir, conf_file):
        """
        Initialize parameters from the given configuration file.
        Args:
            model_dir: model directory
            conf_file: model configuration file name
        """
        parameters = self.config_parser(model_dir + '/' + conf_file)
        self._num_topics = int(parameters["num_topics"])
        self._word_topic_file = model_dir + '/' + parameters["word_topic_file"].strip('"')

    def conv_topic_word(self):
        """
        Convert the word-topic format into topic-word format.
        Each line of the word-topic table is: word_id topic_id:count ... topic_id:count
        The converted topic-word layout is: topic_id word_id:count ... word_id:count
        Args:
            None
        Returns:
            topic_word: the converted topic-word model
            topic_sum: total word count under each topic
        """
        topic_word = [[] for _ in xrange(self._num_topics)]
        topic_sum = [0] * self._num_topics
        with open(self._word_topic_file) as f:
            for line in f:
                cols = line.strip().split()
                word_id = int(cols[0])
                for index in range(1, len(cols)):
                    topic_id, cnt = [int(item) for item in cols[index].split(':')]
                    topic_word[topic_id].append((word_id, cnt))
                    topic_sum[topic_id] += cnt
        return topic_word, topic_sum

    def config_parser(self, config):
        """
        Read the configuration file and parse its parameters.
        Lines starting with '#' and lines without exactly one ':' are skipped.
        Args:
            config: model configuration file path
        Returns:
            parameters: dict of configuration key/value strings
        """
        parameters = {}
        with open(config) as f:
            for line in f:
                if line.startswith('#'):
                    continue
                cols = line.strip().split(':')
                if len(cols) != 2:
                    continue
                parameters[cols[0].strip()] = cols[1].strip()
        return parameters

    def select_topk(self, topic_word, topic_sum, topk):
        """
        Select the k most probable words from each topic.
        NOTE: sorts each topic_word[topic_id] in place by descending count.
        Args:
            topic_word: topic-word table
            topic_sum: total word count under each topic
            topk: number of words to keep per topic
        Returns:
            topk_items: top-K words of each topic with their probabilities
        """
        topk_items = [[] for _ in xrange(self._num_topics)]
        for topic_id in xrange(self._num_topics):
            topic_word[topic_id].sort(key=operator.itemgetter(1), reverse=True)
            topk_items[topic_id] = [(item[0], float(item[1]) / topic_sum[topic_id]) for item in topic_word[topic_id][:topk]]
        return topk_items

    def string_hash(self, word_str, hashbits):
        """
        Map a string to a hash code of ``hashbits`` bits.
        Args:
            word_str: input string
            hashbits: number of hash bits
        Returns:
            hash_code: the hash code
        """
        if word_str == "":
            return 0
        hash_code = ord(word_str[0]) << 7
        m = 1000003
        mask = 2 ** hashbits - 1
        for ch in word_str:
            hash_code = ((hash_code * m) ^ ord(ch)) & mask
        hash_code ^= len(word_str)
        if hash_code == -1:
            hash_code = -2
        return hash_code

    def word_id_hash(self, hashbits):
        """
        Generate hash codes for every word in the vocabulary.
        Args:
            hashbits: number of hash bits
        Returns:
            word_hash: dict mapping word id (string) to its hash code
        """
        word_hash = ddict(int)
        with open(self._word_topic_file) as f:
            for line in f:
                word_id = line.strip().split(' ', 1)[0]
                word_hash[word_id] = self.string_hash(word_id, hashbits)
        return word_hash

    def topic_simhash(self, topic_word, topic_sum, hashbits):
        """
        For every word in a topic, compute the hash of its word_id string.
        Each word also carries a weight, topic_count / topic_sum. If a
        word_id hashes to 1001, its weighted contribution is
        ``weight -weight -weight weight``: a hash bit of '1' contributes
        ``+weight`` and a bit of '0' contributes ``-weight``.
        Args:
            topic_word: model in topic-word format
            topic_sum: total word count under each topic
            hashbits: number of hash bits
        Returns:
            tpc_simhash: dict mapping topic id to its Simhash code
        """
        word_hash = self.word_id_hash(hashbits)
        tpc_simhash = ddict(int)
        for tid in xrange(self._num_topics):
            topic_value = [0.0] * hashbits
            masks = [1 << i for i in xrange(hashbits)]
            for item in topic_word[tid]:
                w_hash = word_hash[str(item[0])]
                weight = float(item[1]) / topic_sum[tid]
                for index in xrange(hashbits):
                    if w_hash & masks[index]:
                        topic_value[index] += weight
                    else:
                        topic_value[index] -= weight
            simhash = 0
            for index in xrange(hashbits):
                if topic_value[index] >= 0:
                    simhash |= masks[index]
            tpc_simhash[tid] = simhash
        return tpc_simhash

    def topic_cluster(self, tpc_simhash, hashbits):
        """
        Group topics that share the same Simhash code into clusters.
        Args:
            tpc_simhash: dict mapping topic id to its Simhash code
            hashbits: number of hash bits
        Returns:
            dis_sets: list of sets, each holding the topic ids of one cluster
        """
        overlap_pair = []
        for tid1 in xrange(self._num_topics - 1):
            for tid2 in xrange(tid1 + 1, self._num_topics):
                topic_sim = (tpc_simhash[tid1] ^ tpc_simhash[tid2]) & ((1 << hashbits) -1)
                if topic_sim == 0:
                    overlap_pair.append((tid1, tid2))
        # Merge topic pairs via union-find
        dis_sets = self.disjoint_set(overlap_pair)
        candidate_topic_cnt = 0
        for item in dis_sets:
            candidate_topic_cnt += len(item)
        print("There are {} candidate topic pairs with the same Simhash value in {} clusters.".format(
            candidate_topic_cnt, len(dis_sets)))
        return dis_sets

    def disjoint_set(self, overlap_pair):
        """
        Union-find algorithm.
        Args:
            overlap_pair: list of tuples, each holding two redundant topic
                IDs, e.g. [(1, 2), (2, 3), (4, 5)]
        Returns:
            results: list of sets, each holding several redundant topic IDs,
                e.g. [set(1, 2, 3), set(4, 5)]
        """
        d = ddict(list)
        for index, (tid1, tid2) in enumerate(overlap_pair):
            d[tid1].append(index)
            d[tid2].append(index)
        sets = []
        while len(d):
            indexs = set(d.popitem()[1])
            temp_set = set()
            while len(indexs):
                temp_set |= indexs
                temp_list = list()
                for index in indexs:
                    for tid in overlap_pair[index]:
                        for ind in d.pop(tid, []):
                            temp_list += [ind]
                indexs = set(temp_list) - temp_set
            sets += [temp_set]
        results = []
        for indexs in sets:
            temp_list = []
            for index in indexs:
                for tid in overlap_pair[index]:
                    temp_list += [tid]
            results.append(set(temp_list))
        return results

    def reduce_topic(self, topk, Jac_thresh, hashbits, output_file):
        """
        Remove redundant topics in two steps:
        1. Simhash-based topic pre-clustering;
        2. Weighted Jaccard Similarity analysis within each cluster.
        Args:
            topk: number of words taken from each topic
            Jac_thresh: threshold; pairs whose Jaccard similarity exceeds it
                are treated as redundant
            hashbits: number of hash bits
            output_file: file where the de-duplicated model is saved
        Returns:
            None
        """
        topic_word, topic_sum = self.conv_topic_word()
        topk_items = self.select_topk(topic_word, topic_sum, topk)
        tpc_simhash = self.topic_simhash(topic_word, topic_sum, hashbits)
        # Obtain topic clusters
        topic_clusters = self.topic_cluster(tpc_simhash, hashbits)
        topk_words = []
        for topic_id in xrange(self._num_topics):
            topk_words.append(set([item[0] for item in topk_items[topic_id]]))
        # Correlation analysis within each topic cluster
        overlap_pair = []
        for index in xrange(len(topic_clusters)):
            cluster = list(topic_clusters[index])
            length = len(cluster)
            for index1 in xrange(length - 1):
                for index2 in xrange(index1 + 1, length):
                    tid1 = cluster[index1]
                    tid2 = cluster[index2]
                    overlap_set = topk_words[tid1] & topk_words[tid2]
                    min_value = {}
                    for word in overlap_set:
                        min_value[word] = 1.0
                    w_union = 0.0
                    for word, prob in topk_items[tid1]:
                        w_union += prob
                        if word in overlap_set and min_value[word] > prob:
                            min_value[word] = prob
                    for word, prob in topk_items[tid2]:
                        w_union += prob
                        if word in overlap_set and min_value[word] > prob:
                            min_value[word] = prob
                    w_inter = 0.0
                    for prob in min_value.itervalues():
                        w_inter += prob
                    overlap = w_inter / (w_union - w_inter)
                    # Record pairs whose similarity exceeds the threshold
                    if overlap >= Jac_thresh:
                        overlap_pair.append((tid1, tid2))
        # Merge topic pairs via union-find
        dis_sets = self.disjoint_set(overlap_pair)
        redundant_topic_cnt = 0
        for item in dis_sets:
            redundant_topic_cnt += len(item)
        print("Merge {} redundant topic pairs into {} topics (sets).".format(
            redundant_topic_cnt, len(dis_sets)))
        # Merge the topics that belong to the same cluster
        for index in xrange(len(dis_sets)):
            current_list = list(dis_sets[index])
            for topic_id in current_list[1:]:
                topic_word[current_list[0]] += topic_word[topic_id]
                topic_word[topic_id] = []
        # Convert back to word-topic format
        word_topic = ddict(list)
        new_topic_index = -1
        for topic_id in xrange(self._num_topics):
            if topic_word[topic_id] == []:
                continue
            new_topic_index += 1
            word_dict = ddict(int)
            for word_id, cnt in topic_word[topic_id]:
                word_dict[word_id] += cnt
            for word_id, cnt in word_dict.items():
                word_topic[word_id].append("{}:{}".format(new_topic_index, cnt))
        print("Now, in total we have {} refined topics.".format(new_topic_index + 1))
        # Write the model to file
        out_file = open(output_file, 'w')
        for word_id in word_topic.keys():
            out_file.writelines("{} {}\n".format(word_id, ' '.join(word_topic[word_id])))
        out_file.close()
|
#coding=utf-8
# Copyright (c) 2017, Baidu.com, Inc. All Rights Reserved
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
from collections import defaultdict as ddict
import operator
class TopicModelFastMerge(object):
"""
针对超大模型的主题去重,如果计算两两主题之间的相似度会非常耗时,我们引入simhash的快速去重算法。
主要分为两步:基于Simhash的主题预分析以及基于Weighted Jaccard Similarity的Cluster内主题相关性分析。
Attributes:
_num_topics: 模型主题个数
_word_topic_file: 主题模型文件
"""
def __init__(self, model_dir, conf_file):
"""
通过传进来配置文件初始化参数
Args:
model_dir: 模型目录
conf_file: 模型配置文件
"""
parameters = self.config_parser(model_dir + '/' + conf_file)
self._num_topics = int(parameters["num_topics"])
self._word_topic_file = model_dir + '/' + parameters["word_topic_file"].strip('"')
def conv_topic_word(self):
"""
将词-主题格式转换为主题-词格式。
词-主题表格存放每行格式为:词ID 主题ID:个数 ... 主题ID:个数
转换成主题-词存储,格式为:主题ID 词ID:个数 ... 词ID:个数
Args:
None
Returns:
topic_word: 转换后的主题-词模型
topic_sum: 统计每个主题下词的总数
"""
topic_word = [[] for _ in xrange(self._num_topics)]
topic_sum = [0] * self._num_topics
with open(self._word_topic_file) as f:
for line in f:
cols = line.strip().split()
word_id = int(cols[0])
for index in range(1, len(cols)):
topic_id, cnt = [int(item) for item in cols[index].split(':')]
topic_word[topic_id].append((word_id, cnt))
topic_sum[topic_id] += cnt
return topic_word, topic_sum
def config_parser(self, config):
"""
读取配置文件,解析参数。
Args:
config: 模型配置
Returns:
parameters: 模型配置词典
"""
parameters = {}
with open(config) as f:
for line in f:
if line.startswith('#'):
continue
cols = line.strip().split(':')
if len(cols) != 2:
continue
parameters[cols[0].strip()] = cols[1].strip()
return parameters
def select_topk(self, topic_word, topic_sum, topk):
"""
从每个主题中选取概率最大的k个词
Args:
topic_word: 主题-词表格
topic_sum: 每个主题下的词总数
topk: 每个主题选取词的个数
Returns:
topk_items: 存放每个主题前K个词以及对应的概率
"""
topk_items = [[] for _ in xrange(self._num_topics)]
for topic_id in xrange(self._num_topics):
topic_word[topic_id].sort(key=operator.itemgetter(1), reverse=True)
topk_items[topic_id] = [(item[0], float(item[1]) / topic_sum[topic_id]) for item in topic_word[topic_id][:topk]]
return topk_items
def string_hash(self, word_str, hashbits):
"""
将一个字符串映射成一个哈希码
Args:
word_str: 输入字符串
hashbits: 哈希码位数
Returns:
hash_code: 哈希码
"""
if word_str == "":
return 0
hash_code = ord(word_str[0]) << 7
m = 1000003
mask = 2 ** hashbits - 1
for ch in word_str:
hash_code = ((hash_code * m) ^ ord(ch)) & mask
hash_code ^= len(word_str)
if hash_code == -1:
hash_code = -2
return hash_code
def word_id_hash(self, hashbits):
"""
为词表中的所有词生成哈希码
Args:
hashbits: 哈希码位数
Returns:
word_hash: 词表哈希词典
"""
word_hash = ddict(int)
with open(self._word_topic_file) as f:
for line in f:
word_id = line.strip().split(' ', 1)[0]
word_hash[word_id] = self.string_hash(word_id, hashbits)
return word_hash
def topic_simhash(self, topic_word, topic_sum, hashbits):
"""
对于该主题下的每个单词,计算word_id字符串对应的hash值,
每个word_id也有相应的权值weight,为topic_count/topic_sum。
若该word_id字符串对应的hash值为1001,加权后的值为weight -weight -weight weight,
即hash某一位值为‘1’则为‘weight’,hash某一位值为‘0’则为‘-weight’。
Args:
topic_word: 主题-词格式的模型
topic_sum: 每个主题下词的总数
hashbits: 哈希码位数
Returns:
tpc_simhash: 主题哈希词典
"""
word_hash = self.word_id_hash(hashbits)
tpc_simhash = ddict(int)
for tid in xrange(self._num_topics):
topic_value = [0.0] * hashbits
masks = [1 << i for i in xrange(hashbits)]
for item in topic_word[tid]:
w_hash = word_hash[str(item[0])]
weight = float(item[1]) / topic_sum[tid]
for index in xrange(hashbits):
if w_hash & masks[index]:
topic_value[index] += weight
else:
topic_value[index] -= weight
simhash = 0
for index in xrange(hashbits):
if topic_value[index] >= 0:
simhash |= masks[index]
tpc_simhash[tid] = simhash
return tpc_simhash
def topic_cluster(self, tpc_simhash, hashbits):
"""
将相同哈希码的主题聚成一个类别
Args:
tpc_simhash: 主题哈希词典
hashbits: 哈希码位数
Returns:
dis_sets: list类型,每个元素为一个set,存放属于同一类的主题ID
"""
overlap_pair = []
for tid1 in xrange(self._num_topics - 1):
for tid2 in xrange(tid1 + 1, self._num_topics):
topic_sim = (tpc_simhash[tid1] ^ tpc_simhash[tid2]) & ((1 << hashbits) -1)
if topic_sim == 0:
overlap_pair.append((tid1, tid2))
# 通过并查集合并主题对
dis_sets = self.disjoint_set(overlap_pair)
candidate_topic_cnt = 0
for item in dis_sets:
candidate_topic_cnt += len(item)
print("There are {} candidate topic pairs with the same Simhash value in {} clusters.".format(
candidate_topic_cnt, len(dis_sets)))
return dis_sets
def disjoint_set(self, overlap_pair):
"""
并查集算法
Args:
overlap_pair: list类型,每个元素为一个tuple,包含两个冗余的主题ID,例如:
[(1, 2), (2, 3), (4, 5)]
Returns:
results: list类型,每个元素为一个set,包含若干个冗余的主题ID,例如:
[set(1, 2, 3), set(4, 5)]
"""
d = ddict(list)
for index, (tid1, tid2) in enumerate(overlap_pair):
d[tid1].append(index)
d[tid2].append(index)
sets = []
while len(d):
indexs = set(d.popitem()[1])
temp_set = set()
while len(indexs):
temp_set |= indexs
temp_list = list()
for index in indexs:
for tid in overlap_pair[index]:
for ind in d.pop(tid, []):
temp_list += [ind]
indexs = set(temp_list) - temp_set
sets += [temp_set]
results = []
for indexs in sets:
temp_list = []
for index in indexs:
for tid in overlap_pair[index]:
temp_list += [tid]
results.append(set(temp_list))
return results
def reduce_topic(self, topk, Jac_thresh, hashbits, output_file):
"""
删除冗余主题,主要分为两步:
1.基于Simhash的主题预分析;
2.基于Weighted Jaccard Similarity的Cluster内主题相关性分析。
Args:
topk: 每个主题取的词个数
Jac_thresh: 阈值,Jaccard Similarity高于该值则当成冗余主题对
hashbits: 哈希码位数
output_file: 去重模型保存文件
Returns:
None
"""
topic_word, topic_sum = self.conv_topic_word()
topk_items = self.select_topk(topic_word, topic_sum, topk)
tpc_simhash = self.topic_simhash(topic_word, topic_sum, hashbits)
# 获取主题聚类
topic_clusters = self.topic_cluster(tpc_simhash, hashbits)
topk_words = []
for topic_id in xrange(self._num_topics):
topk_words.append(set([item[0] for item in topk_items[topic_id]]))
# 对主题聚类内进行相相关分析
overlap_pair = []
for index in xrange(len(topic_clusters)):
cluster = list(topic_clusters[index])
length = len(cluster)
for index1 in xrange(length - 1):
for index2 in xrange(index1 + 1, length):
tid1 = cluster[index1]
tid2 = cluster[index2]
overlap_set = topk_words[tid1] & topk_words[tid2]
min_value = {}
for word in overlap_set:
min_value[word] = 1.0
w_union = 0.0
for word, prob in topk_items[tid1]:
w_union += prob
if word in overlap_set and min_value[word] > prob:
min_value[word] = prob
for word, prob in topk_items[tid2]:
w_union += prob
if word in overlap_set and min_value[word] > prob:
min_value[word] = prob
w_inter = 0.0
for prob in min_value.itervalues():
w_inter += prob
overlap = w_inter / (w_union - w_inter)
# 添加高于阈值的主题对
if overlap >= Jac_thresh:
overlap_pair.append((tid1, tid2))
# 使用并查集算法合并主题对
dis_sets = self.disjoint_set(overlap_pair)
redundant_topic_cnt = 0
for item in dis_sets:
redundant_topic_cnt += len(item)
print("Merge {} redundant topic pairs into {} topics (sets).".format(
redundant_topic_cnt, len(dis_sets)))
# 对属于同一类的主题进行合并
for index in xrange(len(dis_sets)):
current_list = list(dis_sets[index])
for topic_id in current_list[1:]:
topic_word[current_list[0]] += topic_word[topic_id]
topic_word[topic_id] = []
# 转换成word-topic形式
word_topic = ddict(list)
new_topic_index = -1
for topic_id in xrange(self._num_topics):
if topic_word[topic_id] == []:
continue
new_topic_index += 1
word_dict = ddict(int)
for word_id, cnt in topic_word[topic_id]:
word_dict[word_id] += cnt
for word_id, cnt in word_dict.items():
word_topic[word_id].append("{}:{}".format(new_topic_index, cnt))
print("Now, in total we have {} refined topics.".format(new_topic_index + 1))
# 输出模型到文件
out_file = open(output_file, 'w')
for word_id in word_topic.keys():
out_file.writelines("{} {}\n".format(word_id, ' '.join(word_topic[word_id])))
out_file.close()
|
zh
| 0.846225
|
#coding=utf-8 # Copyright (c) 2017, Baidu.com, Inc. All Rights Reserved # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 针对超大模型的主题去重,如果计算两两主题之间的相似度会非常耗时,我们引入simhash的快速去重算法。 主要分为两步:基于Simhash的主题预分析以及基于Weighted Jaccard Similarity的Cluster内主题相关性分析。 Attributes: _num_topics: 模型主题个数 _word_topic_file: 主题模型文件 通过传进来配置文件初始化参数 Args: model_dir: 模型目录 conf_file: 模型配置文件 将词-主题格式转换为主题-词格式。 词-主题表格存放每行格式为:词ID 主题ID:个数 ... 主题ID:个数 转换成主题-词存储,格式为:主题ID 词ID:个数 ... 词ID:个数 Args: None Returns: topic_word: 转换后的主题-词模型 topic_sum: 统计每个主题下词的总数 读取配置文件,解析参数。 Args: config: 模型配置 Returns: parameters: 模型配置词典 从每个主题中选取概率最大的k个词 Args: topic_word: 主题-词表格 topic_sum: 每个主题下的词总数 topk: 每个主题选取词的个数 Returns: topk_items: 存放每个主题前K个词以及对应的概率 将一个字符串映射成一个哈希码 Args: word_str: 输入字符串 hashbits: 哈希码位数 Returns: hash_code: 哈希码 为词表中的所有词生成哈希码 Args: hashbits: 哈希码位数 Returns: word_hash: 词表哈希词典 对于该主题下的每个单词,计算word_id字符串对应的hash值, 每个word_id也有相应的权值weight,为topic_count/topic_sum。 若该word_id字符串对应的hash值为1001,加权后的值为weight -weight -weight weight, 即hash某一位值为‘1’则为‘weight’,hash某一位值为‘0’则为‘-weight’。 Args: topic_word: 主题-词格式的模型 topic_sum: 每个主题下词的总数 hashbits: 哈希码位数 Returns: tpc_simhash: 主题哈希词典 将相同哈希码的主题聚成一个类别 Args: tpc_simhash: 主题哈希词典 hashbits: 哈希码位数 Returns: dis_sets: list类型,每个元素为一个set,存放属于同一类的主题ID # 通过并查集合并主题对 并查集算法 Args: overlap_pair: list类型,每个元素为一个tuple,包含两个冗余的主题ID,例如: [(1, 2), (2, 3), (4, 5)] Returns: results: list类型,每个元素为一个set,包含若干个冗余的主题ID,例如: [set(1, 2, 3), set(4, 5)] 删除冗余主题,主要分为两步: 1.基于Simhash的主题预分析; 2.基于Weighted Jaccard Similarity的Cluster内主题相关性分析。 Args: topk: 每个主题取的词个数 Jac_thresh: 阈值,Jaccard Similarity高于该值则当成冗余主题对 hashbits: 哈希码位数 output_file: 去重模型保存文件 Returns: None # 获取主题聚类 # 对主题聚类内进行相相关分析 # 添加高于阈值的主题对 # 使用并查集算法合并主题对 # 对属于同一类的主题进行合并 # 转换成word-topic形式 # 输出模型到文件
| 1.98149
| 2
|
photonpump/conversations.py
|
bopopescu/photon-pump
| 48
|
6625706
|
import json
import logging
import sys
import time
from asyncio import Future, Queue
try:
from asyncio.exceptions import InvalidStateError
except ImportError:
from asyncio.futures import InvalidStateError
from enum import IntEnum
from typing import Optional, Sequence, Union
from uuid import UUID, uuid4
from photonpump import exceptions
from photonpump import messages
from photonpump import messages_pb2 as proto
from photonpump.messages import (
AllStreamSlice,
ContentType,
Credential,
ExpectedVersion,
InboundMessage,
NewEvent,
NotHandledReason,
OutboundMessage,
Position,
ReadAllResult,
ReadEventResult,
ReadStreamResult,
StreamDirection,
StreamSlice,
SubscriptionResult,
TcpCommand,
_make_event,
)
class StreamingIterator:
def __init__(self, size=0):
self.items = Queue(size)
self.finished = False
self.fut = None
self.last_item = None
def __aiter__(self):
return self
async def enqueue_items(self, items):
for item in items:
await self.enqueue(item)
async def enqueue(self, item):
await self.items.put(item)
self.last_item = item
async def anext(self):
return await self.__anext__()
async def __anext__(self):
if self.finished and self.items.empty():
raise StopAsyncIteration()
_next = await self.items.get()
if isinstance(_next, Exception):
raise _next
return _next
async def asend(self, item):
await self.items.put(item)
@property
def last_event_number(self):
if self.last_item is None:
return None
return self.last_item.event_number
class Conversation:
    """Base class for a request/response exchange with the server.

    A conversation owns a correlation id and a Future that is resolved
    (or failed) when the exchange completes.
    """

    def __init__(
        self,
        conversation_id: Optional[UUID] = None,
        credential: Optional[Credential] = None,
    ) -> None:
        self.conversation_id = conversation_id or uuid4()
        self.result: Future = Future()
        self.is_complete = False
        self.credential = credential
        # NOTE(review): stdlib ``logging`` has no ``get_named_logger``; this
        # relies on a project-provided logging shim being imported — confirm.
        self._logger = logging.get_named_logger(Conversation)
        self.one_way = False

    def __str__(self):
        return "<%s %s>" % (type(self).__name__, self.conversation_id)

    def __eq__(self, other):
        # Conversations are equal when their correlation ids match.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable in Python 3 — confirm this is intended.
        if not isinstance(other, Conversation):
            return False
        return self.conversation_id == other.conversation_id

    async def start(self, output: Queue) -> Future:
        # Bug fix: the original ``raise NotImplemented()`` raised a TypeError
        # (NotImplemented is a constant, not callable); use the proper
        # exception type, matching reply() below.
        raise NotImplementedError()

    async def reply(self, message: InboundMessage, output: Queue) -> None:
        raise NotImplementedError()

    async def error(self, exn: Exception) -> None:
        """Mark the conversation as failed with *exn*."""
        self.is_complete = True
        self.result.set_exception(exn)

    def expect_only(self, response: InboundMessage, *commands: TcpCommand):
        """Raise UnexpectedCommand unless *response* carries one of *commands*."""
        if response.command not in commands:
            raise exceptions.UnexpectedCommand(commands, response.command)

    async def respond_to(self, response: InboundMessage, output: Queue) -> None:
        """Dispatch a server response, translating protocol-level failures
        into the corresponding exceptions on the result future."""
        try:
            if response.command is TcpCommand.BadRequest:
                return await self.conversation_error(exceptions.BadRequest, response)
            if response.command is TcpCommand.NotAuthenticated:
                return await self.conversation_error(
                    exceptions.NotAuthenticated, response
                )
            if response.command is TcpCommand.NotHandled:
                return await self.unhandled_message(response)
            return await self.reply(response, output)
        except Exception as exn:
            # Anything that blows up while decoding counts as an unreadable
            # payload; preserve the original traceback for debugging.
            self._logger.exception("Failed to read server response", exc_info=True)
            exc_info = sys.exc_info()
            return await self.error(
                exceptions.PayloadUnreadable(
                    self.conversation_id, response.payload, exn
                ).with_traceback(exc_info[2])
            )

    async def unhandled_message(self, response) -> None:
        """Translate a NotHandled response into the matching exception."""
        body = proto.NotHandled()
        body.ParseFromString(response.payload)
        if body.reason == NotHandledReason.NotReady:
            exn = exceptions.NotReady(self.conversation_id)
        elif body.reason == NotHandledReason.TooBusy:
            exn = exceptions.TooBusy(self.conversation_id)
        elif body.reason == NotHandledReason.NotMaster:
            exn = exceptions.NotMaster(self.conversation_id)
        else:
            exn = exceptions.NotHandled(self.conversation_id, body.reason)
        return await self.error(exn)

    async def conversation_error(self, exn_type, response) -> None:
        """Fail the conversation with *exn_type* built from the response payload."""
        error = response.payload.decode("UTF-8")
        exn = exn_type(self.conversation_id, error)
        return await self.error(exn)
class TimerConversation(Conversation):
    """A conversation whose result is the round-trip time in seconds."""

    def __init__(self, conversation_id, credential):
        super().__init__(conversation_id, credential)
        self.started_at = time.perf_counter()

    async def start(self, output: Queue) -> None:
        """Reset the timer; subclasses send their request after calling this."""
        self.started_at = time.perf_counter()
        self._logger.debug("TimerConversation started (%s)", self.conversation_id)

    async def reply(self, message: InboundMessage, output: Queue) -> None:
        """Resolve the result future with the elapsed time since start()."""
        self._logger.info("Replying from conversation %s", self)
        elapsed = time.perf_counter() - self.started_at
        self.result.set_result(elapsed)
        self.is_complete = True
class Heartbeat(TimerConversation):
    """Respond to (INBOUND) or initiate (OUTBOUND) a heartbeat exchange."""

    INBOUND = 0
    OUTBOUND = 1

    def __init__(
        self, conversation_id: UUID, direction=INBOUND, credential=None
    ) -> None:
        # Bug fix: the original always called super().__init__ with
        # credential=None, silently discarding the credential argument.
        super().__init__(conversation_id, credential=credential)
        self.direction = direction
        self.result = Future()

    async def start(self, output: Queue) -> Future:
        await super().start(output)
        if self.direction == Heartbeat.INBOUND:
            # Acknowledging a server heartbeat expects no reply -> one-way.
            one_way = True
            cmd = TcpCommand.HeartbeatResponse
        else:
            one_way = False
            cmd = TcpCommand.HeartbeatRequest
        await output.put(
            OutboundMessage(
                self.conversation_id, cmd, b"", self.credential, one_way=one_way
            )
        )
        self._logger.debug("Heartbeat started (%s)", self.conversation_id)
        # Consistency fix: return the result future, matching the declared
        # ``-> Future`` annotation and the behaviour of Ping.start().
        return self.result

    async def reply(self, message: InboundMessage, output: Queue) -> None:
        self.expect_only(message, TcpCommand.HeartbeatResponse)
        await super().reply(message, output)
class Ping(TimerConversation):
    """Measure server round-trip latency via a Ping/Pong exchange."""

    def __init__(self, conversation_id: UUID = None, credential=None) -> None:
        super().__init__(conversation_id or uuid4(), credential)

    async def start(self, output: Queue) -> Future:
        await super().start(output)
        if output:
            request = OutboundMessage(
                self.conversation_id, TcpCommand.Ping, b"", self.credential
            )
            await output.put(request)
        self._logger.debug("Ping started (%s)", self.conversation_id)
        return self.result

    async def reply(self, message: InboundMessage, output: Queue) -> None:
        self.expect_only(message, TcpCommand.Pong)
        await super().reply(message, output)
class WriteEvents(Conversation):
    """Command class for writing a sequence of events to a single
    stream.

    Args:
        stream: The name of the stream to write to.
        events: A sequence of events to write.
        expected_version (optional): The expected version of the
            target stream used for concurrency control.
        required_master (optional): True if this command must be
            sent direct to the master node, otherwise False.
        correlation_id (optional): A unique identifer for this
            command.
    """

    def __init__(
        self,
        stream: str,
        events: Sequence[NewEvent],
        expected_version: Union[ExpectedVersion, int] = ExpectedVersion.Any,
        require_master: bool = False,
        conversation_id: UUID = None,
        credential=None,
        loop=None,
    ):
        super().__init__(conversation_id, credential)
        self._logger = logging.get_named_logger(WriteEvents)
        self.stream = stream
        self.require_master = require_master
        self.events = events
        self.expected_version = expected_version

    async def start(self, output: Queue) -> None:
        """Serialize the events and enqueue the WriteEvents request."""
        msg = proto.WriteEvents()
        msg.event_stream_id = self.stream
        msg.require_master = self.require_master
        msg.expected_version = self.expected_version
        for event in self.events:
            e = msg.events.add()
            e.event_id = event.id.bytes_le
            e.event_type = event.type
            # str payloads are sent as-is and flagged JSON; bytes pass
            # through untouched; anything else truthy is JSON-serialized.
            if isinstance(event.data, str):
                e.data_content_type = ContentType.Json
                e.data = event.data.encode("UTF-8")
            elif isinstance(event.data, bytes):
                e.data_content_type = ContentType.Binary
                e.data = event.data
            elif event.data:
                e.data_content_type = ContentType.Json
                e.data = json.dumps(event.data).encode("UTF-8")
            else:
                e.data_content_type = ContentType.Binary
                e.data = bytes()
            if event.metadata:
                e.metadata_content_type = ContentType.Json
                e.metadata = json.dumps(event.metadata).encode("UTF-8")
            else:
                e.metadata_content_type = ContentType.Binary
                e.metadata = bytes()
        data = msg.SerializeToString()
        await output.put(
            OutboundMessage(
                self.conversation_id, TcpCommand.WriteEvents, data, self.credential
            )
        )
        self._logger.debug(
            "WriteEvents started on %s (%s)", self.stream, self.conversation_id
        )

    async def reply(self, message: InboundMessage, output: Queue) -> None:
        self.expect_only(message, TcpCommand.WriteEventsCompleted)
        result = proto.WriteEventsCompleted()
        result.ParseFromString(message.payload)
        if result.result == proto.AccessDenied:
            await self.error(
                exceptions.AccessDenied(
                    self.conversation_id, type(self).__name__, result.message
                )
            )
            # Bug fix: the original fell through after recording the error
            # and then hit InvalidStateError on set_result below, because the
            # future already had an exception set.
            return
        try:
            self.result.set_result(result)
            self.is_complete = True
        except InvalidStateError as exn:
            self._logger.error(self.result, message, self, exc_info=True)
            raise exn
class ReadAllEventsCompleted:
    """Parses a ReadAllEventsCompleted response and routes it to the owning
    conversation."""

    def __init__(self, message: InboundMessage):
        self._data = proto.ReadAllEventsCompleted()
        self._data.ParseFromString(message.payload)
        self._conversation_id = message.conversation_id

    async def dispatch(self, conversation, output):
        """Invoke success/error on *conversation* based on the result code."""
        outcome = self._data.result
        if outcome == ReadAllResult.Success:
            await conversation.success(self._data, output)
            return
        if outcome == ReadAllResult.Error:
            await conversation.error(
                exceptions.ReadError(self._conversation_id, "$all", self._data.error)
            )
            return
        if outcome == ReadAllResult.AccessDenied:
            await conversation.error(
                exceptions.AccessDenied(
                    self._conversation_id, type(self).__name__, self._data.error
                )
            )
class ReadEventCompleted:
    """Parses a ReadEventCompleted response and routes it to the owning
    conversation."""

    def __init__(self, message):
        self._data = proto.ReadEventCompleted()
        self._data.ParseFromString(message.payload)
        self._conversation_id = message.conversation_id

    async def dispatch(self, conversation, output):
        """Map the protocol result code onto a success or error callback."""
        outcome = self._data.result
        if outcome == ReadEventResult.Success:
            await conversation.success(self._data, output)
        elif outcome == ReadEventResult.NoStream:
            await conversation.error(
                exceptions.StreamNotFound(self._conversation_id, conversation.stream)
            )
        elif outcome == ReadEventResult.StreamDeleted:
            await conversation.error(
                exceptions.StreamDeleted(self._conversation_id, conversation.stream)
            )
        elif outcome == ReadEventResult.Error:
            await conversation.error(
                exceptions.ReadError(
                    self._conversation_id, conversation.stream, outcome
                )
            )
        elif outcome == ReadEventResult.AccessDenied:
            await conversation.error(
                exceptions.AccessDenied(
                    self._conversation_id,
                    type(conversation).__name__,
                    self._data.error,
                    stream=conversation.stream,
                )
            )
        elif outcome == ReadEventResult.NotFound:
            await conversation.error(
                exceptions.EventNotFound(
                    self._conversation_id, conversation.name, conversation.event_number
                )
            )
class ReadStreamEventsCompleted:
    """Parses a ReadStreamEventsCompleted response and routes it to the
    owning conversation."""

    def __init__(self, message):
        self._data = proto.ReadStreamEventsCompleted()
        self._data.ParseFromString(message.payload)
        self._conversation_id = message.conversation_id

    async def dispatch(self, conversation, output):
        """Map the protocol result code onto a success or error callback.

        Bug fix: the original ended with a branch testing
        ``self.result_type == ReadEventResult`` and reading ``self.stream`` /
        ``self.event_number`` — attributes this class does not define
        (left over from an older conversation-style implementation). Any
        unrecognized result code therefore raised AttributeError. NotFound
        handling belongs to ReadEventCompleted, so that branch is removed;
        unrecognized codes are now silently ignored, matching the sibling
        ReadAllEventsCompleted dispatcher.
        """
        result = self._data.result
        if result == ReadStreamResult.Success:
            await conversation.success(self._data, output)
        elif result == ReadStreamResult.NoStream:
            await conversation.error(
                exceptions.StreamNotFound(self._conversation_id, conversation.stream)
            )
        elif result == ReadStreamResult.StreamDeleted:
            await conversation.error(
                exceptions.StreamDeleted(self._conversation_id, conversation.stream)
            )
        elif result == ReadStreamResult.Error:
            await conversation.error(
                exceptions.ReadError(self._conversation_id, conversation.stream, result)
            )
        elif result == ReadStreamResult.AccessDenied:
            await conversation.error(
                exceptions.AccessDenied(
                    self._conversation_id,
                    type(conversation).__name__,
                    self._data.error,
                    stream=conversation.stream,
                )
            )
class ReadEvent(Conversation):
    """Command class for reading a single event.

    Args:
        stream: The name of the stream containing the event.
        event_number: The sequence number of the event to read.
        resolve_links (optional): True if eventstore should
            automatically resolve Link Events, otherwise False.
        required_master (optional): True if this command must be
            sent direct to the master node, otherwise False.
        correlation_id (optional): A unique identifer for this
            command.
    """

    def __init__(
        self,
        stream: str,
        event_number: int,
        resolve_links: bool = True,
        require_master: bool = False,
        conversation_id: Optional[UUID] = None,
        credential=None,
    ) -> None:
        super().__init__(conversation_id, credential=credential)
        self.stream = stream
        self.event_number = event_number
        self.require_master = require_master
        self.resolve_link_tos = resolve_links
        self.name = stream

    async def start(self, output: Queue) -> None:
        """Serialize and enqueue the ReadEvent request."""
        request = proto.ReadEvent()
        request.event_number = self.event_number
        request.event_stream_id = self.stream
        request.require_master = self.require_master
        request.resolve_link_tos = self.resolve_link_tos
        payload = request.SerializeToString()
        await output.put(
            OutboundMessage(
                self.conversation_id, TcpCommand.Read, payload, self.credential
            )
        )
        self._logger.debug(
            "ReadEvent started on %s (%s)", self.stream, self.conversation_id
        )

    async def reply(self, message: InboundMessage, output: Queue):
        await ReadEventCompleted(message).dispatch(self, output)

    async def success(self, response, output: Queue):
        """Resolve the result future with the materialized event."""
        self.is_complete = True
        self.result.set_result(_make_event(response.event))
def page_stream_message(conversation, from_event):
    """Build the OutboundMessage requesting one page of stream events.
    The read direction, stream name, batch size, and flags are all taken
    from the supplied conversation object.
    """
    reading_forward = conversation.direction == StreamDirection.Forward
    command = (
        TcpCommand.ReadStreamEventsForward
        if reading_forward
        else TcpCommand.ReadStreamEventsBackward
    )
    request = proto.ReadStreamEvents()
    request.event_stream_id = conversation.stream
    request.from_event_number = from_event
    request.max_count = conversation.batch_size
    request.require_master = conversation.require_master
    request.resolve_link_tos = conversation.resolve_link_tos
    return OutboundMessage(
        conversation.conversation_id,
        command,
        request.SerializeToString(),
        conversation.credential,
    )
def page_all_message(conversation, from_position: Position):
    """Build the OutboundMessage requesting one page of the $all stream.
    The read direction, batch size, and flags are taken from the supplied
    conversation object; the starting point is the given commit/prepare
    position pair.
    """
    reading_forward = conversation.direction == StreamDirection.Forward
    command = (
        TcpCommand.ReadAllEventsForward
        if reading_forward
        else TcpCommand.ReadAllEventsBackward
    )
    request = proto.ReadAllEvents()
    request.commit_position = from_position.commit
    request.prepare_position = from_position.prepare
    request.max_count = conversation.batch_size
    request.require_master = conversation.require_master
    request.resolve_link_tos = conversation.resolve_link_tos
    return OutboundMessage(
        conversation.conversation_id,
        command,
        request.SerializeToString(),
        conversation.credential,
    )
class ReadAllEvents(Conversation):
    """Command class for reading a single page of events from $all.
    Args:
        from_position (optional): The position in the $all stream to read
            from. Defaults to the beginning of the stream.
        max_count (optional): The maximum number of events to return.
        resolve_links (optional): True if eventstore should
            automatically resolve Link Events, otherwise False.
        require_master (optional): True if this command must be
            sent direct to the master node, otherwise False.
        direction (optional): Whether to read forward or backward.
        conversation_id (optional): A unique identifier for this command.
        credential (optional): Credentials used to authenticate the request.
    """
    def __init__(
        self,
        from_position: Optional[Position] = None,
        max_count: int = 100,
        resolve_links: bool = True,
        require_master: bool = False,
        direction: StreamDirection = StreamDirection.Forward,
        credential=None,
        conversation_id: Optional[UUID] = None,
    ) -> None:
        super().__init__(conversation_id, credential=credential)
        self.has_first_page = False
        self.direction = direction
        # Fixed: the default from_position=None previously crashed in
        # page_all_message (None has no .commit). Normalise to the start
        # of the stream, matching IterAllEvents.
        self.from_position = from_position or Position(0, 0)
        self.batch_size = max_count
        self.require_master = require_master
        self.resolve_link_tos = resolve_links
    async def reply(self, message: InboundMessage, output: Queue) -> None:
        # Delegate success/error dispatch to the response wrapper.
        result = ReadAllEventsCompleted(message)
        await result.dispatch(self, output)
    async def success(self, result: proto.ReadAllEventsCompleted, output: Queue):
        # Decode the page and resolve the conversation with an AllStreamSlice
        # carrying the events plus the next and current positions.
        events = [_make_event(x) for x in result.events]
        self.is_complete = True
        self.result.set_result(
            AllStreamSlice(
                events,
                Position(result.next_commit_position, result.next_prepare_position),
                Position(result.commit_position, result.prepare_position),
            )
        )
    async def start(self, output):
        await output.put(page_all_message(self, self.from_position))
class ReadStreamEvents(Conversation):
    """Command class for reading a single page of events from a stream.
    Args:
        stream: The name of the stream containing the events.
        from_event (optional): The event number to start reading from.
        max_count (optional): The maximum number of events to return.
        resolve_links (optional): True if eventstore should
            automatically resolve Link Events, otherwise False.
        require_master (optional): True if this command must be
            sent direct to the master node, otherwise False.
        direction (optional): Whether to read forward or backward.
        conversation_id (optional): A unique identifier for this command.
        credential (optional): Credentials used to authenticate the request.
    """
    def __init__(
        self,
        stream: str,
        from_event: int = 0,
        max_count: int = 100,
        resolve_links: bool = True,
        require_master: bool = False,
        direction: StreamDirection = StreamDirection.Forward,
        credential=None,
        conversation_id: Optional[UUID] = None,
    ) -> None:
        super().__init__(conversation_id, credential=credential)
        self.has_first_page = False
        self.stream = stream
        self.direction = direction
        self.from_event = from_event
        self.batch_size = max_count
        self.require_master = require_master
        self.resolve_link_tos = resolve_links
    async def reply(self, message: InboundMessage, output: Queue):
        # Delegate success/error dispatch to the response wrapper.
        result = ReadStreamEventsCompleted(message)
        await result.dispatch(self, output)
    async def start(self, output: Queue) -> None:
        message = page_stream_message(self, self.from_event)
        await output.put(message)
        self._logger.debug(
            "Starting ReadStreamEvents (%d events starting at %d from %s)",
            self.batch_size,
            self.from_event,
            self.stream,
        )
    async def success(self, result: proto.ReadStreamEventsCompleted, output: Queue):
        # Decode the page and resolve the conversation with a StreamSlice.
        events = [_make_event(x) for x in result.events]
        self.is_complete = True
        self.result.set_result(
            StreamSlice(
                events,
                result.next_event_number,
                result.last_event_number,
                None,
                result.last_commit_position,
                result.is_end_of_stream,
            )
        )
class IterAllEvents(Conversation):
    """
    Command class for iterating all events in the database.
    Args:
        from_position (optional): The position to start reading from.
            Defaults to photonpump.Beginning when direction is Forward,
            photonpump.End when direction is Backward.
        batch_size (optional): The maximum number of events to read at a time.
        resolve_links (optional): True if eventstore should
            automatically resolve Link Events, otherwise False.
        require_master (optional): True if this command must be
            sent direct to the master node, otherwise False.
        direction (optional): Controls whether to read forward or backward
            through the events. Defaults to StreamDirection.Forward
        conversation_id (optional): A unique identifier for this
            command.
    """
    def __init__(
        self,
        from_position: Optional[Position] = None,
        batch_size: int = 100,
        resolve_links: bool = True,
        require_master: bool = False,
        direction: StreamDirection = StreamDirection.Forward,
        credential=None,
        conversation_id: Optional[UUID] = None,
    ):
        super().__init__(conversation_id, credential)
        self.batch_size = batch_size
        self.has_first_page = False
        self.resolve_link_tos = resolve_links
        self.require_master = require_master
        self.from_position = from_position or Position(0, 0)
        self.direction = direction
        self._logger = logging.get_named_logger(IterAllEvents)
        # Results are streamed to the caller through this async iterator.
        self.iterator = StreamingIterator(self.batch_size)
        if direction == StreamDirection.Forward:
            self.command = TcpCommand.ReadAllEventsForward
        else:
            self.command = TcpCommand.ReadAllEventsBackward
    async def start(self, output):
        await output.put(page_all_message(self, self.from_position))
        self._logger.debug("IterAllEvents started (%s)", self.conversation_id)
    async def reply(self, message, output):
        # Delegate success/error dispatch to the response wrapper.
        result = ReadAllEventsCompleted(message)
        await result.dispatch(self, output)
    async def success(self, result: proto.ReadAllEventsCompleted, output: Queue):
        # The caller receives the iterator as the conversation's result as
        # soon as the first page arrives; later pages just feed it.
        if not self.has_first_page:
            self.result.set_result(self.iterator)
            self.has_first_page = True
        events = [_make_event(x) for x in result.events]
        await self.iterator.enqueue_items(events)
        # The server signals the end of $all by returning the same commit
        # position we asked for next.
        at_end = result.commit_position == result.next_commit_position
        if at_end:
            self.is_complete = True
            await self.iterator.asend(StopAsyncIteration())
            return
        # Not at the end yet: request the next page.
        await output.put(
            page_all_message(
                self,
                Position(result.next_commit_position, result.next_prepare_position),
            )
        )
    async def error(self, exn: Exception) -> None:
        # Before the first page the caller is awaiting self.result; after
        # it, the caller is consuming the iterator — route the error there.
        self.is_complete = True
        if self.has_first_page:
            await self.iterator.asend(exn)
        else:
            self.result.set_exception(exn)
class IterStreamEvents(Conversation):
    """Command class for iterating events from a stream.
    Args:
        stream: The name of the stream containing the events.
        from_event (optional): The event number to start reading from.
            Defaults to the start of the stream when reading forward, and
            the end of the stream when reading backward.
        batch_size (optional): The maximum number of events per page.
        resolve_links (optional): True if eventstore should
            automatically resolve Link Events, otherwise False.
        require_master (optional): True if this command must be
            sent direct to the master node, otherwise False.
        conversation_id (optional): A unique identifier for this
            command.
    """
    def __init__(
        self,
        stream: str,
        from_event: Optional[int] = None,
        batch_size: int = 100,
        resolve_links: bool = True,
        require_master: bool = False,
        direction: StreamDirection = StreamDirection.Forward,
        credential=None,
        conversation_id: Optional[UUID] = None,
    ):
        super().__init__(conversation_id, credential)
        self.batch_size = batch_size
        self.has_first_page = False
        self.stream = stream
        self.resolve_link_tos = resolve_links
        self.require_master = require_master
        self.direction = direction
        self._logger = logging.get_named_logger(IterStreamEvents)
        # Results are streamed to the caller through this async iterator.
        self.iterator = StreamingIterator(self.batch_size)
        if direction == StreamDirection.Forward:
            self.command = TcpCommand.ReadStreamEventsForward
            self.from_event = from_event or 0
        else:
            self.command = TcpCommand.ReadStreamEventsBackward
            # -1 means "from the end of the stream".
            self.from_event = from_event or -1
    async def start(self, output: Queue):
        # On a restart, resume from the last event the iterator delivered.
        # NOTE(review): paging from last_event_number appears to re-request
        # that event after a reconnect — confirm dedup happens downstream.
        await output.put(
            page_stream_message(
                self, self.iterator.last_event_number or self.from_event
            )
        )
        self._logger.debug(
            "IterStreamEvents started on %s (%s)", self.stream, self.conversation_id
        )
    async def reply(self, message: InboundMessage, output: Queue) -> None:
        # Delegate success/error dispatch to the response wrapper.
        result = ReadStreamEventsCompleted(message)
        await result.dispatch(self, output)
    async def success(self, result: proto.ReadStreamEventsCompleted, output: Queue):
        # Request the next page before enqueueing this one, so the network
        # round-trip overlaps with the consumer draining the iterator.
        if not result.is_end_of_stream:
            await output.put(page_stream_message(self, result.next_event_number))
        events = [_make_event(x) for x in result.events]
        await self.iterator.enqueue_items(events)
        # The caller receives the iterator as soon as the first page arrives.
        if not self.has_first_page:
            self.result.set_result(self.iterator)
            self.has_first_page = True
        if result.is_end_of_stream:
            self.is_complete = True
            await self.iterator.asend(StopAsyncIteration())
    async def error(self, exn: Exception) -> None:
        # Before the first page the caller is awaiting self.result; after
        # it, the caller is consuming the iterator — route the error there.
        self.is_complete = True
        if self.has_first_page:
            await self.iterator.asend(exn)
        else:
            self.result.set_exception(exn)
class PersistentSubscription:
    """Client-side handle for a persistent (competing-consumer) subscription.
    Events are delivered through ``self.events`` (a StreamingIterator);
    ``ack`` reports a processed event back to the server.
    """
    def __init__(
        self,
        name,
        stream,
        correlation_id,
        initial_commit,
        initial_event_number,
        buffer_size,
        out_queue,
        auto_ack=False,
    ):
        self.name = name
        self.stream = stream
        self.conversation_id = correlation_id
        self.initial_commit_position = initial_commit
        self.last_event_number = initial_event_number
        self.buffer_size = buffer_size
        self.out_queue = out_queue
        self.auto_ack = auto_ack
        self.events = StreamingIterator()
    def __str__(self):
        template = "Subscription in group %s to %s at event number %d"
        return template % (self.name, self.stream, self.last_event_number)
    async def ack(self, event):
        """Acknowledge a single received event back to the server."""
        acknowledgement = proto.PersistentSubscriptionAckEvents()
        acknowledgement.subscription_id = self.name
        acknowledgement.processed_event_ids.append(
            event.received_event.id.bytes_le
        )
        await self.out_queue.put(
            OutboundMessage(
                self.conversation_id,
                TcpCommand.PersistentSubscriptionAckEvents,
                acknowledgement.SerializeToString(),
            )
        )
class CreatePersistentSubscription(Conversation):
    """Command class for creating a persistent subscription group.
    Mirrors the EventStore CreatePersistentSubscription TCP message; the
    many keyword arguments map one-to-one onto protobuf fields in start().
    The conversation resolves with None on success.
    """
    def __init__(
        self,
        name,
        stream,
        resolve_links=True,
        start_from=-1,
        timeout_ms=30000,
        record_statistics=False,
        live_buffer_size=500,
        read_batch_size=500,
        buffer_size=1000,
        max_retry_count=10,
        prefer_round_robin=True,
        checkpoint_after_ms=2000,
        checkpoint_max_count=1024,
        checkpoint_min_count=10,
        subscriber_max_count=10,
        credential=None,
        conversation_id=None,
        consumer_strategy=messages.ROUND_ROBIN,
    ) -> None:
        super().__init__(conversation_id, credential)
        self.stream = stream
        self.name = name
        self.resolve_links = resolve_links
        self.start_from = start_from
        self.timeout_ms = timeout_ms
        self.record_statistics = record_statistics
        self.live_buffer_size = live_buffer_size
        self.read_batch_size = read_batch_size
        self.buffer_size = buffer_size
        self.max_retry_count = max_retry_count
        self.prefer_round_robin = prefer_round_robin
        self.checkpoint_after_time = checkpoint_after_ms
        self.checkpoint_max_count = checkpoint_max_count
        self.checkpoint_min_count = checkpoint_min_count
        self.subscriber_max_count = subscriber_max_count
        self.consumer_strategy = consumer_strategy
    async def start(self, output: Queue) -> None:
        # Copy every setting into the protobuf request and send it.
        msg = proto.CreatePersistentSubscription()
        msg.subscription_group_name = self.name
        msg.event_stream_id = self.stream
        msg.start_from = self.start_from
        msg.resolve_link_tos = self.resolve_links
        msg.message_timeout_milliseconds = self.timeout_ms
        msg.record_statistics = self.record_statistics
        msg.live_buffer_size = self.live_buffer_size
        msg.read_batch_size = self.read_batch_size
        msg.buffer_size = self.buffer_size
        msg.max_retry_count = self.max_retry_count
        msg.prefer_round_robin = self.prefer_round_robin
        msg.checkpoint_after_time = self.checkpoint_after_time
        msg.checkpoint_max_count = self.checkpoint_max_count
        msg.checkpoint_min_count = self.checkpoint_min_count
        msg.subscriber_max_count = self.subscriber_max_count
        msg.named_consumer_strategy = self.consumer_strategy
        await output.put(
            OutboundMessage(
                self.conversation_id,
                TcpCommand.CreatePersistentSubscription,
                msg.SerializeToString(),
                self.credential,
            )
        )
        self._logger.debug(
            "CreatePersistentSubscription started on %s (%s)",
            self.stream,
            self.conversation_id,
        )
    async def reply(self, message: InboundMessage, output: Queue) -> None:
        # Translate the server's result code into success or a typed error.
        self.expect_only(message, TcpCommand.CreatePersistentSubscriptionCompleted)
        result = proto.CreatePersistentSubscriptionCompleted()
        result.ParseFromString(message.payload)
        if result.result == SubscriptionResult.Success:
            self.is_complete = True
            self.result.set_result(None)
        elif result.result == SubscriptionResult.AccessDenied:
            await self.error(
                exceptions.AccessDenied(
                    self.conversation_id, type(self).__name__, result.reason
                )
            )
        else:
            await self.error(
                exceptions.SubscriptionCreationFailed(
                    self.conversation_id, result.reason
                )
            )
class ConnectPersistentSubscription(Conversation):
    """Command class for connecting to an existing persistent subscription.
    The conversation resolves with a PersistentSubscription handle once
    the server confirms; subsequent events are fed into that handle's
    ``events`` iterator.
    """
    # NOTE(review): this enum appears unused — the class tracks state with
    # the ``is_live`` flag instead. Confirm before removing.
    class State(IntEnum):
        init = 0
        catch_up = 1
        live = 2
    def __init__(
        self,
        name,
        stream,
        max_in_flight=10,
        credential=None,
        conversation_id=None,
        auto_ack=False,
    ) -> None:
        super().__init__(conversation_id, credential)
        self.stream = stream
        self.max_in_flight = max_in_flight
        self.name = name
        self.is_live = False
        self.auto_ack = auto_ack
    async def start(self, output: Queue) -> None:
        # Send the connect request; the server replies with a confirmation.
        msg = proto.ConnectToPersistentSubscription()
        msg.subscription_id = self.name
        msg.event_stream_id = self.stream
        msg.allowed_in_flight_messages = self.max_in_flight
        await output.put(
            OutboundMessage(
                self.conversation_id,
                TcpCommand.ConnectToPersistentSubscription,
                msg.SerializeToString(),
                self.credential,
            )
        )
        self._logger.debug(
            "ConnectPersistentSubscription started on %s (%s)",
            self.stream,
            self.conversation_id,
        )
    def reply_from_init(self, response: InboundMessage, output: Queue):
        # First reply: build the subscription handle and hand it to the
        # caller by resolving the conversation's future.
        self.expect_only(response, TcpCommand.PersistentSubscriptionConfirmation)
        result = proto.PersistentSubscriptionConfirmation()
        result.ParseFromString(response.payload)
        self.subscription = PersistentSubscription(
            result.subscription_id,
            self.stream,
            self.conversation_id,
            result.last_commit_position,
            result.last_event_number,
            self.max_in_flight,
            output,
            self.auto_ack,
        )
        self.is_live = True
        self.result.set_result(self.subscription)
    async def reply_from_live(self, response: InboundMessage, output: Queue):
        # A fresh confirmation after reconnect: swap in the new output
        # queue so acks go to the live connection.
        if response.command == TcpCommand.PersistentSubscriptionConfirmation:
            self.subscription.out_queue = output
            return
        self.expect_only(response, TcpCommand.PersistentSubscriptionStreamEventAppeared)
        result = proto.StreamEventAppeared()
        result.ParseFromString(response.payload)
        await self.subscription.events.enqueue(_make_event(result.event))
    async def drop_subscription(self, response: InboundMessage) -> None:
        # A deliberate unsubscribe ends the iterator cleanly; any other
        # drop reason surfaces as an error to whoever is listening.
        body = proto.SubscriptionDropped()
        body.ParseFromString(response.payload)
        if self.is_live and body.reason == messages.SubscriptionDropReason.Unsubscribed:
            await self.subscription.events.enqueue(StopAsyncIteration())
            return
        if self.is_live:
            await self.error(
                exceptions.SubscriptionFailed(self.conversation_id, body.reason)
            )
            return
        await self.error(
            exceptions.SubscriptionCreationFailed(self.conversation_id, body.reason)
        )
    async def error(self, exn) -> None:
        # Route the error to the iterator once live, else fail the future.
        if self.is_live:
            await self.subscription.events.asend(exn)
        else:
            self.result.set_exception(exn)
    async def reply(self, message: InboundMessage, output: Queue) -> None:
        if message.command == TcpCommand.SubscriptionDropped:
            await self.drop_subscription(message)
        elif self.is_live:
            await self.reply_from_live(message, output)
        else:
            self.reply_from_init(message, output)
class SubscribeToStream(Conversation):
    """Command class for a volatile subscription to a single stream.
    The conversation resolves with a VolatileSubscription handle once the
    server confirms; live events are then fed into that handle's
    ``events`` iterator.
    """
    def __init__(
        self, stream, resolve_link_tos=True, conversation_id=None, credential=None
    ):
        self.stream = stream
        self.resolve_link_tos = resolve_link_tos
        self.is_live = False
        super().__init__(conversation_id, credential)
    async def start(self, output: Queue) -> None:
        msg = proto.SubscribeToStream()
        msg.event_stream_id = self.stream
        msg.resolve_link_tos = self.resolve_link_tos
        await output.put(
            OutboundMessage(
                self.conversation_id,
                TcpCommand.SubscribeToStream,
                msg.SerializeToString(),
                self.credential,
            )
        )
        self._logger.debug(
            "SubscribeToStream started on %s (%s)", self.stream, self.conversation_id
        )
    async def drop_subscription(self, response: InboundMessage) -> None:
        # A deliberate unsubscribe ends the iterator cleanly; any other
        # drop reason surfaces as an error.
        body = proto.SubscriptionDropped()
        body.ParseFromString(response.payload)
        if self.is_live and body.reason == messages.SubscriptionDropReason.Unsubscribed:
            await self.subscription.events.enqueue(StopAsyncIteration())
            return
        if self.is_live:
            await self.error(
                exceptions.SubscriptionFailed(self.conversation_id, body.reason)
            )
            return
        await self.error(
            exceptions.SubscriptionCreationFailed(self.conversation_id, body.reason)
        )
    async def error(self, exn) -> None:
        # Route the error to the subscription once live, else fail the future.
        if self.is_live:
            await self.subscription.raise_error(exn)
        else:
            self.result.set_exception(exn)
    async def reply_from_init(self, message: InboundMessage, output: Queue):
        # First reply: build the subscription handle and resolve the future.
        self.expect_only(message, TcpCommand.SubscriptionConfirmation)
        result = proto.SubscriptionConfirmation()
        result.ParseFromString(message.payload)
        self.subscription = VolatileSubscription(
            self.conversation_id,
            self.stream,
            output,
            result.last_event_number,
            result.last_commit_position,
        )
        self.is_live = True
        self.result.set_result(self.subscription)
    async def reply_from_live(self, message: InboundMessage) -> None:
        # A repeated confirmation (e.g. after reconnect) is ignored.
        self.expect_only(
            message, TcpCommand.StreamEventAppeared, TcpCommand.SubscriptionConfirmation
        )
        if message.command is TcpCommand.SubscriptionConfirmation:
            return
        result = proto.StreamEventAppeared()
        result.ParseFromString(message.payload)
        await self.subscription.events.enqueue(_make_event(result.event))
    async def reply(self, message: InboundMessage, output: Queue) -> None:
        if message.command == TcpCommand.SubscriptionDropped:
            await self.drop_subscription(message)
        elif self.is_live:
            await self.reply_from_live(message)
        else:
            await self.reply_from_init(message, output)
class CatchupSubscriptionPhase(IntEnum):
    """Lifecycle phases of a catch-up subscription."""
    READ_HISTORICAL = 0  # paging through events already in the stream
    CATCH_UP = 1  # live subscription confirmed; replaying missed events
    LIVE = 2  # fully caught up; events arrive via StreamEventAppeared
    RECONNECT = 3  # tearing down the stale subscription after a reconnect
class VolatileSubscription:
    """Client-side handle for a volatile (non-persistent) subscription.
    Events are delivered through ``self.events`` (a StreamingIterator);
    ``unsubscribe`` asks the server to drop the subscription. Supports
    ``async with`` for automatic unsubscription.
    """
    def __init__(
        self,
        conversation_id,
        stream,
        queue,
        event_number,
        commit_position,
        iterator=None,
    ):
        self.stream = stream
        self.output_queue = queue
        self.id = conversation_id
        self.first_event_number = event_number
        self.first_commit_position = commit_position
        self.last_event_number = event_number
        self.last_commit_position = commit_position
        self.events = iterator or StreamingIterator()
        self.is_complete = False
    async def unsubscribe(self):
        # Fire-and-forget; the server confirms with SubscriptionDropped.
        await self.output_queue.put(
            messages.OutboundMessage(self.id, TcpCommand.UnsubscribeFromStream, bytes())
        )
    async def raise_error(self, exn: Exception) -> None:
        # Propagate the failure to the consumer iterating self.events.
        self.is_complete = True
        await self.events.asend(exn)
    async def __aenter__(self):
        return self
    async def __aexit__(self, exc_type, exc, tb):
        if not self.is_complete:
            await self.unsubscribe()
class __catchup(Conversation):
    """Shared behaviour for catch-up subscriptions (stream and $all).
    Subclasses provide the paging messages and phase-specific reply
    handlers; this base covers error routing, reconnect teardown, and
    the phase transitions READ_HISTORICAL -> CATCH_UP -> LIVE.
    """
    async def error(self, exn) -> None:
        # Once the subscription handle has been handed out (future done),
        # errors flow through it; before that, fail the future directly.
        if self.result.done():
            await self.subscription.raise_error(exn)
        else:
            self.result.set_exception(exn)
    async def reconnect(self, output: Queue) -> None:
        # Drop buffered catch-up events and unsubscribe; the server's
        # SubscriptionDropped reply restarts the historical read.
        self.phase = CatchupSubscriptionPhase.RECONNECT
        self.buffer = []
        await self.subscription.unsubscribe()
    async def start(self, output):
        if self.phase > CatchupSubscriptionPhase.READ_HISTORICAL:
            self._logger.info("Tear down previous subscription")
            await self.reconnect(output)
            return
        self._logger.info("Starting catchup subscription at %s", self.from_event)
        # Never re-read events we have already yielded.
        self.from_event = max(
            self.from_event, self.next_event_number, self.last_event_number
        )
        # NOTE(review): PageStreamEventsBehaviour is not defined in this
        # module's visible scope; subclasses override start(), so this
        # path may be dead code — confirm.
        await PageStreamEventsBehaviour.start(self, output)
        self._logger.debug(
            "CatchupSubscription started on %s (%s)", self.stream, self.conversation_id
        )
    async def drop_subscription(self, response: InboundMessage) -> None:
        # A deliberate unsubscribe ends the iterator cleanly; any other
        # drop reason surfaces as an error.
        body = proto.SubscriptionDropped()
        body.ParseFromString(response.payload)
        if body.reason == messages.SubscriptionDropReason.Unsubscribed:
            await self.subscription.events.enqueue(StopAsyncIteration())
            return
        if self.result.done():
            await self.error(
                exceptions.SubscriptionFailed(self.conversation_id, body.reason)
            )
            return
        await self.error(
            exceptions.SubscriptionCreationFailed(self.conversation_id, body.reason)
        )
    @property
    def is_live(self):
        return self.phase == CatchupSubscriptionPhase.LIVE
    async def _move_to_next_phase(self, output):
        if self.phase == CatchupSubscriptionPhase.READ_HISTORICAL:
            # Historical read finished: subscribe live; events arriving
            # during CATCH_UP are buffered by the subclass.
            self.phase = CatchupSubscriptionPhase.CATCH_UP
            self._logger.info(
                "Caught up with historical events, creating volatile subscription"
            )
            await self._subscribe(output)
        elif self.phase == CatchupSubscriptionPhase.CATCH_UP:
            # Missed events replayed: flush the buffer and go live.
            self.phase = CatchupSubscriptionPhase.LIVE
            await self._yield_events(self.buffer)
    async def reply_from_live(self, message, output):
        if message.command == TcpCommand.SubscriptionDropped:
            await self.drop_subscription(message)
            return
        self.expect_only(message, TcpCommand.StreamEventAppeared)
        result = proto.StreamEventAppeared()
        result.ParseFromString(message.payload)
        await self._yield_events([_make_event(result.event)])
    async def reply_from_reconnect(self, message: InboundMessage, output: Queue):
        # Ignore everything until the server confirms the drop, then
        # restart from the historical-read phase.
        if message.command != TcpCommand.SubscriptionDropped:
            return
        self.phase = CatchupSubscriptionPhase.READ_HISTORICAL
        await self.start(output)
    async def _subscribe(self, output: Queue) -> None:
        # Open the live volatile subscription on the target stream.
        msg = proto.SubscribeToStream()
        msg.event_stream_id = self.stream
        msg.resolve_link_tos = self.resolve_link_tos
        await output.put(
            OutboundMessage(
                self.conversation_id,
                TcpCommand.SubscribeToStream,
                msg.SerializeToString(),
                self.credential,
            )
        )
class CatchupSubscription(__catchup):
    """Catch-up subscription to a single stream.
    Reads historical events from ``start_from``, then subscribes live,
    replaying any events that arrived while the live subscription was
    being established. The conversation resolves with a
    VolatileSubscription whose ``events`` iterator yields every event
    exactly once, in order.
    """
    def __init__(
        self,
        stream,
        start_from=0,
        batch_size=100,
        credential=None,
        conversation_id=None,
    ):
        # Call the base class first: the original called it last, which
        # replaced the subscription-specific logger (and result Future)
        # assigned here with the generic Conversation ones.
        super().__init__(conversation_id, credential)
        self.stream = stream
        self.iterator = StreamingIterator()
        self._logger = logging.get_named_logger(
            CatchupSubscription, self.conversation_id
        )
        self.from_event = start_from
        self.direction = StreamDirection.Forward
        self.batch_size = batch_size
        self.has_first_page = False
        self.require_master = False
        self.resolve_link_tos = True
        self.phase = CatchupSubscriptionPhase.READ_HISTORICAL
        # Events received while catching up, flushed when we go live.
        self.buffer = []
        self.subscribe_from = -1
        self.next_event_number = self.from_event
        self.last_event_number = -1
    async def start(self, output):
        # start() is invoked again on reconnection; tear down the stale
        # subscription before re-reading.
        if self.phase > CatchupSubscriptionPhase.READ_HISTORICAL:
            self._logger.info("Tear down previous subscription")
            await self.reconnect(output)
            return
        # Never re-read events we have already yielded.
        self.from_event = max(
            self.from_event, self.next_event_number, self.last_event_number
        )
        self._logger.info("Starting catchup subscription at %s", self.from_event)
        await output.put(page_stream_message(self, self.from_event))
        # Fixed: used module-level logging.debug instead of the
        # conversation's named logger, unlike every other conversation.
        self._logger.debug(
            "CatchupSubscription started on %s (%s)", self.stream, self.conversation_id
        )
    async def _yield_events(self, events):
        # Deliver only events past the last one yielded — dedupes the
        # overlap between the historical read and the live subscription.
        for event in events:
            if event.event_number <= self.last_event_number:
                continue
            await self.iterator.asend(event)
            self.last_event_number = event.event_number
    async def reply_from_catch_up(self, message, output):
        if message.command == TcpCommand.SubscriptionDropped:
            await self.drop_subscription(message)
        elif message.command == TcpCommand.SubscriptionConfirmation:
            # Live subscription confirmed: re-read anything that landed
            # between the end of the historical read and now.
            confirmation = proto.SubscriptionConfirmation()
            confirmation.ParseFromString(message.payload)
            self.subscribe_from = confirmation.last_event_number
            self._logger.info(
                "Subscribed successfully, catching up with missed events from %s",
                self.next_event_number,
            )
            await output.put(page_stream_message(self, self.next_event_number))
        elif message.command == TcpCommand.StreamEventAppeared:
            # Live events arriving mid-catch-up are buffered; they are
            # flushed (deduped) when the phase flips to LIVE.
            result = proto.StreamEventAppeared()
            result.ParseFromString(message.payload)
            self.buffer.append(_make_event(result.event))
        else:
            self.expect_only(message, TcpCommand.ReadStreamEventsForwardCompleted)
            result = ReadStreamEventsCompleted(message)
            await result.dispatch(self, output)
    async def reply(self, message: InboundMessage, output: Queue):
        # Route the inbound message to the handler for the current phase.
        if self.phase == CatchupSubscriptionPhase.READ_HISTORICAL:
            self.expect_only(message, TcpCommand.ReadStreamEventsForwardCompleted)
            result = ReadStreamEventsCompleted(message)
            await result.dispatch(self, output)
        elif self.phase == CatchupSubscriptionPhase.CATCH_UP:
            await self.reply_from_catch_up(message, output)
        elif self.phase == CatchupSubscriptionPhase.RECONNECT:
            await self.reply_from_reconnect(message, output)
        else:
            await self.reply_from_live(message, output)
    async def success(self, result: proto.ReadStreamEventsCompleted, output: Queue):
        # A page of historical (or missed) events arrived.
        finished = False
        events = []
        for e in result.events:
            event = _make_event(e)
            events.append(event)
        await self._yield_events(events)
        self.next_event_number = result.next_event_number
        # Todo: we should finish if the next event > subscription_start_pos
        if result.is_end_of_stream:
            finished = True
        if not self.has_first_page:
            # Hand the subscription handle to the caller on the first page.
            self.subscription = VolatileSubscription(
                self.conversation_id, self.stream, output, 0, 0, self.iterator
            )
            self.result.set_result(self.subscription)
            self.has_first_page = True
        if finished:
            await self._move_to_next_phase(output)
        else:
            await output.put(page_stream_message(self, result.next_event_number))
class CatchupAllSubscription(__catchup):
    """Catch-up subscription over the $all stream.
    Reads historical events from ``start_from`` (default: the beginning),
    then subscribes live, replaying any events that arrived while the
    live subscription was being established.
    """
    name = "$all"
    stream = ""
    def __init__(
        self, start_from=None, batch_size=100, credential=None, conversation_id=None
    ):
        # Call the base class first: the original called it last, which
        # replaced the subscription-specific logger (and result Future)
        # assigned here with the generic Conversation ones.
        super().__init__(conversation_id, credential)
        self.iterator = StreamingIterator()
        self._logger = logging.get_named_logger(
            CatchupAllSubscription, self.conversation_id
        )
        self.from_position = start_from or Position(0, 0)
        self.direction = StreamDirection.Forward
        self.batch_size = batch_size
        self.has_first_page = False
        self.require_master = False
        self.resolve_link_tos = True
        self.phase = CatchupSubscriptionPhase.READ_HISTORICAL
        # Events received while catching up, flushed when we go live.
        self.buffer = []
        self.next_position = self.from_position
        self.last_position = Position.min
    async def _yield_events(self, events):
        # Fixed: removed a leftover debug print() that wrote every event's
        # position comparison to stdout.
        # Deliver only events past the last one yielded — dedupes the
        # overlap between the historical read and the live subscription.
        for event in events:
            if event.position > self.last_position:
                await self.iterator.enqueue(event)
                self.last_position = event.position
    async def start(self, output):
        # start() is invoked again on reconnection; tear down the stale
        # subscription before re-reading.
        if self.phase > CatchupSubscriptionPhase.READ_HISTORICAL:
            self._logger.info("Tear down previous subscription")
            await self.reconnect(output)
            return
        # Never re-read positions we have already yielded.
        self.from_position = max(self.from_position, self.last_position)
        self._logger.info("Starting catchup subscription at %s", self.from_position)
        if self.direction == StreamDirection.Forward:
            command = TcpCommand.ReadAllEventsForward
        else:
            command = TcpCommand.ReadAllEventsBackward
        msg = proto.ReadAllEvents()
        msg.commit_position = self.from_position.commit
        msg.prepare_position = self.from_position.prepare
        msg.max_count = self.batch_size
        msg.resolve_link_tos = self.resolve_link_tos
        msg.require_master = self.require_master
        data = msg.SerializeToString()
        await output.put(
            OutboundMessage(self.conversation_id, command, data, self.credential)
        )
        self._logger.debug("CatchupAllSubscription started (%s)", self.conversation_id)
    async def reply_from_catch_up(self, message, output):
        if message.command == TcpCommand.SubscriptionDropped:
            await self.drop_subscription(message)
        elif message.command == TcpCommand.SubscriptionConfirmation:
            # Live subscription confirmed: re-read anything that landed
            # between the end of the historical read and now.
            confirmation = proto.SubscriptionConfirmation()
            confirmation.ParseFromString(message.payload)
            self._logger.info(
                "Subscribed successfully, catching up with missed events from %s",
                self.from_position,
            )
            await output.put(page_all_message(self, self.from_position))
        elif message.command == TcpCommand.StreamEventAppeared:
            # Live events arriving mid-catch-up are buffered; they are
            # flushed (deduped) when the phase flips to LIVE.
            result = proto.StreamEventAppeared()
            result.ParseFromString(message.payload)
            self.buffer.append(_make_event(result.event))
        else:
            result = ReadAllEventsCompleted(message)
            await result.dispatch(self, output)
    async def reply(self, message: InboundMessage, output: Queue):
        # Route the inbound message to the handler for the current phase.
        if self.phase == CatchupSubscriptionPhase.READ_HISTORICAL:
            self.expect_only(message, TcpCommand.ReadAllEventsForwardCompleted)
            result = ReadAllEventsCompleted(message)
            await result.dispatch(self, output)
        elif self.phase == CatchupSubscriptionPhase.CATCH_UP:
            await self.reply_from_catch_up(message, output)
        elif self.phase == CatchupSubscriptionPhase.RECONNECT:
            await self.reply_from_reconnect(message, output)
        else:
            await self.reply_from_live(message, output)
    async def success(self, result: proto.ReadAllEventsCompleted, output: Queue):
        # A page of historical (or missed) events arrived. (Annotation
        # fixed: this handles ReadAllEventsCompleted, not the stream form.)
        finished = result.commit_position == result.next_commit_position
        events = []
        for e in result.events:
            event = _make_event(e)
            events.append(event)
        # Todo: we should finish if the next event > subscription_start_pos
        if not self.has_first_page:
            # Hand the subscription handle to the caller on the first page.
            self.subscription = VolatileSubscription(
                self.conversation_id, self.name, output, 0, 0, self.iterator
            )
            self.result.set_result(self.subscription)
            self.has_first_page = True
        await self._yield_events(events)
        self.from_position = Position(
            result.next_commit_position, result.next_prepare_position
        )
        if finished:
            await self._move_to_next_phase(output)
        else:
            await output.put(page_all_message(self, self.from_position))
|
import json
import logging
import sys
import time
from asyncio import Future, Queue
try:
from asyncio.exceptions import InvalidStateError
except ImportError:
from asyncio.futures import InvalidStateError
from enum import IntEnum
from typing import Optional, Sequence, Union
from uuid import UUID, uuid4
from photonpump import exceptions
from photonpump import messages
from photonpump import messages_pb2 as proto
from photonpump.messages import (
AllStreamSlice,
ContentType,
Credential,
ExpectedVersion,
InboundMessage,
NewEvent,
NotHandledReason,
OutboundMessage,
Position,
ReadAllResult,
ReadEventResult,
ReadStreamResult,
StreamDirection,
StreamSlice,
SubscriptionResult,
TcpCommand,
_make_event,
)
class StreamingIterator:
def __init__(self, size=0):
self.items = Queue(size)
self.finished = False
self.fut = None
self.last_item = None
def __aiter__(self):
return self
async def enqueue_items(self, items):
for item in items:
await self.enqueue(item)
async def enqueue(self, item):
await self.items.put(item)
self.last_item = item
async def anext(self):
return await self.__anext__()
async def __anext__(self):
if self.finished and self.items.empty():
raise StopAsyncIteration()
_next = await self.items.get()
if isinstance(_next, Exception):
raise _next
return _next
async def asend(self, item):
await self.items.put(item)
@property
def last_event_number(self):
if self.last_item is None:
return None
return self.last_item.event_number
class Conversation:
    """Base class for request/response exchanges with the server.
    A conversation owns a correlation id, a result Future, and the logic
    for translating inbound TCP messages into a result or an error.
    Subclasses implement ``start`` (send the initial request) and
    ``reply`` (handle responses).
    """
    def __init__(
        self,
        conversation_id: Optional[UUID] = None,
        credential: Optional[Credential] = None,
    ) -> None:
        self.conversation_id = conversation_id or uuid4()
        self.result: Future = Future()
        self.is_complete = False
        self.credential = credential
        # NOTE(review): assumes ``logging`` here is photonpump's logging
        # helper module providing get_named_logger — confirm the import.
        self._logger = logging.get_named_logger(Conversation)
        self.one_way = False
    def __str__(self):
        return "<%s %s>" % (type(self).__name__, self.conversation_id)
    def __eq__(self, other):
        # Conversations are identified solely by their correlation id.
        if not isinstance(other, Conversation):
            return False
        return self.conversation_id == other.conversation_id
    def __hash__(self):
        # Keep hashing consistent with __eq__ (id-based); defining __eq__
        # alone would have made instances unhashable.
        return hash(self.conversation_id)
    async def start(self, output: Queue) -> Future:
        # Fixed: ``raise NotImplemented()`` raised a TypeError (the
        # NotImplemented singleton is not callable and not an exception).
        raise NotImplementedError()
    async def reply(self, message: InboundMessage, output: Queue) -> None:
        raise NotImplementedError()
    async def error(self, exn: Exception) -> None:
        """Fail the conversation with the given exception."""
        self.is_complete = True
        self.result.set_exception(exn)
    def expect_only(self, response: InboundMessage, *commands: TcpCommand):
        """Raise UnexpectedCommand unless the response is one of commands."""
        if response.command not in commands:
            raise exceptions.UnexpectedCommand(commands, response.command)
    async def respond_to(self, response: InboundMessage, output: Queue) -> None:
        """Entry point for inbound messages: handles the generic error
        commands, then delegates everything else to ``reply``."""
        try:
            if response.command is TcpCommand.BadRequest:
                return await self.conversation_error(exceptions.BadRequest, response)
            if response.command is TcpCommand.NotAuthenticated:
                return await self.conversation_error(
                    exceptions.NotAuthenticated, response
                )
            if response.command is TcpCommand.NotHandled:
                return await self.unhandled_message(response)
            return await self.reply(response, output)
        except Exception as exn:
            # Any parsing/dispatch failure is surfaced as PayloadUnreadable
            # with the original traceback attached.
            self._logger.exception("Failed to read server response", exc_info=True)
            exc_info = sys.exc_info()
            return await self.error(
                exceptions.PayloadUnreadable(
                    self.conversation_id, response.payload, exn
                ).with_traceback(exc_info[2])
            )
    async def unhandled_message(self, response) -> None:
        """Map a NotHandled response onto the matching typed exception."""
        body = proto.NotHandled()
        body.ParseFromString(response.payload)
        if body.reason == NotHandledReason.NotReady:
            exn = exceptions.NotReady(self.conversation_id)
        elif body.reason == NotHandledReason.TooBusy:
            exn = exceptions.TooBusy(self.conversation_id)
        elif body.reason == NotHandledReason.NotMaster:
            exn = exceptions.NotMaster(self.conversation_id)
        else:
            exn = exceptions.NotHandled(self.conversation_id, body.reason)
        return await self.error(exn)
    async def conversation_error(self, exn_type, response) -> None:
        """Fail the conversation with exn_type built from the response text."""
        error = response.payload.decode("UTF-8")
        exn = exn_type(self.conversation_id, error)
        return await self.error(exn)
class TimerConversation(Conversation):
    """A conversation whose result is the round-trip latency in seconds."""

    def __init__(self, conversation_id, credential):
        super().__init__(conversation_id, credential)
        # Record creation time; refreshed again when start() runs.
        self.started_at = time.perf_counter()

    async def start(self, output: Queue) -> None:
        """Reset the timer; subclasses send their request after this."""
        self.started_at = time.perf_counter()
        self._logger.debug("TimerConversation started (%s)", self.conversation_id)

    async def reply(self, message: InboundMessage, output: Queue) -> None:
        """Complete the conversation with the elapsed time in seconds."""
        self._logger.info("Replying from conversation %s", self)
        elapsed = time.perf_counter() - self.started_at
        self.result.set_result(elapsed)
        self.is_complete = True
class Heartbeat(TimerConversation):
    """Answers (or initiates) a heartbeat exchange with the server.

    Args:
        conversation_id: Correlation id for this heartbeat.
        direction: Heartbeat.INBOUND to answer a server heartbeat with a
            fire-and-forget response; Heartbeat.OUTBOUND to probe the
            server and await its HeartbeatResponse.
        credential (optional): Credentials for the outbound message.
    """

    INBOUND = 0
    OUTBOUND = 1

    def __init__(
        self, conversation_id: UUID, direction=INBOUND, credential=None
    ) -> None:
        # BUGFIX: the credential argument was previously discarded --
        # super() was always called with credential=None.
        super().__init__(conversation_id, credential=credential)
        self.direction = direction
        self.result = Future()

    async def start(self, output: Queue) -> Future:
        await super().start(output)
        if self.direction == Heartbeat.INBOUND:
            # Answering the server: no reply is expected back.
            one_way = True
            cmd = TcpCommand.HeartbeatResponse
        else:
            # Probing the server: we expect a HeartbeatResponse.
            one_way = False
            cmd = TcpCommand.HeartbeatRequest

        await output.put(
            OutboundMessage(
                self.conversation_id, cmd, b"", self.credential, one_way=one_way
            )
        )
        self._logger.debug("Heartbeat started (%s)", self.conversation_id)

    async def reply(self, message: InboundMessage, output: Queue) -> None:
        self.expect_only(message, TcpCommand.HeartbeatResponse)
        await super().reply(message, output)
class Ping(TimerConversation):
    """Measures server round-trip time via the Ping/Pong commands."""

    def __init__(self, conversation_id: UUID = None, credential=None) -> None:
        super().__init__(conversation_id or uuid4(), credential)

    async def start(self, output: Queue) -> Future:
        await super().start(output)
        if output:
            request = OutboundMessage(
                self.conversation_id, TcpCommand.Ping, b"", self.credential
            )
            await output.put(request)
        self._logger.debug("Ping started (%s)", self.conversation_id)

        return self.result

    async def reply(self, message: InboundMessage, output: Queue) -> None:
        self.expect_only(message, TcpCommand.Pong)
        await super().reply(message, output)
class WriteEvents(Conversation):
    """Command class for writing a sequence of events to a single
    stream.

    Args:
        stream: The name of the stream to write to.
        events: A sequence of events to write.
        expected_version (optional): The expected version of the
            target stream used for concurrency control.
        required_master (optional): True if this command must be
            sent direct to the master node, otherwise False.
        correlation_id (optional): A unique identifer for this
            command.
    """

    def __init__(
        self,
        stream: str,
        events: Sequence[NewEvent],
        expected_version: Union[ExpectedVersion, int] = ExpectedVersion.Any,
        require_master: bool = False,
        conversation_id: UUID = None,
        credential=None,
        loop=None,
    ):
        super().__init__(conversation_id, credential)
        self._logger = logging.get_named_logger(WriteEvents)
        self.stream = stream
        self.require_master = require_master
        self.events = events
        self.expected_version = expected_version

    async def start(self, output: Queue) -> None:
        # Serialize the events into a proto.WriteEvents message and queue it.
        msg = proto.WriteEvents()
        msg.event_stream_id = self.stream
        msg.require_master = self.require_master
        msg.expected_version = self.expected_version

        for event in self.events:
            e = msg.events.add()
            e.event_id = event.id.bytes_le
            e.event_type = event.type

            # Content-type selection: str is sent as UTF-8 JSON text, bytes
            # pass through as binary, any other truthy value is JSON-encoded,
            # and falsy data becomes an empty binary payload.
            if isinstance(event.data, str):
                e.data_content_type = ContentType.Json
                e.data = event.data.encode("UTF-8")
            elif isinstance(event.data, bytes):
                e.data_content_type = ContentType.Binary
                e.data = event.data
            elif event.data:
                e.data_content_type = ContentType.Json
                e.data = json.dumps(event.data).encode("UTF-8")
            else:
                e.data_content_type = ContentType.Binary
                e.data = bytes()

            # Metadata is always JSON when present, empty binary otherwise.
            if event.metadata:
                e.metadata_content_type = ContentType.Json
                e.metadata = json.dumps(event.metadata).encode("UTF-8")
            else:
                e.metadata_content_type = ContentType.Binary
                e.metadata = bytes()

        data = msg.SerializeToString()
        await output.put(
            OutboundMessage(
                self.conversation_id, TcpCommand.WriteEvents, data, self.credential
            )
        )
        self._logger.debug(
            "WriteEvents started on %s (%s)", self.stream, self.conversation_id
        )

    async def reply(self, message: InboundMessage, output: Queue) -> None:
        self.expect_only(message, TcpCommand.WriteEventsCompleted)
        result = proto.WriteEventsCompleted()
        result.ParseFromString(message.payload)

        # NOTE(review): this compares against proto.AccessDenied at module
        # level rather than an OperationResult enum member as other handlers
        # do -- confirm the generated proto module exposes that name.
        if result.result == proto.AccessDenied:
            await self.error(
                exceptions.AccessDenied(
                    self.conversation_id, type(self).__name__, result.message
                )
            )

        # NOTE(review): after the AccessDenied branch the future already
        # holds an exception, so set_result raises InvalidStateError, which
        # is logged and re-raised here.
        try:
            self.result.set_result(result)
            self.is_complete = True
        except InvalidStateError as exn:
            self._logger.error(self.result, message, self, exc_info=True)

            raise exn
class ReadAllEventsCompleted:
    """Parses a ReadAllEventsCompleted payload and routes the outcome."""

    def __init__(self, message: InboundMessage):
        self._data = proto.ReadAllEventsCompleted()
        self._data.ParseFromString(message.payload)
        self._conversation_id = message.conversation_id

    async def dispatch(self, conversation, output):
        """Call success() or error() on the conversation as appropriate."""
        outcome = self._data.result

        if outcome == ReadAllResult.Success:
            await conversation.success(self._data, output)
        elif outcome == ReadAllResult.Error:
            failure = exceptions.ReadError(
                self._conversation_id, "$all", self._data.error
            )
            await conversation.error(failure)
        elif outcome == ReadAllResult.AccessDenied:
            failure = exceptions.AccessDenied(
                self._conversation_id, type(self).__name__, self._data.error
            )
            await conversation.error(failure)
class ReadEventCompleted:
    """Parses a ReadEventCompleted payload and routes the outcome."""

    def __init__(self, message):
        self._data = proto.ReadEventCompleted()
        self._data.ParseFromString(message.payload)
        self._conversation_id = message.conversation_id

    async def dispatch(self, conversation, output):
        """Call success() or error() on the conversation as appropriate."""
        outcome = self._data.result

        if outcome == ReadEventResult.Success:
            await conversation.success(self._data, output)
            return

        # Build the matching exception for each failure outcome; unknown
        # outcomes are ignored, as before.
        if outcome == ReadEventResult.NoStream:
            failure = exceptions.StreamNotFound(
                self._conversation_id, conversation.stream
            )
        elif outcome == ReadEventResult.StreamDeleted:
            failure = exceptions.StreamDeleted(
                self._conversation_id, conversation.stream
            )
        elif outcome == ReadEventResult.Error:
            failure = exceptions.ReadError(
                self._conversation_id, conversation.stream, outcome
            )
        elif outcome == ReadEventResult.AccessDenied:
            failure = exceptions.AccessDenied(
                self._conversation_id,
                type(conversation).__name__,
                self._data.error,
                stream=conversation.stream,
            )
        elif outcome == ReadEventResult.NotFound:
            failure = exceptions.EventNotFound(
                self._conversation_id, conversation.name, conversation.event_number
            )
        else:
            return

        await conversation.error(failure)
class ReadStreamEventsCompleted:
    """Parses a ReadStreamEventsCompleted payload and routes the outcome.

    Calls conversation.success() on success, otherwise reports the
    matching photonpump exception through conversation.error().
    """

    def __init__(self, message):
        self._data = proto.ReadStreamEventsCompleted()
        self._data.ParseFromString(message.payload)
        self._conversation_id = message.conversation_id

    async def dispatch(self, conversation, output):
        result = self._data.result
        if result == ReadStreamResult.Success:
            await conversation.success(self._data, output)
        elif result == ReadStreamResult.NoStream:
            await conversation.error(
                exceptions.StreamNotFound(self._conversation_id, conversation.stream)
            )
        elif result == ReadStreamResult.StreamDeleted:
            await conversation.error(
                exceptions.StreamDeleted(self._conversation_id, conversation.stream)
            )
        elif result == ReadStreamResult.Error:
            await conversation.error(
                exceptions.ReadError(self._conversation_id, conversation.stream, result)
            )
        elif result == ReadStreamResult.AccessDenied:
            await conversation.error(
                exceptions.AccessDenied(
                    self._conversation_id,
                    type(conversation).__name__,
                    self._data.error,
                    stream=conversation.stream,
                )
            )
        else:
            # BUGFIX: the final branch previously referenced attributes that
            # do not exist on this class (self.result_type, self.stream,
            # self.event_number) -- a copy/paste remnant that raised
            # AttributeError whenever an unhandled result (e.g. NotModified)
            # arrived. Report unexpected results as a ReadError instead.
            await conversation.error(
                exceptions.ReadError(self._conversation_id, conversation.stream, result)
            )
class ReadEvent(Conversation):
    """Command class for reading a single event.

    Args:
        stream: The name of the stream containing the event.
        event_number: The sequence number of the event to read.
        resolve_links (optional): True if eventstore should
            automatically resolve Link Events, otherwise False.
        required_master (optional): True if this command must be
            sent direct to the master node, otherwise False.
        correlation_id (optional): A unique identifer for this
            command.
    """

    def __init__(
        self,
        stream: str,
        event_number: int,
        resolve_links: bool = True,
        require_master: bool = False,
        conversation_id: Optional[UUID] = None,
        credential=None,
    ) -> None:
        super().__init__(conversation_id, credential=credential)
        self.stream = stream
        self.event_number = event_number
        self.require_master = require_master
        self.resolve_link_tos = resolve_links
        # `name` mirrors `stream`; ReadEventCompleted uses it when building
        # an EventNotFound error.
        self.name = stream

    async def start(self, output: Queue) -> None:
        # Build and enqueue the ReadEvent request.
        msg = proto.ReadEvent()
        msg.event_number = self.event_number
        msg.event_stream_id = self.stream
        msg.require_master = self.require_master
        msg.resolve_link_tos = self.resolve_link_tos

        data = msg.SerializeToString()
        await output.put(
            OutboundMessage(
                self.conversation_id, TcpCommand.Read, data, self.credential
            )
        )
        self._logger.debug(
            "ReadEvent started on %s (%s)", self.stream, self.conversation_id
        )

    async def reply(self, message: InboundMessage, output: Queue):
        # Delegate outcome handling to the shared dispatcher.
        result = ReadEventCompleted(message)
        await result.dispatch(self, output)

    async def success(self, response, output: Queue):
        # Resolve the future with the single materialised event.
        self.is_complete = True
        self.result.set_result(_make_event(response.event))
def page_stream_message(conversation, from_event):
    """Build the OutboundMessage requesting one page of stream events."""
    reading_forward = conversation.direction == StreamDirection.Forward
    command = (
        TcpCommand.ReadStreamEventsForward
        if reading_forward
        else TcpCommand.ReadStreamEventsBackward
    )

    request = proto.ReadStreamEvents()
    request.event_stream_id = conversation.stream
    request.from_event_number = from_event
    request.max_count = conversation.batch_size
    request.require_master = conversation.require_master
    request.resolve_link_tos = conversation.resolve_link_tos

    return OutboundMessage(
        conversation.conversation_id,
        command,
        request.SerializeToString(),
        conversation.credential,
    )
def page_all_message(conversation, from_position: Position):
    """Build the OutboundMessage requesting one page of the $all stream."""
    reading_forward = conversation.direction == StreamDirection.Forward
    command = (
        TcpCommand.ReadAllEventsForward
        if reading_forward
        else TcpCommand.ReadAllEventsBackward
    )

    request = proto.ReadAllEvents()
    request.commit_position = from_position.commit
    request.prepare_position = from_position.prepare
    request.max_count = conversation.batch_size
    request.require_master = conversation.require_master
    request.resolve_link_tos = conversation.resolve_link_tos

    return OutboundMessage(
        conversation.conversation_id,
        command,
        request.SerializeToString(),
        conversation.credential,
    )
class ReadAllEvents(Conversation):
    """Command class for reading a page of events from the $all stream.

    Args:
        from_position (optional): The position to start reading from;
            defaults to the beginning of the stream.
        max_count (optional): The maximum number of events to return.
        resolve_links (optional): True if eventstore should
            automatically resolve Link Events, otherwise False.
        require_master (optional): True if this command must be
            sent direct to the master node, otherwise False.
        direction (optional): Whether to read forward or backward.
        correlation_id (optional): A unique identifer for this
            command.
    """

    def __init__(
        self,
        from_position: Optional[Position] = None,
        max_count: int = 100,
        resolve_links: bool = True,
        require_master: bool = False,
        direction: StreamDirection = StreamDirection.Forward,
        credential=None,
        conversation_id: UUID = None,
    ) -> None:
        super().__init__(conversation_id, credential=credential)
        self.has_first_page = False
        self.direction = direction
        # BUGFIX: a None from_position previously reached page_all_message,
        # which dereferences .commit/.prepare and raised AttributeError.
        # Default to the start of $all, matching sibling IterAllEvents.
        self.from_position = from_position or Position(0, 0)
        self.batch_size = max_count
        self.require_master = require_master
        self.resolve_link_tos = resolve_links

    async def reply(self, message: InboundMessage, output: Queue) -> None:
        # Delegate outcome handling to the shared dispatcher.
        result = ReadAllEventsCompleted(message)
        await result.dispatch(self, output)

    async def success(self, result: proto.ReadAllEventsCompleted, output: Queue):
        # Resolve the future with a single AllStreamSlice page.
        events = [_make_event(x) for x in result.events]
        self.is_complete = True
        self.result.set_result(
            AllStreamSlice(
                events,
                Position(result.next_commit_position, result.next_prepare_position),
                Position(result.commit_position, result.prepare_position),
            )
        )

    async def start(self, output):
        await output.put(page_all_message(self, self.from_position))
class ReadStreamEvents(Conversation):
    """Command class for reading a single page of events from a stream.

    Args:
        stream: The name of the stream containing the event.
        from_event: The sequence number to start reading from.
        max_count (optional): Maximum number of events in the page.
        resolve_links (optional): True if eventstore should
            automatically resolve Link Events, otherwise False.
        required_master (optional): True if this command must be
            sent direct to the master node, otherwise False.
        correlation_id (optional): A unique identifer for this
            command.
    """

    def __init__(
        self,
        stream: str,
        from_event: int = 0,
        max_count: int = 100,
        resolve_links: bool = True,
        require_master: bool = False,
        direction: StreamDirection = StreamDirection.Forward,
        credential=None,
        conversation_id: UUID = None,
    ) -> None:
        super().__init__(conversation_id, credential=credential)
        self.has_first_page = False
        self.stream = stream
        self.direction = direction
        self.from_event = from_event
        self.batch_size = max_count
        self.require_master = require_master
        self.resolve_link_tos = resolve_links

    async def reply(self, message: InboundMessage, output: Queue):
        # Delegate outcome handling to the shared dispatcher.
        result = ReadStreamEventsCompleted(message)
        await result.dispatch(self, output)

    async def start(self, output: Queue) -> None:
        message = page_stream_message(self, self.from_event)
        await output.put(message)
        self._logger.debug(
            "Starting ReadStreamEvents (%d events starting at %d from %s)",
            self.batch_size,
            self.from_event,
            self.stream,
        )

    async def success(self, result: proto.ReadStreamEventsCompleted, output: Queue):
        # Resolve the future with a single StreamSlice page.
        events = [_make_event(x) for x in result.events]
        self.is_complete = True
        self.result.set_result(
            StreamSlice(
                events,
                result.next_event_number,
                result.last_event_number,
                None,
                result.last_commit_position,
                result.is_end_of_stream,
            )
        )
class IterAllEvents(Conversation):
    """
    Command class for iterating all events in the database.

    Args:
        from_position (optional): The position to start reading from.
            Defaults to photonpump.Beginning when direction is Forward,
            photonpump.End when direction is Backward.
        batch_size (optional): The maximum number of events to read at a time.
        resolve_links (optional): True if eventstore should
            automatically resolve Link Events, otherwise False.
        require_master (optional): True if this command must be
            sent direct to the master node, otherwise False.
        direction (optional): Controls whether to read forward or backward
            through the events. Defaults to StreamDirection.Forward
        correlation_id (optional): A unique identifer for this
            command.
    """

    def __init__(
        self,
        from_position: Position = None,
        batch_size: int = 100,
        resolve_links: bool = True,
        require_master: bool = False,
        direction: StreamDirection = StreamDirection.Forward,
        credential=None,
        conversation_id: UUID = None,
    ):
        super().__init__(conversation_id, credential)
        self.batch_size = batch_size
        self.has_first_page = False
        self.resolve_link_tos = resolve_links
        self.require_master = require_master
        # NOTE(review): code defaults to Position(0, 0) for both directions,
        # despite the docstring's claim about Backward -- verify.
        self.from_position = from_position or Position(0, 0)
        self.direction = direction
        self._logger = logging.get_named_logger(IterAllEvents)
        # Async iterator handed to the caller via self.result.
        self.iterator = StreamingIterator(self.batch_size)

        if direction == StreamDirection.Forward:
            self.command = TcpCommand.ReadAllEventsForward
        else:
            self.command = TcpCommand.ReadAllEventsBackward

    async def start(self, output):
        await output.put(page_all_message(self, self.from_position))
        self._logger.debug("IterAllEvents started (%s)", self.conversation_id)

    async def reply(self, message, output):
        # Delegate outcome handling to the shared dispatcher.
        result = ReadAllEventsCompleted(message)
        await result.dispatch(self, output)

    async def success(self, result: proto.ReadAllEventsCompleted, output: Queue):
        # The first page resolves the future with the iterator itself; later
        # pages only feed it.
        if not self.has_first_page:
            self.result.set_result(self.iterator)
            self.has_first_page = True

        events = [_make_event(x) for x in result.events]
        await self.iterator.enqueue_items(events)

        # End of $all is reached when the next page would start at the same
        # commit position as this one.
        at_end = result.commit_position == result.next_commit_position

        if at_end:
            self.is_complete = True
            await self.iterator.asend(StopAsyncIteration())

            return

        # Otherwise request the following page.
        await output.put(
            page_all_message(
                self,
                Position(result.next_commit_position, result.next_prepare_position),
            )
        )

    async def error(self, exn: Exception) -> None:
        # Before the first page the caller awaits self.result; afterwards
        # errors are pushed down the iterator.
        self.is_complete = True
        if self.has_first_page:
            await self.iterator.asend(exn)
        else:
            self.result.set_exception(exn)
class IterStreamEvents(Conversation):
    """Command class for iterating events from a stream.

    Args:
        stream: The name of the stream containing the event.
        from_event (optional): The sequence number to start from; defaults
            to the start (Forward) or end (Backward) of the stream.
        batch_size (optional): Number of events fetched per page.
        resolve_links (optional): True if eventstore should
            automatically resolve Link Events, otherwise False.
        required_master (optional): True if this command must be
            sent direct to the master node, otherwise False.
        correlation_id (optional): A unique identifer for this
            command.
    """

    def __init__(
        self,
        stream: str,
        from_event: int = None,
        batch_size: int = 100,
        resolve_links: bool = True,
        require_master: bool = False,
        direction: StreamDirection = StreamDirection.Forward,
        credential=None,
        conversation_id: UUID = None,
    ):
        super().__init__(conversation_id, credential)
        self.batch_size = batch_size
        self.has_first_page = False
        self.stream = stream
        self.resolve_link_tos = resolve_links
        self.require_master = require_master
        self.direction = direction
        self._logger = logging.get_named_logger(IterStreamEvents)
        # Async iterator handed to the caller via self.result.
        self.iterator = StreamingIterator(self.batch_size)

        # Default start position depends on direction: 0 when reading
        # forward, -1 (stream end) when reading backward.
        if direction == StreamDirection.Forward:
            self.command = TcpCommand.ReadStreamEventsForward
            self.from_event = from_event or 0
        else:
            self.command = TcpCommand.ReadStreamEventsBackward
            self.from_event = from_event or -1

    async def start(self, output: Queue):
        # On a restart, resume from the iterator's last-seen event number.
        await output.put(
            page_stream_message(
                self, self.iterator.last_event_number or self.from_event
            )
        )
        self._logger.debug(
            "IterStreamEvents started on %s (%s)", self.stream, self.conversation_id
        )

    async def reply(self, message: InboundMessage, output: Queue) -> None:
        # Delegate outcome handling to the shared dispatcher.
        result = ReadStreamEventsCompleted(message)
        await result.dispatch(self, output)

    async def success(self, result: proto.ReadStreamEventsCompleted, output: Queue):
        # Request the next page before enqueuing this one's events.
        if not result.is_end_of_stream:
            await output.put(page_stream_message(self, result.next_event_number))

        events = [_make_event(x) for x in result.events]
        await self.iterator.enqueue_items(events)

        # The first page resolves the future with the iterator itself.
        if not self.has_first_page:
            self.result.set_result(self.iterator)
            self.has_first_page = True

        if result.is_end_of_stream:
            self.is_complete = True
            await self.iterator.asend(StopAsyncIteration())

    async def error(self, exn: Exception) -> None:
        # Before the first page the caller awaits self.result; afterwards
        # errors are pushed down the iterator.
        self.is_complete = True
        if self.has_first_page:
            await self.iterator.asend(exn)
        else:
            self.result.set_exception(exn)
class PersistentSubscription:
    """Client-side handle for a persistent (competing-consumer) subscription.

    Events are consumed from the ``events`` iterator; ``ack`` confirms
    processing of a single event back to the server.
    """

    def __init__(
        self,
        name,
        stream,
        correlation_id,
        initial_commit,
        initial_event_number,
        buffer_size,
        out_queue,
        auto_ack=False,
    ):
        self.initial_commit_position = initial_commit
        self.name = name
        self.conversation_id = correlation_id
        self.last_event_number = initial_event_number
        self.stream = stream
        self.buffer_size = buffer_size
        self.auto_ack = auto_ack
        # Async iterator that the conversation feeds with incoming events.
        self.events = StreamingIterator()
        # Queue used to send acks back to the server.
        self.out_queue = out_queue

    def __str__(self):
        return "Subscription in group %s to %s at event number %d" % (
            self.name,
            self.stream,
            self.last_event_number,
        )

    async def ack(self, event):
        """Acknowledge one event, identified by its little-endian UUID bytes."""
        payload = proto.PersistentSubscriptionAckEvents()
        payload.subscription_id = self.name
        payload.processed_event_ids.append(event.received_event.id.bytes_le)
        message = OutboundMessage(
            self.conversation_id,
            TcpCommand.PersistentSubscriptionAckEvents,
            payload.SerializeToString(),
        )
        await self.out_queue.put(message)
class CreatePersistentSubscription(Conversation):
    """Creates a persistent subscription group on a stream.

    The result future resolves to None on success; failures surface as
    AccessDenied or SubscriptionCreationFailed.
    """

    def __init__(
        self,
        name,
        stream,
        resolve_links=True,
        start_from=-1,
        timeout_ms=30000,
        record_statistics=False,
        live_buffer_size=500,
        read_batch_size=500,
        buffer_size=1000,
        max_retry_count=10,
        prefer_round_robin=True,
        checkpoint_after_ms=2000,
        checkpoint_max_count=1024,
        checkpoint_min_count=10,
        subscriber_max_count=10,
        credential=None,
        conversation_id=None,
        consumer_strategy=messages.ROUND_ROBIN,
    ) -> None:
        super().__init__(conversation_id, credential)
        self.stream = stream
        self.name = name
        self.resolve_links = resolve_links
        # NOTE(review): start_from=-1 presumably means "from the end of the
        # stream" -- confirm against the EventStore documentation.
        self.start_from = start_from
        self.timeout_ms = timeout_ms
        self.record_statistics = record_statistics
        self.live_buffer_size = live_buffer_size
        self.read_batch_size = read_batch_size
        self.buffer_size = buffer_size
        self.max_retry_count = max_retry_count
        self.prefer_round_robin = prefer_round_robin
        self.checkpoint_after_time = checkpoint_after_ms
        self.checkpoint_max_count = checkpoint_max_count
        self.checkpoint_min_count = checkpoint_min_count
        self.subscriber_max_count = subscriber_max_count
        self.consumer_strategy = consumer_strategy

    async def start(self, output: Queue) -> None:
        # Copy every configured option into the protobuf request.
        msg = proto.CreatePersistentSubscription()
        msg.subscription_group_name = self.name
        msg.event_stream_id = self.stream
        msg.start_from = self.start_from
        msg.resolve_link_tos = self.resolve_links
        msg.message_timeout_milliseconds = self.timeout_ms
        msg.record_statistics = self.record_statistics
        msg.live_buffer_size = self.live_buffer_size
        msg.read_batch_size = self.read_batch_size
        msg.buffer_size = self.buffer_size
        msg.max_retry_count = self.max_retry_count
        msg.prefer_round_robin = self.prefer_round_robin
        msg.checkpoint_after_time = self.checkpoint_after_time
        msg.checkpoint_max_count = self.checkpoint_max_count
        msg.checkpoint_min_count = self.checkpoint_min_count
        msg.subscriber_max_count = self.subscriber_max_count
        msg.named_consumer_strategy = self.consumer_strategy

        await output.put(
            OutboundMessage(
                self.conversation_id,
                TcpCommand.CreatePersistentSubscription,
                msg.SerializeToString(),
                self.credential,
            )
        )
        self._logger.debug(
            "CreatePersistentSubscription started on %s (%s)",
            self.stream,
            self.conversation_id,
        )

    async def reply(self, message: InboundMessage, output: Queue) -> None:
        self.expect_only(message, TcpCommand.CreatePersistentSubscriptionCompleted)
        result = proto.CreatePersistentSubscriptionCompleted()
        result.ParseFromString(message.payload)

        if result.result == SubscriptionResult.Success:
            self.is_complete = True
            self.result.set_result(None)
        elif result.result == SubscriptionResult.AccessDenied:
            await self.error(
                exceptions.AccessDenied(
                    self.conversation_id, type(self).__name__, result.reason
                )
            )
        else:
            # Any other outcome (e.g. the group already exists) is reported
            # as a creation failure carrying the server's reason.
            await self.error(
                exceptions.SubscriptionCreationFailed(
                    self.conversation_id, result.reason
                )
            )
class ConnectPersistentSubscription(Conversation):
    """Connects to an existing persistent subscription group.

    The result future resolves to a PersistentSubscription once the
    server confirms the connection.
    """

    # NOTE(review): this State enum appears unused in this module chunk;
    # the boolean is_live flag tracks lifecycle instead.
    class State(IntEnum):
        init = 0
        catch_up = 1
        live = 2

    def __init__(
        self,
        name,
        stream,
        max_in_flight=10,
        credential=None,
        conversation_id=None,
        auto_ack=False,
    ) -> None:
        super().__init__(conversation_id, credential)
        self.stream = stream
        self.max_in_flight = max_in_flight
        self.name = name
        self.is_live = False
        self.auto_ack = auto_ack

    async def start(self, output: Queue) -> None:
        msg = proto.ConnectToPersistentSubscription()
        msg.subscription_id = self.name
        msg.event_stream_id = self.stream
        msg.allowed_in_flight_messages = self.max_in_flight

        await output.put(
            OutboundMessage(
                self.conversation_id,
                TcpCommand.ConnectToPersistentSubscription,
                msg.SerializeToString(),
                self.credential,
            )
        )
        self._logger.debug(
            "ConnectPersistentSubscription started on %s (%s)",
            self.stream,
            self.conversation_id,
        )

    def reply_from_init(self, response: InboundMessage, output: Queue):
        # Synchronous handler: builds the subscription handle and resolves
        # the result future (called without await from reply()).
        self.expect_only(response, TcpCommand.PersistentSubscriptionConfirmation)
        result = proto.PersistentSubscriptionConfirmation()
        result.ParseFromString(response.payload)

        self.subscription = PersistentSubscription(
            result.subscription_id,
            self.stream,
            self.conversation_id,
            result.last_commit_position,
            result.last_event_number,
            self.max_in_flight,
            output,
            self.auto_ack,
        )
        self.is_live = True
        self.result.set_result(self.subscription)

    async def reply_from_live(self, response: InboundMessage, output: Queue):
        # A repeated confirmation (e.g. after reconnect) refreshes the
        # outbound queue used for acks.
        if response.command == TcpCommand.PersistentSubscriptionConfirmation:
            self.subscription.out_queue = output

            return

        self.expect_only(response, TcpCommand.PersistentSubscriptionStreamEventAppeared)
        result = proto.StreamEventAppeared()
        result.ParseFromString(response.payload)

        # NOTE(review): StreamingIterator's enqueue() is not visible in this
        # chunk (only enqueue_items/asend are) -- confirm it exists.
        await self.subscription.events.enqueue(_make_event(result.event))

    async def drop_subscription(self, response: InboundMessage) -> None:
        # A clean unsubscribe terminates the iterator; any other reason is
        # surfaced as a failure (a creation failure if not yet live).
        body = proto.SubscriptionDropped()
        body.ParseFromString(response.payload)

        if self.is_live and body.reason == messages.SubscriptionDropReason.Unsubscribed:
            await self.subscription.events.enqueue(StopAsyncIteration())

            return

        if self.is_live:
            await self.error(
                exceptions.SubscriptionFailed(self.conversation_id, body.reason)
            )

            return

        await self.error(
            exceptions.SubscriptionCreationFailed(self.conversation_id, body.reason)
        )

    async def error(self, exn) -> None:
        # Route errors to the subscription once it has been handed out.
        if self.is_live:
            await self.subscription.events.asend(exn)
        else:
            self.result.set_exception(exn)

    async def reply(self, message: InboundMessage, output: Queue) -> None:
        # Route by command and lifecycle state.
        if message.command == TcpCommand.SubscriptionDropped:
            await self.drop_subscription(message)
        elif self.is_live:
            await self.reply_from_live(message, output)
        else:
            self.reply_from_init(message, output)
class SubscribeToStream(Conversation):
    """Creates a volatile subscription to a single stream.

    The result future resolves to a VolatileSubscription once the server
    confirms; subsequent events are pushed to its iterator.
    """

    def __init__(
        self, stream, resolve_link_tos=True, conversation_id=None, credential=None
    ):
        self.stream = stream
        self.resolve_link_tos = resolve_link_tos
        # Flips to True once the subscription confirmation is received.
        self.is_live = False
        super().__init__(conversation_id, credential)

    async def start(self, output: Queue) -> None:
        msg = proto.SubscribeToStream()
        msg.event_stream_id = self.stream
        msg.resolve_link_tos = self.resolve_link_tos

        await output.put(
            OutboundMessage(
                self.conversation_id,
                TcpCommand.SubscribeToStream,
                msg.SerializeToString(),
                self.credential,
            )
        )
        self._logger.debug(
            "SubscribeToStream started on %s (%s)", self.stream, self.conversation_id
        )

    async def drop_subscription(self, response: InboundMessage) -> None:
        # A clean unsubscribe terminates the iterator; any other reason is
        # surfaced as a failure (a creation failure if not yet live).
        body = proto.SubscriptionDropped()
        body.ParseFromString(response.payload)

        if self.is_live and body.reason == messages.SubscriptionDropReason.Unsubscribed:
            await self.subscription.events.enqueue(StopAsyncIteration())

            return

        if self.is_live:
            await self.error(
                exceptions.SubscriptionFailed(self.conversation_id, body.reason)
            )

            return

        await self.error(
            exceptions.SubscriptionCreationFailed(self.conversation_id, body.reason)
        )

    async def error(self, exn) -> None:
        # Route errors to the subscription once it has been handed out.
        if self.is_live:
            await self.subscription.raise_error(exn)
        else:
            self.result.set_exception(exn)

    async def reply_from_init(self, message: InboundMessage, output: Queue):
        # First reply must be the confirmation; hand the subscription to the
        # caller via the result future.
        self.expect_only(message, TcpCommand.SubscriptionConfirmation)
        result = proto.SubscriptionConfirmation()
        result.ParseFromString(message.payload)

        self.subscription = VolatileSubscription(
            self.conversation_id,
            self.stream,
            output,
            result.last_event_number,
            result.last_commit_position,
        )
        self.is_live = True
        self.result.set_result(self.subscription)

    async def reply_from_live(self, message: InboundMessage) -> None:
        # Duplicate confirmations are tolerated and ignored.
        self.expect_only(
            message, TcpCommand.StreamEventAppeared, TcpCommand.SubscriptionConfirmation
        )

        if message.command is TcpCommand.SubscriptionConfirmation:
            return

        result = proto.StreamEventAppeared()
        result.ParseFromString(message.payload)
        await self.subscription.events.enqueue(_make_event(result.event))

    async def reply(self, message: InboundMessage, output: Queue) -> None:
        # Route by command and lifecycle state.
        if message.command == TcpCommand.SubscriptionDropped:
            await self.drop_subscription(message)
        elif self.is_live:
            await self.reply_from_live(message)
        else:
            await self.reply_from_init(message, output)
class CatchupSubscriptionPhase(IntEnum):
    # Lifecycle of a catch-up subscription: page through historical events,
    # then replay events buffered during the subscribe handshake, then
    # stream live events. RECONNECT tears down and restarts the cycle.
    READ_HISTORICAL = 0
    CATCH_UP = 1
    LIVE = 2
    RECONNECT = 3
class VolatileSubscription:
    """A live (volatile) subscription to a single stream.

    Events arrive on the ``events`` async iterator; ``unsubscribe`` asks
    the server to drop the subscription. Usable as an async context
    manager that unsubscribes on exit.
    """

    def __init__(
        self,
        conversation_id,
        stream,
        queue,
        event_number,
        commit_position,
        iterator=None,
    ):
        self.stream = stream
        self.output_queue = queue
        self.id = conversation_id
        # Remember where the subscription began as well as the most
        # recently seen position.
        self.first_event_number = event_number
        self.last_event_number = event_number
        self.first_commit_position = commit_position
        self.last_commit_position = commit_position
        self.events = iterator or StreamingIterator()
        self.is_complete = False

    async def unsubscribe(self):
        """Send UnsubscribeFromStream to the server."""
        request = messages.OutboundMessage(
            self.id, TcpCommand.UnsubscribeFromStream, bytes()
        )
        await self.output_queue.put(request)

    async def raise_error(self, exn: Exception) -> None:
        """Mark the subscription complete and surface exn to the consumer."""
        self.is_complete = True
        await self.events.asend(exn)

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc, tb):
        # Unsubscribe on context exit unless already complete.
        if not self.is_complete:
            await self.unsubscribe()
class __catchup(Conversation):
    # Shared behaviour for catch-up subscriptions: error routing, reconnect
    # handling, phase transitions, and the volatile-subscribe request.
    # NOTE(review): subclasses are expected to provide phase, buffer,
    # subscription, from_event, next_event_number, last_event_number,
    # stream, resolve_link_tos and _yield_events.

    async def error(self, exn) -> None:
        # Before the first page the consumer awaits self.result; afterwards
        # errors are pushed down the subscription's event iterator.
        if self.result.done():
            await self.subscription.raise_error(exn)
        else:
            self.result.set_exception(exn)

    async def reconnect(self, output: Queue) -> None:
        # Drop buffered events and unsubscribe; the server's
        # SubscriptionDropped reply triggers a fresh start().
        self.phase = CatchupSubscriptionPhase.RECONNECT
        self.buffer = []
        await self.subscription.unsubscribe()

    async def start(self, output):
        if self.phase > CatchupSubscriptionPhase.READ_HISTORICAL:
            self._logger.info("Tear down previous subscription")
            await self.reconnect(output)

            return

        self._logger.info("Starting catchup subscription at %s", self.from_event)
        # Resume from the furthest point already reached.
        self.from_event = max(
            self.from_event, self.next_event_number, self.last_event_number
        )
        # NOTE(review): PageStreamEventsBehaviour is not defined in this
        # module chunk -- confirm it provides a compatible start().
        await PageStreamEventsBehaviour.start(self, output)
        self._logger.debug(
            "CatchupSubscription started on %s (%s)", self.stream, self.conversation_id
        )

    async def drop_subscription(self, response: InboundMessage) -> None:
        # A clean unsubscribe terminates the iterator; any other reason is
        # surfaced as a failure (a creation failure if the result future has
        # not been resolved yet).
        body = proto.SubscriptionDropped()
        body.ParseFromString(response.payload)

        if body.reason == messages.SubscriptionDropReason.Unsubscribed:
            await self.subscription.events.enqueue(StopAsyncIteration())

            return

        if self.result.done():
            await self.error(
                exceptions.SubscriptionFailed(self.conversation_id, body.reason)
            )

            return

        await self.error(
            exceptions.SubscriptionCreationFailed(self.conversation_id, body.reason)
        )

    @property
    def is_live(self):
        # True once both historical and catch-up phases are finished.
        return self.phase == CatchupSubscriptionPhase.LIVE

    async def _move_to_next_phase(self, output):
        # READ_HISTORICAL -> CATCH_UP: open the volatile subscription;
        # CATCH_UP -> LIVE: flush events buffered during the handshake.
        if self.phase == CatchupSubscriptionPhase.READ_HISTORICAL:
            self.phase = CatchupSubscriptionPhase.CATCH_UP
            self._logger.info(
                "Caught up with historical events, creating volatile subscription"
            )
            await self._subscribe(output)
        elif self.phase == CatchupSubscriptionPhase.CATCH_UP:
            self.phase = CatchupSubscriptionPhase.LIVE
            await self._yield_events(self.buffer)

    async def reply_from_live(self, message, output):
        if message.command == TcpCommand.SubscriptionDropped:
            await self.drop_subscription(message)

            return

        self.expect_only(message, TcpCommand.StreamEventAppeared)
        result = proto.StreamEventAppeared()
        result.ParseFromString(message.payload)

        await self._yield_events([_make_event(result.event)])

    async def reply_from_reconnect(self, message: InboundMessage, output: Queue):
        # Ignore everything until the old subscription is confirmed dropped,
        # then restart from the historical-read phase.
        if message.command != TcpCommand.SubscriptionDropped:
            return

        self.phase = CatchupSubscriptionPhase.READ_HISTORICAL
        await self.start(output)

    async def _subscribe(self, output: Queue) -> None:
        # Issue the volatile SubscribeToStream request for the live phase.
        msg = proto.SubscribeToStream()
        msg.event_stream_id = self.stream
        msg.resolve_link_tos = self.resolve_link_tos

        await output.put(
            OutboundMessage(
                self.conversation_id,
                TcpCommand.SubscribeToStream,
                msg.SerializeToString(),
                self.credential,
            )
        )
class CatchupSubscription(__catchup):
def __init__(
    self,
    stream,
    start_from=0,
    batch_size=100,
    credential=None,
    conversation_id=None,
):
    # NOTE(review): attributes are assigned before super().__init__, which
    # re-assigns conversation_id/result/credential and replaces _logger
    # with the base class's logger -- the explicit Future and named logger
    # created here are overwritten; confirm this is intended.
    self.stream = stream
    self.iterator = StreamingIterator()
    self.conversation_id = conversation_id or uuid4()
    self._logger = logging.get_named_logger(
        CatchupSubscription, self.conversation_id
    )
    self.from_event = start_from
    self.direction = StreamDirection.Forward
    self.batch_size = batch_size
    self.has_first_page = False
    self.require_master = False
    self.resolve_link_tos = True
    self.credential = credential
    self.result = Future()
    self.phase = CatchupSubscriptionPhase.READ_HISTORICAL
    # Live events received during the catch-up handshake are buffered here.
    self.buffer = []
    # Event number reported by the subscription confirmation.
    self.subscribe_from = -1
    self.next_event_number = self.from_event
    self.last_event_number = -1
    super().__init__(conversation_id, credential)
async def start(self, output):
if self.phase > CatchupSubscriptionPhase.READ_HISTORICAL:
self._logger.info("Tear down previous subscription")
await self.reconnect(output)
return
self.from_event = max(
self.from_event, self.next_event_number, self.last_event_number
)
self._logger.info("Starting catchup subscription at %s", self.from_event)
await output.put(page_stream_message(self, self.from_event))
logging.debug(
"CatchupSubscription started on %s (%s)", self.stream, self.conversation_id
)
async def _yield_events(self, events):
for event in events:
if event.event_number <= self.last_event_number:
continue
await self.iterator.asend(event)
self.last_event_number = event.event_number
async def reply_from_catch_up(self, message, output):
if message.command == TcpCommand.SubscriptionDropped:
await self.drop_subscription(message)
elif message.command == TcpCommand.SubscriptionConfirmation:
confirmation = proto.SubscriptionConfirmation()
confirmation.ParseFromString(message.payload)
self.subscribe_from = confirmation.last_event_number
self._logger.info(
"Subscribed successfully, catching up with missed events from %s",
self.next_event_number,
)
await output.put(page_stream_message(self, self.next_event_number))
elif message.command == TcpCommand.StreamEventAppeared:
result = proto.StreamEventAppeared()
result.ParseFromString(message.payload)
self.buffer.append(_make_event(result.event))
else:
self.expect_only(message, TcpCommand.ReadStreamEventsForwardCompleted)
result = ReadStreamEventsCompleted(message)
await result.dispatch(self, output)
async def reply(self, message: InboundMessage, output: Queue):
if self.phase == CatchupSubscriptionPhase.READ_HISTORICAL:
self.expect_only(message, TcpCommand.ReadStreamEventsForwardCompleted)
result = ReadStreamEventsCompleted(message)
await result.dispatch(self, output)
elif self.phase == CatchupSubscriptionPhase.CATCH_UP:
await self.reply_from_catch_up(message, output)
elif self.phase == CatchupSubscriptionPhase.RECONNECT:
await self.reply_from_reconnect(message, output)
else:
await self.reply_from_live(message, output)
async def success(self, result: proto.ReadStreamEventsCompleted, output: Queue):
finished = False
events = []
for e in result.events:
event = _make_event(e)
events.append(event)
await self._yield_events(events)
self.next_event_number = result.next_event_number
# Todo: we should finish if the next event > subscription_start_pos
if result.is_end_of_stream:
finished = True
if not self.has_first_page:
self.subscription = VolatileSubscription(
self.conversation_id, self.stream, output, 0, 0, self.iterator
)
self.result.set_result(self.subscription)
self.has_first_page = True
if finished:
await self._move_to_next_phase(output)
else:
await output.put(page_stream_message(self, result.next_event_number))
class CatchupAllSubscription(__catchup):
    """Catch-up subscription to the $all stream.

    Pages through historical events from a commit/prepare Position, then
    opens a volatile subscription and goes live (phases tracked by
    CatchupSubscriptionPhase).
    """

    name = "$all"
    stream = ""

    def __init__(
        self, start_from=None, batch_size=100, credential=None, conversation_id=None
    ):
        self.iterator = StreamingIterator()  # async iterator handed to the consumer
        self.conversation_id = conversation_id or uuid4()
        self._logger = logging.get_named_logger(
            CatchupAllSubscription, self.conversation_id
        )
        self.from_position = start_from or Position(0, 0)
        self.direction = StreamDirection.Forward
        self.batch_size = batch_size
        self.has_first_page = False
        self.require_master = False
        self.resolve_link_tos = True
        self.credential = credential
        # Resolves to the VolatileSubscription once the first page is read.
        self.result = Future()
        self.phase = CatchupSubscriptionPhase.READ_HISTORICAL
        # Live events that appeared while still paging history.
        self.buffer = []
        self.next_position = self.from_position
        # Highest position already delivered to the consumer.
        self.last_position = Position.min
        super().__init__(conversation_id, credential)

    async def _yield_events(self, events):
        """Forward events to the consumer, skipping any at or before the
        last delivered position."""
        for event in events:
            # Fix: this was a bare print() left over from debugging;
            # demoted to a lazy debug log so it no longer pollutes stdout.
            self._logger.debug(
                "yield? position=%s last=%s newer=%s",
                event.position,
                self.last_position,
                event.position > self.last_position,
            )
            if event.position > self.last_position:
                await self.iterator.enqueue(event)
                self.last_position = event.position

    async def start(self, output):
        """Begin (or restart) the conversation by paging historical events."""
        if self.phase > CatchupSubscriptionPhase.READ_HISTORICAL:
            self._logger.info("Tear down previous subscription")
            await self.reconnect(output)
            return
        # Never re-read events at or before the last delivered position.
        self.from_position = max(self.from_position, self.last_position)
        self._logger.info("Starting catchup subscription at %s", self.from_position)
        if self.direction == StreamDirection.Forward:
            command = TcpCommand.ReadAllEventsForward
        else:
            command = TcpCommand.ReadAllEventsBackward
        msg = proto.ReadAllEvents()
        msg.commit_position = self.from_position.commit
        msg.prepare_position = self.from_position.prepare
        msg.max_count = self.batch_size
        msg.resolve_link_tos = self.resolve_link_tos
        msg.require_master = self.require_master
        data = msg.SerializeToString()
        await output.put(
            OutboundMessage(self.conversation_id, command, data, self.credential)
        )
        self._logger.debug("CatchupAllSubscription started (%s)", self.conversation_id)

    async def reply_from_catch_up(self, message, output):
        """Handle messages while replaying events missed during paging."""
        if message.command == TcpCommand.SubscriptionDropped:
            await self.drop_subscription(message)
        elif message.command == TcpCommand.SubscriptionConfirmation:
            confirmation = proto.SubscriptionConfirmation()
            confirmation.ParseFromString(message.payload)
            self._logger.info(
                "Subscribed successfully, catching up with missed events from %s",
                self.from_position,
            )
            await output.put(page_all_message(self, self.from_position))
        elif message.command == TcpCommand.StreamEventAppeared:
            # Live event arrived while still catching up: buffer it for later.
            result = proto.StreamEventAppeared()
            result.ParseFromString(message.payload)
            self.buffer.append(_make_event(result.event))
        else:
            result = ReadAllEventsCompleted(message)
            await result.dispatch(self, output)

    async def reply(self, message: InboundMessage, output: Queue):
        """Dispatch an inbound message according to the current phase."""
        if self.phase == CatchupSubscriptionPhase.READ_HISTORICAL:
            self.expect_only(message, TcpCommand.ReadAllEventsForwardCompleted)
            result = ReadAllEventsCompleted(message)
            await result.dispatch(self, output)
        elif self.phase == CatchupSubscriptionPhase.CATCH_UP:
            await self.reply_from_catch_up(message, output)
        elif self.phase == CatchupSubscriptionPhase.RECONNECT:
            await self.reply_from_reconnect(message, output)
        else:
            await self.reply_from_live(message, output)

    async def success(self, result: proto.ReadStreamEventsCompleted, output: Queue):
        """Handle a completed read of one page of $all events."""
        # End of stream is reached when the read position did not advance.
        finished = result.commit_position == result.next_commit_position
        events = []
        for e in result.events:
            event = _make_event(e)
            events.append(event)
        # Todo: we should finish if the next event > subscription_start_pos
        if not self.has_first_page:
            # First page read: hand the subscription object to the caller.
            self.subscription = VolatileSubscription(
                self.conversation_id, self.name, output, 0, 0, self.iterator
            )
            self.result.set_result(self.subscription)
            self.has_first_page = True
        await self._yield_events(events)
        self.from_position = Position(
            result.next_commit_position, result.next_prepare_position
        )
        if finished:
            await self._move_to_next_phase(output)
        else:
            await output.put(page_all_message(self, self.from_position))
|
en
| 0.733437
|
Command class for writing a sequence of events to a single stream. Args: stream: The name of the stream to write to. events: A sequence of events to write. expected_version (optional): The expected version of the target stream used for concurrency control. required_master (optional): True if this command must be sent direct to the master node, otherwise False. correlation_id (optional): A unique identifer for this command. Command class for reading a single event. Args: stream: The name of the stream containing the event. event_number: The sequence number of the event to read. resolve_links (optional): True if eventstore should automatically resolve Link Events, otherwise False. required_master (optional): True if this command must be sent direct to the master node, otherwise False. correlation_id (optional): A unique identifer for this command. Command class for reading all events from a stream. Args: commit_position: The commit_position. event_number: The sequence number of the event to read. resolve_links (optional): True if eventstore should automatically resolve Link Events, otherwise False. required_master (optional): True if this command must be sent direct to the master node, otherwise False. correlation_id (optional): A unique identifer for this command. Command class for reading events from a stream. Args: stream: The name of the stream containing the event. event_number: The sequence number of the event to read. resolve_links (optional): True if eventstore should automatically resolve Link Events, otherwise False. required_master (optional): True if this command must be sent direct to the master node, otherwise False. correlation_id (optional): A unique identifer for this command. Command class for iterating all events in the database. Args: from_position (optional): The position to start reading from. Defaults to photonpump.Beginning when direction is Forward, photonpump.End when direction is Backward. 
batch_size (optional): The maximum number of events to read at a time. resolve_links (optional): True if eventstore should automatically resolve Link Events, otherwise False. require_master (optional): True if this command must be sent direct to the master node, otherwise False. direction (optional): Controls whether to read forward or backward through the events. Defaults to StreamDirection.Forward correlation_id (optional): A unique identifer for this command. Command class for iterating events from a stream. Args: stream: The name of the stream containing the event. resolve_links (optional): True if eventstore should automatically resolve Link Events, otherwise False. required_master (optional): True if this command must be sent direct to the master node, otherwise False. correlation_id (optional): A unique identifer for this command. # Todo: we should finish if the next event > subscription_start_pos # Todo: we should finish if the next event > subscription_start_pos
| 2.149136
| 2
|
inverted_pendulum.py
|
tderensis/digital_control
| 0
|
6625707
|
<filename>inverted_pendulum.py<gh_stars>0
"""
Design of a state space controller for an inverted pendulum driven by stepper motor.
"""
import control_plot, control_sim, control_design, control_optimize, control_eval, control_poles
from scipy import signal
import numpy as np
import math
# System Clasification Results
# motor position low pass filter (bessel with 1 sec settling time)
b_1 = 21.9
b_0 = 8.106
b_g = 21.9
g = 9.81
w0 = 4.008 # natural frequency
d = 0.0718 # damping
a_1 = w0**2
a_2 = a_1/g
# State Space Equations
"""
x = | x | - motor position (m)
| vel | - motor velocity (m/s)
| theta | - pendulum position (rad)
| theta_dot | - pendulum velocity (rad/s)
u = | x_d | - desired motor position (m)
"""
A = np.matrix([
[ 0, 1, 0, 0],
[-b_1, -b_0, 0, 0],
[ 0, 0, 0, 1],
[-b_1*a_2, -b_0*a_2, a_1, -d]
])
B = np.matrix([
[0],
[b_g],
[0],
[b_g*a_2]
])
C = np.matrix([
[1, 0, 0, 0],
[0, 0, 1, 0]
])
D = np.matrix([
[0],
[0]
])
sys_c_ol = signal.StateSpace(A, B, C, D)
print(sys_c_ol)
T = 0.05 # sampling time
Ts = 1.2 # settling time
Tso = Ts/6
print("Using T =", T, "Ts =", Ts, "Tso = ", Tso)
spoles = [
(-4.053+2.34j), (-4.053-2.34j), (-4.044060776465936+0j), (-3.9722607764659337+0j)
]
(sys_d_ol, L, K) = control_design.design_regob(sys_c_ol, T, Ts, Tso, spoles)
phi = sys_d_ol.A
gamma = sys_d_ol.B
print("phi =\n", phi)
print("gamma =\n", gamma)
print("L =\n", L)
print("K =\n", K)
(phi_ltf, gamma_ltf, c_ltf) = control_eval.ltf_regsf(sys_d_ol, L)
print("Stability assuming all states are measured")
control_eval.print_stability_margins(phi_ltf, gamma_ltf, c_ltf)
(phi_ltf, gamma_ltf, c_ltf) = control_eval.ltf_regob(sys_d_ol, L, K)
print("Stability using a full order observer")
control_eval.print_stability_margins(phi_ltf, gamma_ltf, c_ltf)
x0 = np.zeros((1, 4))
x0[0,1] = 20/math.pi
(t, u, x) = control_sim.sim_regsf(phi, gamma, L, T, x0, Ts*2)
print("reg settling time = ", control_eval.settling_time(t, x))
control_plot.plot_regsf(t, u, x)
(t, u, x, xhat, y) = control_sim.sim_regob(phi, gamma, C, L, K, T, x0, Ts*2)
print("fob settling time = ", control_eval.settling_time(t, y))
control_plot.plot_regob(t, u, x, xhat, y)
# Add a pole for the tracking system
spoles = spoles + control_poles.bessel_spoles(1, Ts)
# Only position is tracked
Ca = np.matrix([ 1, 0, 0, 0 ])
(sys_d_ol, phia, gammaa, L1, L2, K) = control_design.design_tsob(sys_c_ol, Ca, T, Ts, Tso, spoles)
print("phia = ", phia)
print("gammaa = ", gammaa)
print("L1 = ", L1)
print("L2 = ", L2)
print("K =\n", K)
(phi_ltf, gamma_ltf, c_ltf) = control_eval.ltf_tssf(sys_d_ol, phia, gammaa, Ca, L1, L2)
print("Stability using a tracking system with full state feedback.")
control_eval.print_stability_margins(phi_ltf, gamma_ltf, c_ltf)
(phi_ltf, gamma_ltf, c_ltf) = control_eval.ltf_tsob(sys_d_ol, phia, gammaa, Ca, L1, L2, K)
print("Stability using a tracking system with full order observer")
control_eval.print_stability_margins(phi_ltf, gamma_ltf, c_ltf)
|
<filename>inverted_pendulum.py<gh_stars>0
"""
Design of a state space controller for an inverted pendulum driven by stepper motor.
"""
import control_plot, control_sim, control_design, control_optimize, control_eval, control_poles
from scipy import signal
import numpy as np
import math
# System Clasification Results
# motor position low pass filter (bessel with 1 sec settling time)
b_1 = 21.9
b_0 = 8.106
b_g = 21.9
g = 9.81
w0 = 4.008 # natural frequency
d = 0.0718 # damping
a_1 = w0**2
a_2 = a_1/g
# State Space Equations
"""
x = | x | - motor position (m)
| vel | - motor velocity (m/s)
| theta | - pendulum position (rad)
| theta_dot | - pendulum velocity (rad/s)
u = | x_d | - desired motor position (m)
"""
A = np.matrix([
[ 0, 1, 0, 0],
[-b_1, -b_0, 0, 0],
[ 0, 0, 0, 1],
[-b_1*a_2, -b_0*a_2, a_1, -d]
])
B = np.matrix([
[0],
[b_g],
[0],
[b_g*a_2]
])
C = np.matrix([
[1, 0, 0, 0],
[0, 0, 1, 0]
])
D = np.matrix([
[0],
[0]
])
sys_c_ol = signal.StateSpace(A, B, C, D)
print(sys_c_ol)
T = 0.05 # sampling time
Ts = 1.2 # settling time
Tso = Ts/6
print("Using T =", T, "Ts =", Ts, "Tso = ", Tso)
spoles = [
(-4.053+2.34j), (-4.053-2.34j), (-4.044060776465936+0j), (-3.9722607764659337+0j)
]
(sys_d_ol, L, K) = control_design.design_regob(sys_c_ol, T, Ts, Tso, spoles)
phi = sys_d_ol.A
gamma = sys_d_ol.B
print("phi =\n", phi)
print("gamma =\n", gamma)
print("L =\n", L)
print("K =\n", K)
(phi_ltf, gamma_ltf, c_ltf) = control_eval.ltf_regsf(sys_d_ol, L)
print("Stability assuming all states are measured")
control_eval.print_stability_margins(phi_ltf, gamma_ltf, c_ltf)
(phi_ltf, gamma_ltf, c_ltf) = control_eval.ltf_regob(sys_d_ol, L, K)
print("Stability using a full order observer")
control_eval.print_stability_margins(phi_ltf, gamma_ltf, c_ltf)
x0 = np.zeros((1, 4))
x0[0,1] = 20/math.pi
(t, u, x) = control_sim.sim_regsf(phi, gamma, L, T, x0, Ts*2)
print("reg settling time = ", control_eval.settling_time(t, x))
control_plot.plot_regsf(t, u, x)
(t, u, x, xhat, y) = control_sim.sim_regob(phi, gamma, C, L, K, T, x0, Ts*2)
print("fob settling time = ", control_eval.settling_time(t, y))
control_plot.plot_regob(t, u, x, xhat, y)
# Add a pole for the tracking system
spoles = spoles + control_poles.bessel_spoles(1, Ts)
# Only position is tracked
Ca = np.matrix([ 1, 0, 0, 0 ])
(sys_d_ol, phia, gammaa, L1, L2, K) = control_design.design_tsob(sys_c_ol, Ca, T, Ts, Tso, spoles)
print("phia = ", phia)
print("gammaa = ", gammaa)
print("L1 = ", L1)
print("L2 = ", L2)
print("K =\n", K)
(phi_ltf, gamma_ltf, c_ltf) = control_eval.ltf_tssf(sys_d_ol, phia, gammaa, Ca, L1, L2)
print("Stability using a tracking system with full state feedback.")
control_eval.print_stability_margins(phi_ltf, gamma_ltf, c_ltf)
(phi_ltf, gamma_ltf, c_ltf) = control_eval.ltf_tsob(sys_d_ol, phia, gammaa, Ca, L1, L2, K)
print("Stability using a tracking system with full order observer")
control_eval.print_stability_margins(phi_ltf, gamma_ltf, c_ltf)
|
en
| 0.733788
|
Design of a state space controller for an inverted pendulum driven by stepper motor. # System Clasification Results # motor position low pass filter (bessel with 1 sec settling time) # natural frequency # damping # State Space Equations x = | x | - motor position (m)
| vel | - motor velocity (m/s)
| theta | - pendulum position (rad)
| theta_dot | - pendulum velocity (rad/s)
u = | x_d | - desired motor position (m) # sampling time # settling time # Add a pole for the tracking system # Only position is tracked
| 2.899764
| 3
|
beem/nodelist.py
|
MWFIAE/beem
| 0
|
6625708
|
<reponame>MWFIAE/beem<gh_stars>0
# This Python file uses the following encoding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import next
import re
import time
import math
import json
from beem.instance import shared_steem_instance
from beem.account import Account
import logging
log = logging.getLogger(__name__)
class NodeList(list):
    """A list of known Steem RPC nodes.

    Each entry is a dict with the keys ``url``, ``version``, ``type``,
    ``owner`` and ``score``.

    .. code-block:: python

        from beem.nodelist import NodeList
        n = NodeList()
        nodes_urls = n.get_nodes()

    """

    # (url, version, type, owner, score) for every known node, expanded
    # into dicts in __init__.  Kept as tuples so the five keys are not
    # repeated thirty-odd times.
    _NODE_DATA = [
        ("https://api.steemit.com", "0.19.11", "appbase", "steemit", 100),
        ("wss://appbasetest.timcliff.com", "0.19.11", "appbase", "timcliff", 20),
        ("https://appbasetest.timcliff.com", "0.19.11", "appbase", "timcliff", 10),
        ("https://api.steem.house", "0.19.11", "appbase", "gtg", 90),
        ("https://api.steemitdev.com", "0.19.11", "appbase-dev", "steemit", 100),
        ("https://api.steemitstage.com", "0.19.11", "appbase-dev", "steemit", 110),
        ("wss://rpc.steemviz.com", "0.19.6", "normal", "ausbitbank", 1),
        ("https://rpc.steemviz.com", "0.19.6", "normal", "ausbitbank", 1),
        ("wss://steemd.privex.io", "0.19.5", "normal", "privex", 150),
        ("https://steemd.privex.io", "0.19.5", "normal", "privex", 50),
        ("wss://rpc.buildteam.io", "0.19.5", "normal", "themarkymark", 165),
        ("https://rpc.buildteam.io", "0.19.5", "normal", "themarkymark", 120),
        ("wss://appbase.buildteam.io", "0.19.11", "appbase", "themarkymark", 165),
        ("https://appbase.buildteam.io", "0.19.11", "appbase", "themarkymark", 120),
        ("wss://gtg.steem.house:8090", "0.19.5", "normal", "gtg", 75),
        ("https://gtg.steem.house:8090", "0.19.5", "normal", "gtg", 80),
        ("wss://steemd.pevo.science", "0.19.2", "normal", "pharesim", -10),
        ("https://steemd.pevo.science", "0.19.6", "normal", "pharesim", -10),
        ("wss://rpc.steemliberator.com", "0.19.5", "normal", "netuoso", 50),
        ("https://rpc.steemliberator.com", "0.19.5", "normal", "netuoso", 20),
        ("wss://seed.bitcoiner.me", "0.19.6", "normal", "bitcoiner", -10),
        ("https://seed.bitcoiner.me", "0.19.6", "normal", "bitcoiner", -10),
        ("wss://steemd.steemgigs.org", "0.19.6", "normal", "steemgigs", -10),
        ("https://steemd.steemgigs.org", "0.19.6", "normal", "steemgigs", -10),
        ("wss://steemd.minnowsupportproject.org", "0.19.11", "appbase", "followbtcnews", 10),
        ("https://steemd.minnowsupportproject.org", "0.19.11", "appbase", "followbtcnews", 10),
        ("https://rpc.curiesteem.com", "0.19.5", "normal", "curie", 50),
        ("wss://testnet.steem.vc", "0.19.2", "testnet", "almost-digital", 20),
        ("ws://testnet.steem.vc", "0.19.2", "testnet", "almost-digital", 5),
        ("https://testnet.steem.vc", "0.19.2", "testnet", "almost-digital", 10),
        ("http://testnet.steem.vc", "0.19.2", "testnet", "almost-digital", 5),
        ("https://testnet.steemitdev.com", "0.21.0", "testnet-dev", "steemit", 5),
    ]

    def __init__(self):
        nodes = [
            {"url": url, "version": version, "type": node_type,
             "owner": owner, "score": score}
            for (url, version, node_type, owner, score) in self._NODE_DATA
        ]
        super(NodeList, self).__init__(nodes)

    @staticmethod
    def _weights_for(weights, benchmarks):
        """Build a normalized per-benchmark weight dict.

        :param weights: ``None`` for equal weights, a list aligned with
            ``benchmarks`` (missing entries weigh 0), or a dict keyed by
            benchmark name (missing keys weigh 0).
        :param benchmarks: benchmark names from the fullnodeupdate report.
        :raises ValueError: for unsupported ``weights`` types (previously
            this fell through and raised a confusing ``NameError``).
        """
        if weights is None:
            return {b: 1. / len(benchmarks) for b in benchmarks}
        if isinstance(weights, list):
            weight_sum = sum(weights)
            return {
                b: (weights[i] / weight_sum if i < len(weights) else 0.)
                for i, b in enumerate(benchmarks)
            }
        if isinstance(weights, dict):
            weight_sum = sum(weights.values())
            return {
                b: (weights[b] / weight_sum if b in weights else 0.)
                for b in benchmarks
            }
        raise ValueError("weights must be None, a list or a dict")

    def update_nodes(self, weights=None, steem_instance=None):
        """ Reads metadata from fullnodeupdate and recalculates the nodes score

            :params list/dict weight: can be used to weight the different benchmarks

            .. code-block:: python

                from beem.nodelist import NodeList
                nl = NodeList()
                weights = [0, 0.1, 0.2, 1]
                nl.update_nodes(weights)
                weights = {'block': 0.1, 'history': 0.1, 'apicall': 1, 'config': 1}
                nl.update_nodes(weights)
        """
        steem = steem_instance or shared_steem_instance()
        account = Account("fullnodeupdate", steem_instance=steem)
        metadata = json.loads(account["json_metadata"])
        report = metadata["report"]
        failing_nodes = metadata["failing_nodes"]
        benchmarks = metadata["parameter"]["benchmarks"]
        weights_dict = self._weights_for(weights, benchmarks)
        # Worst achievable rank; failed benchmarks rank one below the worst.
        max_score = len(report) + 1
        new_nodes = []
        for node in self:
            new_node = node.copy()
            for report_node in report:
                if node["url"] != report_node["node"]:
                    continue
                new_node["version"] = report_node["version"]
                sum_score = 0
                for benchmark in benchmarks:
                    result = report_node[benchmark]
                    rank = result["rank"] if result["ok"] else max_score + 1
                    # Linear score: rank 1 -> 100, rank max_score -> 0.
                    score = (max_score - rank) / (max_score - 1) * 100
                    sum_score += score * weights_dict[benchmark]
                new_node["score"] = sum_score
            if node["url"] in failing_nodes:
                new_node["score"] = -1
            new_nodes.append(new_node)
        super(NodeList, self).__init__(new_nodes)

    def get_nodes(self, normal=True, appbase=True, dev=False, testnet=False, testnetdev=False, wss=True, https=True):
        """ Returns node urls as a list, best score first

            :param bool normal: when True, nodes with version 0.19.5 are included
            :param bool appbase: when True, nodes with version 0.19.11 are included
            :param bool dev: when True, dev nodes with version 0.19.11 are included
            :param bool testnet: when True, testnet nodes are included
            :param bool testnetdev: When True, testnet-dev nodes are included
            :param bool wss: when False, wss:// urls are excluded
            :param bool https: when False, https:// urls are excluded
        """
        node_types = []
        if normal:
            node_types.append("normal")
        if appbase:
            node_types.append("appbase")
        if dev:
            node_types.append("appbase-dev")
        if testnet:
            node_types.append("testnet")
        if testnetdev:
            node_types.append("testnet-dev")
        selected = [
            node for node in self
            if node["type"] in node_types and node["score"] >= 0
            and (https or not node["url"].startswith("https"))
            and (wss or not node["url"].startswith("wss"))
        ]
        # list.sort is stable, so equal scores keep their original order.
        selected.sort(key=lambda node: node["score"], reverse=True)
        return [node["url"] for node in selected]

    def get_testnet(self, testnet=True, testnetdev=False):
        """Returns testnet node urls"""
        return self.get_nodes(normal=False, appbase=False, testnet=testnet, testnetdev=testnetdev)
|
# This Python file uses the following encoding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import next
import re
import time
import math
import json
from beem.instance import shared_steem_instance
from beem.account import Account
import logging
log = logging.getLogger(__name__)
class NodeList(list):
""" Returns a node list
.. code-block:: python
from beem.nodelist import NodeList
n = NodeList()
nodes_urls = n.get_nodes()
"""
def __init__(self):
nodes = [
{
"url": "https://api.steemit.com",
"version": "0.19.11",
"type": "appbase",
"owner": "steemit",
"score": 100
},
{
"url": "wss://appbasetest.timcliff.com",
"version": "0.19.11",
"type": "appbase",
"owner": "timcliff",
"score": 20
},
{
"url": "https://appbasetest.timcliff.com",
"version": "0.19.11",
"type": "appbase",
"owner": "timcliff",
"score": 10
},
{
"url": "https://api.steem.house",
"version": "0.19.11",
"type": "appbase",
"owner": "gtg",
"score": 90
},
{
"url": "https://api.steemitdev.com",
"version": "0.19.11",
"type": "appbase-dev",
"owner": "steemit",
"score": 100
},
{
"url": "https://api.steemitstage.com",
"version": "0.19.11",
"type": "appbase-dev",
"owner": "steemit",
"score": 110
},
{
"url": "wss://rpc.steemviz.com",
"version": "0.19.6",
"type": "normal",
"owner": "ausbitbank",
"score": 1
},
{
"url": "https://rpc.steemviz.com",
"version": "0.19.6",
"type": "normal",
"owner": "ausbitbank",
"score": 1
},
{
"url": "wss://steemd.privex.io",
"version": "0.19.5",
"type": "normal",
"owner": "privex",
"score": 150
},
{
"url": "https://steemd.privex.io",
"version": "0.19.5",
"type": "normal",
"owner": "privex",
"score": 50
},
{
"url": "wss://rpc.buildteam.io",
"version": "0.19.5",
"type": "normal",
"owner": "themarkymark",
"score": 165
},
{
"url": "https://rpc.buildteam.io",
"version": "0.19.5",
"type": "normal",
"owner": "themarkymark",
"score": 120
},
{
"url": "wss://appbase.buildteam.io",
"version": "0.19.11",
"type": "appbase",
"owner": "themarkymark",
"score": 165
},
{
"url": "https://appbase.buildteam.io",
"version": "0.19.11",
"type": "appbase",
"owner": "themarkymark",
"score": 120
},
{
"url": "wss://gtg.steem.house:8090",
"version": "0.19.5",
"type": "normal",
"owner": "gtg",
"score": 75
},
{
"url": "https://gtg.steem.house:8090",
"version": "0.19.5",
"type": "normal",
"owner": "gtg",
"score": 80
},
{
"url": "wss://steemd.pevo.science",
"version": "0.19.2",
"type": "normal",
"owner": "pharesim",
"score": -10
},
{
"url": "https://steemd.pevo.science",
"version": "0.19.6",
"type": "normal",
"owner": "pharesim",
"score": -10
},
{
"url": "wss://rpc.steemliberator.com",
"version": "0.19.5",
"type": "normal",
"owner": "netuoso",
"score": 50
},
{
"url": "https://rpc.steemliberator.com",
"version": "0.19.5",
"type": "normal",
"owner": "netuoso",
"score": 20
},
{
"url": "wss://seed.bitcoiner.me",
"version": "0.19.6",
"type": "normal",
"owner": "bitcoiner",
"score": -10
},
{
"url": "https://seed.bitcoiner.me",
"version": "0.19.6",
"type": "normal",
"owner": "bitcoiner",
"score": -10
},
{
"url": "wss://steemd.steemgigs.org",
"version": "0.19.6",
"type": "normal",
"owner": "steemgigs",
"score": -10
},
{
"url": "https://steemd.steemgigs.org",
"version": "0.19.6",
"type": "normal",
"owner": "steemgigs",
"score": -10
},
{
"url": "wss://steemd.minnowsupportproject.org",
"version": "0.19.11",
"type": "appbase",
"owner": "followbtcnews",
"score": 10
},
{
"url": "https://steemd.minnowsupportproject.org",
"version": "0.19.11",
"type": "appbase",
"owner": "followbtcnews",
"score": 10
},
{
"url": "https://rpc.curiesteem.com",
"version": "0.19.5",
"type": "normal",
"owner": "curie",
"score": 50
},
{
"url": "wss://testnet.steem.vc",
"version": "0.19.2",
"type": "testnet",
"owner": "almost-digital",
"score": 20
},
{
"url": "ws://testnet.steem.vc",
"version": "0.19.2",
"type": "testnet",
"owner": "almost-digital",
"score": 5
},
{
"url": "https://testnet.steem.vc",
"version": "0.19.2",
"type": "testnet",
"owner": "almost-digital",
"score": 10
},
{
"url": "http://testnet.steem.vc",
"version": "0.19.2",
"type": "testnet",
"owner": "almost-digital",
"score": 5
},
{
"url": "https://testnet.steemitdev.com",
"version": "0.21.0",
"type": "testnet-dev",
"owner": "steemit",
"score": 5
}]
super(NodeList, self).__init__(nodes)
def update_nodes(self, weights=None, steem_instance=None):
""" Reads metadata from fullnodeupdate and recalculates the nodes score
:params list/dict weight: can be used to weight the different benchmarks
.. code-block:: python
from beem.nodelist import NodeList
nl = NodeList()
weights = [0, 0.1, 0.2, 1]
nl.update_nodes(weights)
weights = {'block': 0.1, 'history': 0.1, 'apicall': 1, 'config': 1}
nl.update_nodes(weights)
"""
steem = steem_instance or shared_steem_instance()
account = Account("fullnodeupdate", steem_instance=steem)
metadata = json.loads(account["json_metadata"])
report = metadata["report"]
failing_nodes = metadata["failing_nodes"]
parameter = metadata["parameter"]
benchmarks = parameter["benchmarks"]
if weights is None:
weights_dict = {}
for benchmark in benchmarks:
weights_dict[benchmark] = (1. / len(benchmarks))
elif isinstance(weights, list):
weights_dict = {}
i = 0
weight_sum = 0
for w in weights:
weight_sum += w
for benchmark in benchmarks:
if i < len(weights):
weights_dict[benchmark] = weights[i] / weight_sum
else:
weights_dict[benchmark] = 0.
i += 1
elif isinstance(weights, dict):
weights_dict = {}
i = 0
weight_sum = 0
for w in weights:
weight_sum += weights[w]
for benchmark in benchmarks:
if benchmark in weights:
weights_dict[benchmark] = weights[benchmark] / weight_sum
else:
weights_dict[benchmark] = 0.
max_score = len(report) + 1
new_nodes = []
for node in self:
new_node = node.copy()
for report_node in report:
if node["url"] == report_node["node"]:
new_node["version"] = report_node["version"]
scores = []
for benchmark in benchmarks:
result = report_node[benchmark]
rank = result["rank"]
if not result["ok"]:
rank = max_score + 1
score = (max_score - rank) / (max_score - 1) * 100
weighted_score = score * weights_dict[benchmark]
scores.append(weighted_score)
sum_score = 0
for score in scores:
sum_score += score
new_node["score"] = sum_score
for node_failing in failing_nodes:
if node["url"] == node_failing:
new_node["score"] = -1
new_nodes.append(new_node)
super(NodeList, self).__init__(new_nodes)
def get_nodes(self, normal=True, appbase=True, dev=False, testnet=False, testnetdev=False, wss=True, https=True):
""" Returns nodes as list
:param bool normal: when True, nodes with version 0.19.5 are included
:param bool appbase: when True, nodes with version 0.19.11 are included
:param bool dev: when True, dev nodes with version 0.19.11 are included
:param bool testnet: when True, testnet nodes are included
:param bool testnetdev: When True, testnet-dev nodes are included
"""
node_list = []
node_type_list = []
if normal:
node_type_list.append("normal")
if appbase:
node_type_list.append("appbase")
if dev:
node_type_list.append("appbase-dev")
if testnet:
node_type_list.append("testnet")
if testnetdev:
node_type_list.append("testnet-dev")
for node in self:
if node["type"] in node_type_list and node["score"] >= 0:
if not https and node["url"][:5] == 'https':
continue
if not wss and node["url"][:3] == 'wss':
continue
node_list.append(node)
return [node["url"] for node in sorted(node_list, key=lambda self: self['score'], reverse=True)]
def get_testnet(self, testnet=True, testnetdev=False):
    """Returns only testnet node URLs (normal and appbase nodes excluded)."""
    return self.get_nodes(
        normal=False,
        appbase=False,
        testnet=testnet,
        testnetdev=testnetdev,
    )
|
en
| 0.692638
|
# This Python file uses the following encoding: utf-8 Returns a node list .. code-block:: python from beem.nodelist import NodeList n = NodeList() nodes_urls = n.get_nodes() Reads metadata from fullnodeupdate and recalculates the nodes score :params list/dict weight: can be used to weight the different benchmarks .. code-block:: python from beem.nodelist import NodeList nl = NodeList() weights = [0, 0.1, 0.2, 1] nl.update_nodes(weights) weights = {'block': 0.1, 'history': 0.1, 'apicall': 1, 'config': 1} nl.update_nodes(weights) Returns nodes as list :param bool normal: when True, nodes with version 0.19.5 are included :param bool appbase: when True, nodes with version 0.19.11 are included :param bool dev: when True, dev nodes with version 0.19.11 are included :param bool testnet: when True, testnet nodes are included :param bool testnetdev: When True, testnet-dev nodes are included Returns testnet nodes
| 2.276386
| 2
|
components/cronet/PRESUBMIT.py
|
metux/chromium-deb
| 0
|
6625709
|
<reponame>metux/chromium-deb<filename>components/cronet/PRESUBMIT.py
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for src/components/cronet.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def _PyLintChecks(input_api, output_api):
  """Runs pylint over the Cronet sources with the extra sys.path entries."""
  checks = input_api.canned_checks.GetPylint(
      input_api, output_api,
      extra_paths_list=_GetPathsToPrepend(input_api),
      pylintrc='pylintrc')
  return input_api.RunTests(checks)
def _GetPathsToPrepend(input_api):
current_dir = input_api.PresubmitLocalPath()
chromium_src_dir = input_api.os_path.join(current_dir, '..', '..')
return [
input_api.os_path.join(current_dir, 'tools'),
input_api.os_path.join(current_dir, 'android', 'test', 'javaperftests'),
input_api.os_path.join(chromium_src_dir, 'tools', 'perf'),
input_api.os_path.join(chromium_src_dir, 'build', 'android'),
input_api.os_path.join(chromium_src_dir, 'build', 'android', 'gyp', 'util'),
input_api.os_path.join(chromium_src_dir, 'net', 'tools', 'net_docs'),
input_api.os_path.join(chromium_src_dir, 'tools'),
input_api.os_path.join(chromium_src_dir, 'third_party'),
input_api.os_path.join(chromium_src_dir,
'third_party', 'catapult', 'telemetry'),
input_api.os_path.join(chromium_src_dir,
'third_party', 'catapult', 'devil'),
]
def _PackageChecks(input_api, output_api):
"""Verify API classes are in org.chromium.net package, and implementation
classes are not in org.chromium.net package."""
api_file_pattern = input_api.re.compile(
r'^components/cronet/android/api/.*\.(java|template)$')
impl_file_pattern = input_api.re.compile(
r'^components/cronet/android/java/.*\.(java|template)$')
api_package_pattern = input_api.re.compile(r'^package (?!org.chromium.net;)')
impl_package_pattern = input_api.re.compile(r'^package org.chromium.net;')
source_filter = lambda path: input_api.FilterSourceFile(path,
white_list=[r'^components/cronet/android/.*\.(java|template)$'])
problems = []
for f in input_api.AffectedSourceFiles(source_filter):
local_path = f.LocalPath()
for line_number, line in f.ChangedContents():
if (api_file_pattern.search(local_path)):
if (api_package_pattern.search(line)):
problems.append(
'%s:%d\n %s' % (local_path, line_number, line.strip()))
elif (impl_file_pattern.search(local_path)):
if (impl_package_pattern.search(line)):
problems.append(
'%s:%d\n %s' % (local_path, line_number, line.strip()))
if problems:
return [output_api.PresubmitError(
'API classes must be in org.chromium.net package, and implementation\n'
'classes must not be in org.chromium.net package.',
problems)]
else:
return []
def _RunUnittests(input_api, output_api):
return input_api.canned_checks.RunUnitTestsInDirectory(
input_api, output_api, '.', [ r'^.+_unittest\.py$'])
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit checks run on 'git cl upload'."""
  results = []
  for check in (_PyLintChecks, _PackageChecks, _RunUnittests):
    results.extend(check(input_api, output_api))
  return results
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit checks run on commit: only the unit tests."""
  return _RunUnittests(input_api, output_api)
def PostUploadHook(cl, change, output_api):
  """git cl upload will call this hook after the issue is created/modified.

  Adds the Cronet try bot to the CL description so Cronet tests run on the
  CQ in addition to the default try bots.
  """
  cronet_trybots = [
      'master.tryserver.chromium.android:android_cronet_tester',
  ]
  return output_api.EnsureCQIncludeTrybotsAreAdded(
      cl,
      cronet_trybots,
      'Automatically added Cronet trybot to run tests on CQ.')
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for src/components/cronet.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def _PyLintChecks(input_api, output_api):
pylint_checks = input_api.canned_checks.GetPylint(input_api, output_api,
extra_paths_list=_GetPathsToPrepend(input_api), pylintrc='pylintrc')
return input_api.RunTests(pylint_checks)
def _GetPathsToPrepend(input_api):
current_dir = input_api.PresubmitLocalPath()
chromium_src_dir = input_api.os_path.join(current_dir, '..', '..')
return [
input_api.os_path.join(current_dir, 'tools'),
input_api.os_path.join(current_dir, 'android', 'test', 'javaperftests'),
input_api.os_path.join(chromium_src_dir, 'tools', 'perf'),
input_api.os_path.join(chromium_src_dir, 'build', 'android'),
input_api.os_path.join(chromium_src_dir, 'build', 'android', 'gyp', 'util'),
input_api.os_path.join(chromium_src_dir, 'net', 'tools', 'net_docs'),
input_api.os_path.join(chromium_src_dir, 'tools'),
input_api.os_path.join(chromium_src_dir, 'third_party'),
input_api.os_path.join(chromium_src_dir,
'third_party', 'catapult', 'telemetry'),
input_api.os_path.join(chromium_src_dir,
'third_party', 'catapult', 'devil'),
]
def _PackageChecks(input_api, output_api):
"""Verify API classes are in org.chromium.net package, and implementation
classes are not in org.chromium.net package."""
api_file_pattern = input_api.re.compile(
r'^components/cronet/android/api/.*\.(java|template)$')
impl_file_pattern = input_api.re.compile(
r'^components/cronet/android/java/.*\.(java|template)$')
api_package_pattern = input_api.re.compile(r'^package (?!org.chromium.net;)')
impl_package_pattern = input_api.re.compile(r'^package org.chromium.net;')
source_filter = lambda path: input_api.FilterSourceFile(path,
white_list=[r'^components/cronet/android/.*\.(java|template)$'])
problems = []
for f in input_api.AffectedSourceFiles(source_filter):
local_path = f.LocalPath()
for line_number, line in f.ChangedContents():
if (api_file_pattern.search(local_path)):
if (api_package_pattern.search(line)):
problems.append(
'%s:%d\n %s' % (local_path, line_number, line.strip()))
elif (impl_file_pattern.search(local_path)):
if (impl_package_pattern.search(line)):
problems.append(
'%s:%d\n %s' % (local_path, line_number, line.strip()))
if problems:
return [output_api.PresubmitError(
'API classes must be in org.chromium.net package, and implementation\n'
'classes must not be in org.chromium.net package.',
problems)]
else:
return []
def _RunUnittests(input_api, output_api):
return input_api.canned_checks.RunUnitTestsInDirectory(
input_api, output_api, '.', [ r'^.+_unittest\.py$'])
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_PyLintChecks(input_api, output_api))
results.extend(_PackageChecks(input_api, output_api))
results.extend(_RunUnittests(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
return _RunUnittests(input_api, output_api)
def PostUploadHook(cl, change, output_api):
"""git cl upload will call this hook after the issue is created/modified.
This hook adds an extra try bot to the CL description in order to run Cronet
tests in addition to CQ try bots.
"""
return output_api.EnsureCQIncludeTrybotsAreAdded(
cl,
[
'master.tryserver.chromium.android:android_cronet_tester',
],
'Automatically added Cronet trybot to run tests on CQ.')
|
en
| 0.855897
|
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. Top-level presubmit script for src/components/cronet. See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for more details about the presubmit API built into depot_tools. Verify API classes are in org.chromium.net package, and implementation classes are not in org.chromium.net package. git cl upload will call this hook after the issue is created/modified. This hook adds an extra try bot to the CL description in order to run Cronet tests in addition to CQ try bots.
| 1.596515
| 2
|
setup.py
|
getaaron/athenacli
| 0
|
6625710
|
<reponame>getaaron/athenacli
#!/usr/bin/env python
"""Packaging script for athenacli."""
import io
import re
import ast
from setuptools import setup, find_packages

_version_re = re.compile(r'__version__\s+=\s+(.*)')

# Parse __version__ out of the package source instead of importing it, so
# building does not require athenacli's runtime dependencies to be installed.
with open('athenacli/__init__.py', 'rb') as f:
    version = str(ast.literal_eval(_version_re.search(
        f.read().decode('utf-8')).group(1)))

description = 'CLI for Athena Database. With auto-completion and syntax highlighting.'

# Fix: read the README with an explicit UTF-8 encoding; the plain
# open(..., "r") decoded with the machine locale and could fail on systems
# with a non-UTF-8 default. io.open keeps Python 2.7 compatibility
# (2.7 is still listed in the classifiers below).
with io.open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

install_requirements = [
    'click>=7.0',
    'Pygments>=1.6',
    'prompt_toolkit>=2.0.6,<3.0.0',
    'sqlparse>=0.3.0,<0.4.0',
    'configobj>=5.0.5',
    'cli_helpers[styles]>=1.1.0',
    'botocore>=1.5.52',
    'boto3>=1.4.4',
    'PyAthena>=1.2.2',
]

setup(
    name='athenacli',
    author='athenacli Core Team',
    author_email="<EMAIL>",
    version=version,
    packages=find_packages(),
    package_data={
        'athenacli': [
            'athenaclirc',
            'packages/literals/literals.json'
        ]
    },
    description=description,
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/dbcli/athenacli",
    install_requires=install_requirements,
    entry_points={
        'console_scripts': ['athenacli = athenacli.main:cli'],
    },
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: Unix',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: SQL',
        'Topic :: Database',
        'Topic :: Database :: Front-Ends',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
|
#!/usr/bin/env python
import re
import ast
from setuptools import setup, find_packages
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('athenacli/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
description = 'CLI for Athena Database. With auto-completion and syntax highlighting.'
with open("README.md", "r") as fh:
long_description = fh.read()
install_requirements = [
'click>=7.0',
'Pygments>=1.6',
'prompt_toolkit>=2.0.6,<3.0.0',
'sqlparse>=0.3.0,<0.4.0',
'configobj>=5.0.5',
'cli_helpers[styles]>=1.1.0',
'botocore>=1.5.52',
'boto3>=1.4.4',
'PyAthena>=1.2.2',
]
setup(
name='athenacli',
author='athenacli Core Team',
author_email="<EMAIL>",
version=version,
packages=find_packages(),
package_data={
'athenacli': [
'athenaclirc',
'packages/literals/literals.json'
]
},
description=description,
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/dbcli/athenacli",
install_requires=install_requirements,
entry_points={
'console_scripts': ['athenacli = athenacli.main:cli'],
},
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: SQL',
'Topic :: Database',
'Topic :: Database :: Front-Ends',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
ru
| 0.26433
|
#!/usr/bin/env python
| 1.599817
| 2
|
trainer.py
|
xenbaloch/efficientderain
| 109
|
6625711
|
import time
import datetime
import os
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.autograd as autograd
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
from pytorch_msssim import ssim, ms_ssim, SSIM, MS_SSIM
#import encoding
from torchvision import transforms
import pytorch_ssim
import dataset
import utils
def Pre_train(opt):
    """Pre-train the derain generator with an L1 + SSIM objective.

    ``opt`` is an argparse-style namespace. Fields read here include output
    paths (save_path, sample_path), optimizer settings (lr_g, b1, b2,
    weight_decay, lr_decrease_epoch, epochs), dataloader settings
    (train_batch_size, num_workers), save policy (save_mode, save_by_epoch,
    save_by_iter) and device flags (no_gpu, multi_gpu, cudnn_benchmark).
    Models are saved to opt.save_path and sample PNGs to opt.sample_path.
    """
    # ----------------------------------------
    # Network training parameters
    # ----------------------------------------
    #torch.cuda.set_device(1)
    # cudnn benchmark
    cudnn.benchmark = opt.cudnn_benchmark
    # configurations
    save_folder = opt.save_path
    sample_folder = opt.sample_path
    utils.check_path(save_folder)
    utils.check_path(sample_folder)
    # Loss functions (criterion_L2 is created but unused in the active loss below)
    if opt.no_gpu == False:
        criterion_L1 = torch.nn.L1Loss().cuda()
        criterion_L2 = torch.nn.MSELoss().cuda()
        #criterion_rainypred = torch.nn.L1Loss().cuda()
        criterion_ssim = pytorch_ssim.SSIM().cuda()
    else:
        criterion_L1 = torch.nn.L1Loss()
        criterion_L2 = torch.nn.MSELoss()
        #criterion_rainypred = torch.nn.L1Loss().cuda()
        criterion_ssim = pytorch_ssim.SSIM()
    # Initialize Generator
    generator = utils.create_generator(opt)
    # To device
    if opt.no_gpu == False:
        if opt.multi_gpu:
            generator = nn.DataParallel(generator)
            generator = generator.cuda()
        else:
            generator = generator.cuda()
    # Optimizers
    # Only parameters with requires_grad=True are optimized (allows frozen layers).
    optimizer_G = torch.optim.Adam(filter(lambda p: p.requires_grad, generator.parameters()), lr = opt.lr_g, betas = (opt.b1, opt.b2), weight_decay = opt.weight_decay)
    #optimizer_G = torch.optim.Adam(generator.parameters(), lr = opt.lr_g, betas = (opt.b1, opt.b2), weight_decay = opt.weight_decay)
    # pretrained model
    #encnet = encoding.models.get_model('Encnet_ResNet50s_PContext', pretrained=True).cuda()
    #encnet.eval()
    #resnet = (torch.nn.Sequential(*list(encnet.children())[:1]))[0]
    #resnet.eval()
    #encnet_feat = torch.nn.Sequential(*list(resnet.children())[:1])
    #encnet_feat.eval()
    #for param in encnet.parameters():
    #    param.requires_grad = False
    print("pretrained models loaded")
    # Learning rate decrease
    def adjust_learning_rate(opt, epoch, optimizer):
        # Linearly decay lr towards 0 over the final opt.lr_decrease_epoch epochs.
        target_epoch = opt.epochs - opt.lr_decrease_epoch
        remain_epoch = opt.epochs - epoch
        if epoch >= opt.lr_decrease_epoch:
            lr = opt.lr_g * remain_epoch / target_epoch
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    # Save the model if pre_train == True
    def save_model(opt, epoch, iteration, len_dataset, generator):
        """Save the model at "checkpoint_interval" and its multiple"""
        # Define the name of trained model
        """
        if opt.save_mode == 'epoch':
            model_name = 'KPN_single_image_epoch%d_bs%d_mu%d_sigma%d.pth' % (epoch, opt.train_batch_size, opt.mu, opt.sigma)
        if opt.save_mode == 'iter':
            model_name = 'KPN_single_image_iter%d_bs%d_mu%d_sigma%d.pth' % (iteration, opt.train_batch_size, opt.mu, opt.sigma)
        """
        if opt.save_mode == 'epoch':
            model_name = 'KPN_rainy_image_epoch%d_bs%d.pth' % (epoch, opt.train_batch_size)
        if opt.save_mode == 'iter':
            model_name = 'KPN_rainy_image_iter%d_bs%d.pth' % (iteration, opt.train_batch_size)
        save_model_path = os.path.join(opt.save_path, model_name)
        # DataParallel wraps the model, so the real weights live in .module.
        if opt.multi_gpu == True:
            if opt.save_mode == 'epoch':
                # Only save at the end of an epoch (iteration is a multiple of len_dataset).
                if (epoch % opt.save_by_epoch == 0) and (iteration % len_dataset == 0):
                    torch.save(generator.module.state_dict(), save_model_path)
                    print('The trained model is successfully saved at epoch %d' % (epoch))
            if opt.save_mode == 'iter':
                if iteration % opt.save_by_iter == 0:
                    torch.save(generator.module.state_dict(), save_model_path)
                    print('The trained model is successfully saved at iteration %d' % (iteration))
        else:
            if opt.save_mode == 'epoch':
                if (epoch % opt.save_by_epoch == 0) and (iteration % len_dataset == 0):
                    torch.save(generator.state_dict(), save_model_path)
                    print('The trained model is successfully saved at epoch %d' % (epoch))
            if opt.save_mode == 'iter':
                if iteration % opt.save_by_iter == 0:
                    torch.save(generator.state_dict(), save_model_path)
                    print('The trained model is successfully saved at iteration %d' % (iteration))
    # ----------------------------------------
    # Network dataset
    # ----------------------------------------
    # Handle multiple GPUs
    #os.environ["CUDA_VISIBLE_DEVICES"] = ""
    gpu_num = torch.cuda.device_count()
    print("There are %d GPUs used" % gpu_num)
    #if opt.no_gpu == False:
    #opt.train_batch_size *= gpu_num
    #opt.val_batch_size *= gpu_num
    #opt.num_workers *= gpu_num
    #print(opt.multi_gpu)
    '''
    print(opt.no_gpu == False)
    print(opt.no_gpu)
    print(gpu_num)
    print(opt.train_batch_size)
    '''
    # Define the dataset
    trainset = dataset.DenoisingDataset(opt)
    print('The overall number of training images:', len(trainset))
    # Define the dataloader
    train_loader = DataLoader(trainset, batch_size = opt.train_batch_size, shuffle = True, num_workers = opt.num_workers, pin_memory = True)
    # ----------------------------------------
    # Training
    # ----------------------------------------
    # Count start time
    prev_time = time.time()
    # For loop training
    for epoch in range(opt.epochs):
        for i, (true_input, true_target) in enumerate(train_loader):
            #print("in epoch %d" % i)
            if opt.no_gpu == False:
                # To device
                true_input = true_input.cuda()
                true_target = true_target.cuda()
            # Train Generator
            optimizer_G.zero_grad()
            # NOTE(review): the input is passed twice — presumably the second
            # argument is the KPN guidance image; confirm against the
            # generator's forward() signature.
            fake_target = generator(true_input, true_input)
            # Negative SSIM so that minimizing the loss maximizes similarity.
            ssim_loss = -criterion_ssim(true_target, fake_target)
            '''
            #trans for enc_net
            enc_trans = transforms.Compose([transforms.Normalize([.485, .456, .406], [.229, .224, .225])])
            fake_target_norm = torch.from_numpy(np.zeros(fake_target.size())).cuda()
            true_target_norm = torch.from_numpy(np.zeros(true_target.size())).cuda()
            for j in range(fake_target.size()[0]):
                fake_target_norm[j] = enc_trans(fake_target[j])
                true_target_norm[j] = enc_trans(true_target[j])
            '''
            #print(fake_target_norm.size())
            #enc_pred = encnet.evaluate(fake_target_norm.type(torch.FloatTensor).cuda())
            #enc_pred = encnet(fake_target_norm.type(torch.FloatTensor).cuda())[0]
            #enc_gt = encnet(true_target_norm.type(torch.FloatTensor).cuda())[0]
            '''
            enc_feat_pred = encnet_feat(fake_target_norm.type(torch.FloatTensor).cuda())[0]
            enc_feat_gt = encnet_feat(true_target_norm.type(torch.FloatTensor).cuda())[0]
            '''
            #rain_layer_gt = true_input - true_target
            #rain_layer_pred = true_input - fake_target
            #rainy_pred = true_input - (fake_target * rain_layer_pred)
            #print(type(true_input))
            #print(type(fake_target))
            # L1 Loss
            Pixellevel_L1_Loss = criterion_L1(fake_target, true_target)
            #enc_loss = criterion_L1(enc_pred, enc_gt)
            #enc_feat_loss = criterion_L1(enc_feat_pred, enc_feat_gt)
            #Pixellevel_L2_Loss = criterion_L2(fake_target, true_target)
            #Pixellevel_L2_Loss = criterion_L2(rain_layer_pred, rain_layer_gt)
            #Loss_rainypred = criterion_rainypred(rainy_pred, true_input)
            # Overall Loss and optimize
            # 0.2 is the SSIM-term weight used by this training recipe.
            loss = Pixellevel_L1_Loss + 0.2*ssim_loss
            #loss = Pixellevel_L1_Loss
            #loss = Pixellevel_L1_Loss + Pixellevel_L2_Loss + Loss_rainypred
            loss.backward()
            optimizer_G.step()
            #check
            '''
            for j in encnet.named_parameters():
                print(j)
                break
            '''
            # Determine approximate time left
            iters_done = epoch * len(train_loader) + i
            iters_left = opt.epochs * len(train_loader) - iters_done
            time_left = datetime.timedelta(seconds = iters_left * (time.time() - prev_time))
            prev_time = time.time()
            # Print log
            print("\r[Epoch %d/%d] [Batch %d/%d] [Loss: %.4f %.4f] Time_left: %s" %
                ((epoch + 1), opt.epochs, i, len(train_loader), Pixellevel_L1_Loss.item(), ssim_loss.item(), time_left))
            # Save model at certain epochs or iterations
            save_model(opt, (epoch + 1), (iters_done + 1), len(train_loader), generator)
            # Learning rate decrease at certain epochs
            adjust_learning_rate(opt, (epoch + 1), optimizer_G)
        ### Sample data every epoch
        if (epoch + 1) % 1 == 0:
            img_list = [true_input, fake_target, true_target]
            name_list = ['in', 'pred', 'gt']
            utils.save_sample_png(sample_folder = sample_folder, sample_name = 'train_epoch%d' % (epoch + 1), img_list = img_list, name_list = name_list, pixel_max_cnt = 255)
        '''
        ### Validation
        val_PSNR = 0
        num_of_val_image = 0
        for j, (true_input, true_target) in enumerate(val_loader):
            # To device
            # A is for input image, B is for target image
            true_input = true_input.cuda()
            true_target = true_target.cuda()
            # Forward propagation
            with torch.no_grad():
                fake_target = generator(true_input)
            # Accumulate num of image and val_PSNR
            num_of_val_image += true_input.shape[0]
            val_PSNR += utils.psnr(fake_target, true_target, 1) * true_input.shape[0]
        val_PSNR = val_PSNR / num_of_val_image
        ### Sample data every epoch
        if (epoch + 1) % 1 == 0:
            img_list = [true_input, fake_target, true_target]
            name_list = ['in', 'pred', 'gt']
            utils.save_sample_png(sample_folder = sample_folder, sample_name = 'val_epoch%d' % (epoch + 1), img_list = img_list, name_list = name_list, pixel_max_cnt = 255)
        # Record average PSNR
        print('PSNR at epoch %d: %.4f' % ((epoch + 1), val_PSNR))
        '''
|
import time
import datetime
import os
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.autograd as autograd
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
from pytorch_msssim import ssim, ms_ssim, SSIM, MS_SSIM
#import encoding
from torchvision import transforms
import pytorch_ssim
import dataset
import utils
def Pre_train(opt):
# ----------------------------------------
# Network training parameters
# ----------------------------------------
#torch.cuda.set_device(1)
# cudnn benchmark
cudnn.benchmark = opt.cudnn_benchmark
# configurations
save_folder = opt.save_path
sample_folder = opt.sample_path
utils.check_path(save_folder)
utils.check_path(sample_folder)
# Loss functions
if opt.no_gpu == False:
criterion_L1 = torch.nn.L1Loss().cuda()
criterion_L2 = torch.nn.MSELoss().cuda()
#criterion_rainypred = torch.nn.L1Loss().cuda()
criterion_ssim = pytorch_ssim.SSIM().cuda()
else:
criterion_L1 = torch.nn.L1Loss()
criterion_L2 = torch.nn.MSELoss()
#criterion_rainypred = torch.nn.L1Loss().cuda()
criterion_ssim = pytorch_ssim.SSIM()
# Initialize Generator
generator = utils.create_generator(opt)
# To device
if opt.no_gpu == False:
if opt.multi_gpu:
generator = nn.DataParallel(generator)
generator = generator.cuda()
else:
generator = generator.cuda()
# Optimizers
optimizer_G = torch.optim.Adam(filter(lambda p: p.requires_grad, generator.parameters()), lr = opt.lr_g, betas = (opt.b1, opt.b2), weight_decay = opt.weight_decay)
#optimizer_G = torch.optim.Adam(generator.parameters(), lr = opt.lr_g, betas = (opt.b1, opt.b2), weight_decay = opt.weight_decay)
# pretrained model
#encnet = encoding.models.get_model('Encnet_ResNet50s_PContext', pretrained=True).cuda()
#encnet.eval()
#resnet = (torch.nn.Sequential(*list(encnet.children())[:1]))[0]
#resnet.eval()
#encnet_feat = torch.nn.Sequential(*list(resnet.children())[:1])
#encnet_feat.eval()
#for param in encnet.parameters():
# param.requires_grad = False
print("pretrained models loaded")
# Learning rate decrease
def adjust_learning_rate(opt, epoch, optimizer):
target_epoch = opt.epochs - opt.lr_decrease_epoch
remain_epoch = opt.epochs - epoch
if epoch >= opt.lr_decrease_epoch:
lr = opt.lr_g * remain_epoch / target_epoch
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Save the model if pre_train == True
def save_model(opt, epoch, iteration, len_dataset, generator):
"""Save the model at "checkpoint_interval" and its multiple"""
# Define the name of trained model
"""
if opt.save_mode == 'epoch':
model_name = 'KPN_single_image_epoch%d_bs%d_mu%d_sigma%d.pth' % (epoch, opt.train_batch_size, opt.mu, opt.sigma)
if opt.save_mode == 'iter':
model_name = 'KPN_single_image_iter%d_bs%d_mu%d_sigma%d.pth' % (iteration, opt.train_batch_size, opt.mu, opt.sigma)
"""
if opt.save_mode == 'epoch':
model_name = 'KPN_rainy_image_epoch%d_bs%d.pth' % (epoch, opt.train_batch_size)
if opt.save_mode == 'iter':
model_name = 'KPN_rainy_image_iter%d_bs%d.pth' % (iteration, opt.train_batch_size)
save_model_path = os.path.join(opt.save_path, model_name)
if opt.multi_gpu == True:
if opt.save_mode == 'epoch':
if (epoch % opt.save_by_epoch == 0) and (iteration % len_dataset == 0):
torch.save(generator.module.state_dict(), save_model_path)
print('The trained model is successfully saved at epoch %d' % (epoch))
if opt.save_mode == 'iter':
if iteration % opt.save_by_iter == 0:
torch.save(generator.module.state_dict(), save_model_path)
print('The trained model is successfully saved at iteration %d' % (iteration))
else:
if opt.save_mode == 'epoch':
if (epoch % opt.save_by_epoch == 0) and (iteration % len_dataset == 0):
torch.save(generator.state_dict(), save_model_path)
print('The trained model is successfully saved at epoch %d' % (epoch))
if opt.save_mode == 'iter':
if iteration % opt.save_by_iter == 0:
torch.save(generator.state_dict(), save_model_path)
print('The trained model is successfully saved at iteration %d' % (iteration))
# ----------------------------------------
# Network dataset
# ----------------------------------------
# Handle multiple GPUs
#os.environ["CUDA_VISIBLE_DEVICES"] = ""
gpu_num = torch.cuda.device_count()
print("There are %d GPUs used" % gpu_num)
#if opt.no_gpu == False:
#opt.train_batch_size *= gpu_num
#opt.val_batch_size *= gpu_num
#opt.num_workers *= gpu_num
#print(opt.multi_gpu)
'''
print(opt.no_gpu == False)
print(opt.no_gpu)
print(gpu_num)
print(opt.train_batch_size)
'''
# Define the dataset
trainset = dataset.DenoisingDataset(opt)
print('The overall number of training images:', len(trainset))
# Define the dataloader
train_loader = DataLoader(trainset, batch_size = opt.train_batch_size, shuffle = True, num_workers = opt.num_workers, pin_memory = True)
# ----------------------------------------
# Training
# ----------------------------------------
# Count start time
prev_time = time.time()
# For loop training
for epoch in range(opt.epochs):
for i, (true_input, true_target) in enumerate(train_loader):
#print("in epoch %d" % i)
if opt.no_gpu == False:
# To device
true_input = true_input.cuda()
true_target = true_target.cuda()
# Train Generator
optimizer_G.zero_grad()
fake_target = generator(true_input, true_input)
ssim_loss = -criterion_ssim(true_target, fake_target)
'''
#trans for enc_net
enc_trans = transforms.Compose([transforms.Normalize([.485, .456, .406], [.229, .224, .225])])
fake_target_norm = torch.from_numpy(np.zeros(fake_target.size())).cuda()
true_target_norm = torch.from_numpy(np.zeros(true_target.size())).cuda()
for j in range(fake_target.size()[0]):
fake_target_norm[j] = enc_trans(fake_target[j])
true_target_norm[j] = enc_trans(true_target[j])
'''
#print(fake_target_norm.size())
#enc_pred = encnet.evaluate(fake_target_norm.type(torch.FloatTensor).cuda())
#enc_pred = encnet(fake_target_norm.type(torch.FloatTensor).cuda())[0]
#enc_gt = encnet(true_target_norm.type(torch.FloatTensor).cuda())[0]
'''
enc_feat_pred = encnet_feat(fake_target_norm.type(torch.FloatTensor).cuda())[0]
enc_feat_gt = encnet_feat(true_target_norm.type(torch.FloatTensor).cuda())[0]
'''
#rain_layer_gt = true_input - true_target
#rain_layer_pred = true_input - fake_target
#rainy_pred = true_input - (fake_target * rain_layer_pred)
#print(type(true_input))
#print(type(fake_target))
# L1 Loss
Pixellevel_L1_Loss = criterion_L1(fake_target, true_target)
#enc_loss = criterion_L1(enc_pred, enc_gt)
#enc_feat_loss = criterion_L1(enc_feat_pred, enc_feat_gt)
#Pixellevel_L2_Loss = criterion_L2(fake_target, true_target)
#Pixellevel_L2_Loss = criterion_L2(rain_layer_pred, rain_layer_gt)
#Loss_rainypred = criterion_rainypred(rainy_pred, true_input)
# Overall Loss and optimize
loss = Pixellevel_L1_Loss + 0.2*ssim_loss
#loss = Pixellevel_L1_Loss
#loss = Pixellevel_L1_Loss + Pixellevel_L2_Loss + Loss_rainypred
loss.backward()
optimizer_G.step()
#check
'''
for j in encnet.named_parameters():
print(j)
break
'''
# Determine approximate time left
iters_done = epoch * len(train_loader) + i
iters_left = opt.epochs * len(train_loader) - iters_done
time_left = datetime.timedelta(seconds = iters_left * (time.time() - prev_time))
prev_time = time.time()
# Print log
print("\r[Epoch %d/%d] [Batch %d/%d] [Loss: %.4f %.4f] Time_left: %s" %
((epoch + 1), opt.epochs, i, len(train_loader), Pixellevel_L1_Loss.item(), ssim_loss.item(), time_left))
# Save model at certain epochs or iterations
save_model(opt, (epoch + 1), (iters_done + 1), len(train_loader), generator)
# Learning rate decrease at certain epochs
adjust_learning_rate(opt, (epoch + 1), optimizer_G)
### Sample data every epoch
if (epoch + 1) % 1 == 0:
img_list = [true_input, fake_target, true_target]
name_list = ['in', 'pred', 'gt']
utils.save_sample_png(sample_folder = sample_folder, sample_name = 'train_epoch%d' % (epoch + 1), img_list = img_list, name_list = name_list, pixel_max_cnt = 255)
'''
### Validation
val_PSNR = 0
num_of_val_image = 0
for j, (true_input, true_target) in enumerate(val_loader):
# To device
# A is for input image, B is for target image
true_input = true_input.cuda()
true_target = true_target.cuda()
# Forward propagation
with torch.no_grad():
fake_target = generator(true_input)
# Accumulate num of image and val_PSNR
num_of_val_image += true_input.shape[0]
val_PSNR += utils.psnr(fake_target, true_target, 1) * true_input.shape[0]
val_PSNR = val_PSNR / num_of_val_image
### Sample data every epoch
if (epoch + 1) % 1 == 0:
img_list = [true_input, fake_target, true_target]
name_list = ['in', 'pred', 'gt']
utils.save_sample_png(sample_folder = sample_folder, sample_name = 'val_epoch%d' % (epoch + 1), img_list = img_list, name_list = name_list, pixel_max_cnt = 255)
# Record average PSNR
print('PSNR at epoch %d: %.4f' % ((epoch + 1), val_PSNR))
'''
|
en
| 0.409387
|
#import encoding # ---------------------------------------- # Network training parameters # ---------------------------------------- #torch.cuda.set_device(1) # cudnn benchmark # configurations # Loss functions #criterion_rainypred = torch.nn.L1Loss().cuda() #criterion_rainypred = torch.nn.L1Loss().cuda() # Initialize Generator # To device # Optimizers #optimizer_G = torch.optim.Adam(generator.parameters(), lr = opt.lr_g, betas = (opt.b1, opt.b2), weight_decay = opt.weight_decay) # pretrained model #encnet = encoding.models.get_model('Encnet_ResNet50s_PContext', pretrained=True).cuda() #encnet.eval() #resnet = (torch.nn.Sequential(*list(encnet.children())[:1]))[0] #resnet.eval() #encnet_feat = torch.nn.Sequential(*list(resnet.children())[:1]) #encnet_feat.eval() #for param in encnet.parameters(): # param.requires_grad = False # Learning rate decrease # Save the model if pre_train == True Save the model at "checkpoint_interval" and its multiple # Define the name of trained model if opt.save_mode == 'epoch': model_name = 'KPN_single_image_epoch%d_bs%d_mu%d_sigma%d.pth' % (epoch, opt.train_batch_size, opt.mu, opt.sigma) if opt.save_mode == 'iter': model_name = 'KPN_single_image_iter%d_bs%d_mu%d_sigma%d.pth' % (iteration, opt.train_batch_size, opt.mu, opt.sigma) # ---------------------------------------- # Network dataset # ---------------------------------------- # Handle multiple GPUs #os.environ["CUDA_VISIBLE_DEVICES"] = "" #if opt.no_gpu == False: #opt.train_batch_size *= gpu_num #opt.val_batch_size *= gpu_num #opt.num_workers *= gpu_num #print(opt.multi_gpu) print(opt.no_gpu == False) print(opt.no_gpu) print(gpu_num) print(opt.train_batch_size) # Define the dataset # Define the dataloader # ---------------------------------------- # Training # ---------------------------------------- # Count start time # For loop training #print("in epoch %d" % i) # To device # Train Generator #trans for enc_net enc_trans = transforms.Compose([transforms.Normalize([.485, .456, 
.406], [.229, .224, .225])]) fake_target_norm = torch.from_numpy(np.zeros(fake_target.size())).cuda() true_target_norm = torch.from_numpy(np.zeros(true_target.size())).cuda() for j in range(fake_target.size()[0]): fake_target_norm[j] = enc_trans(fake_target[j]) true_target_norm[j] = enc_trans(true_target[j]) #print(fake_target_norm.size()) #enc_pred = encnet.evaluate(fake_target_norm.type(torch.FloatTensor).cuda()) #enc_pred = encnet(fake_target_norm.type(torch.FloatTensor).cuda())[0] #enc_gt = encnet(true_target_norm.type(torch.FloatTensor).cuda())[0] enc_feat_pred = encnet_feat(fake_target_norm.type(torch.FloatTensor).cuda())[0] enc_feat_gt = encnet_feat(true_target_norm.type(torch.FloatTensor).cuda())[0] #rain_layer_gt = true_input - true_target #rain_layer_pred = true_input - fake_target #rainy_pred = true_input - (fake_target * rain_layer_pred) #print(type(true_input)) #print(type(fake_target)) # L1 Loss #enc_loss = criterion_L1(enc_pred, enc_gt) #enc_feat_loss = criterion_L1(enc_feat_pred, enc_feat_gt) #Pixellevel_L2_Loss = criterion_L2(fake_target, true_target) #Pixellevel_L2_Loss = criterion_L2(rain_layer_pred, rain_layer_gt) #Loss_rainypred = criterion_rainypred(rainy_pred, true_input) # Overall Loss and optimize #loss = Pixellevel_L1_Loss #loss = Pixellevel_L1_Loss + Pixellevel_L2_Loss + Loss_rainypred #check for j in encnet.named_parameters(): print(j) break # Determine approximate time left # Print log # Save model at certain epochs or iterations # Learning rate decrease at certain epochs ### Sample data every epoch ### Validation val_PSNR = 0 num_of_val_image = 0 for j, (true_input, true_target) in enumerate(val_loader): # To device # A is for input image, B is for target image true_input = true_input.cuda() true_target = true_target.cuda() # Forward propagation with torch.no_grad(): fake_target = generator(true_input) # Accumulate num of image and val_PSNR num_of_val_image += true_input.shape[0] val_PSNR += utils.psnr(fake_target, true_target, 1) * 
true_input.shape[0] val_PSNR = val_PSNR / num_of_val_image ### Sample data every epoch if (epoch + 1) % 1 == 0: img_list = [true_input, fake_target, true_target] name_list = ['in', 'pred', 'gt'] utils.save_sample_png(sample_folder = sample_folder, sample_name = 'val_epoch%d' % (epoch + 1), img_list = img_list, name_list = name_list, pixel_max_cnt = 255) # Record average PSNR print('PSNR at epoch %d: %.4f' % ((epoch + 1), val_PSNR))
| 2.211857
| 2
|
python/baseline/dy/seq2seq/encoders.py
|
domyounglee/baseline
| 0
|
6625712
|
<reponame>domyounglee/baseline
from collections import namedtuple
import dynet as dy
from baseline.model import register_encoder
from baseline.dy.transformer import TransformerEncoderStack
from baseline.dy.dynety import DynetModel, Linear, rnn_forward_with_state, sequence_mask, unsqueeze
RNNEncoderOutput = namedtuple("RNNEncoderOutput", ("output", "hidden", "src_mask"))
TransformerEncoderOutput = namedtuple("TransformerEncoderOutput", ("output", "src_mask"))
class EncoderBase(DynetModel):
    """Base class for seq2seq encoders.
    Stores the dynet parameter (sub)collection via ``DynetModel``; concrete
    encoders implement the mapping from an embedded input sequence to an
    encoder output namedtuple.
    """
    def __init__(self, pc):
        super(EncoderBase, self).__init__(pc)
    def encode(self, embed_in, src_len, **kwargs):
        """Encode an embedded sequence. No-op here; subclasses override."""
        pass
@register_encoder(name='default')
class RNNEncoder(EncoderBase):
    """LSTM / bidirectional-LSTM encoder for seq2seq.
    With ``rnntype='blstm'`` two LSTMs of ``hidden // 2`` units each encode
    the sequence (one per direction) and their outputs/states are
    concatenated; otherwise a single forward LSTM of ``hidden`` units is
    used. When ``residual`` is set, the input embeddings are added to the
    output timesteps.
    """
    def __init__(self, dsz, pc, hsz=None, rnntype='blstm', layers=1, pdrop=0.5, residual=False, create_src_mask=True, name='rnn-encoder', **kwargs):
        pc = pc.add_subcollection(name=name)
        super(RNNEncoder, self).__init__(pc)
        self.residual = residual
        # Fall back to the embedding size when no hidden size is given.
        hidden = hsz if hsz is not None else dsz
        if rnntype == 'blstm':
            self.lstm_forward = dy.VanillaLSTMBuilder(layers, dsz, hidden // 2, self.pc)
            self.lstm_backward = dy.VanillaLSTMBuilder(layers, dsz, hidden // 2, self.pc)
        else:
            self.lstm_forward = dy.VanillaLSTMBuilder(layers, dsz, hidden, self.pc)
            self.lstm_backward = None
        self.src_mask_fn = sequence_mask if create_src_mask else lambda x, y: (None, None)
        self.pdrop = pdrop
    def dropout(self, train):
        """Enable dropout on both directions when training, else disable.
        Bug fix: the backward branch previously toggled dropout on
        ``lstm_forward`` a second time, leaving ``lstm_backward`` with
        whatever dropout state it last had.
        """
        if train:
            self.lstm_forward.set_dropout(self.pdrop)
            if self.lstm_backward is not None:
                self.lstm_backward.set_dropout(self.pdrop)
        else:
            self.lstm_forward.disable_dropout()
            if self.lstm_backward is not None:
                self.lstm_backward.disable_dropout()
    def __call__(self, embed_in, src_len, train=False, **kwargs):
        """Input Shape: ((T, H), B). Output Shape: [((H,), B)] * T"""
        embed_in = list(embed_in)
        self.dropout(train)
        forward, forward_state = rnn_forward_with_state(self.lstm_forward, embed_in, src_len)
        if self.lstm_backward is not None:
            # NOTE(review): the backward pass is run without src_len —
            # presumably rnn_forward_with_state handles reversal/padding
            # itself; confirm against baseline.dy.dynety.
            backward, backward_state = rnn_forward_with_state(self.lstm_backward, embed_in)
            output = [dy.concatenate([f, b]) for f, b in zip(forward, backward)]
            hidden = [dy.concatenate([f, b]) for f, b in zip(forward_state, backward_state)]
        else:
            output = forward
            hidden = forward_state
        return RNNEncoderOutput(
            output=[o + e for o, e in zip(output, embed_in)] if self.residual else output,
            hidden=hidden,
            src_mask=self.src_mask_fn(src_len, len(output))
        )
@register_encoder(name='transformer')
class TransformerEncoderWrapper(EncoderBase):
    """Transformer encoder for seq2seq.
    Optionally projects the input embedding dimension ``dsz`` to the
    transformer model dimension ``hsz`` before encoding.
    """
    def __init__(self, dsz, pc, hsz=None, num_heads=4, layers=1, dropout=0.5, name='transformer-encoder-wrapper', **kwargs):
        pc = pc.add_subcollection(name=name)
        super(TransformerEncoderWrapper, self).__init__(pc)
        if hsz is None:
            hsz = dsz
        # Only insert a projection when the sizes actually differ;
        # otherwise use an identity pass-through.
        self.proj = Linear(hsz, dsz, pc) if hsz != dsz else lambda x: x
        self.transformer = TransformerEncoderStack(num_heads, d_model=hsz, pc=pc, pdrop=dropout, scale=True, layers=layers)
    def __call__(self, embed_in, src_len, train=False, **kwargs):
        """Input shape: ((T, H), B) Output Shape: [((H,), B)] * T"""
        # T is the sequence length taken from the input expression's dims.
        T = embed_in.dim()[0][0]
        embed_in = dy.transpose(embed_in)
        src_mask = sequence_mask(src_len, T)
        # Add a trailing axis so the mask broadcasts over attention heads.
        src_mask = [unsqueeze(m, 2) for m in src_mask]
        x = self.proj(embed_in)
        output = self.transformer(x, src_mask, train=train)
        # Re-split the (H, T) expression into a list of per-timestep vectors.
        output = [out for out in dy.transpose(output)]
        return TransformerEncoderOutput(output=output, src_mask=src_mask)
|
from collections import namedtuple
import dynet as dy
from baseline.model import register_encoder
from baseline.dy.transformer import TransformerEncoderStack
from baseline.dy.dynety import DynetModel, Linear, rnn_forward_with_state, sequence_mask, unsqueeze
RNNEncoderOutput = namedtuple("RNNEncoderOutput", ("output", "hidden", "src_mask"))
TransformerEncoderOutput = namedtuple("TransformerEncoderOutput", ("output", "src_mask"))
class EncoderBase(DynetModel):
def __init__(self, pc):
super(EncoderBase, self).__init__(pc)
def encode(self, embed_in, src_len, **kwargs):
pass
@register_encoder(name='default')
class RNNEncoder(EncoderBase):
def __init__(self, dsz, pc, hsz=None, rnntype='blstm', layers=1, pdrop=0.5, residual=False, create_src_mask=True, name='rnn-encoder', **kwargs):
pc = pc.add_subcollection(name=name)
super(RNNEncoder, self).__init__(pc)
self.residual = residual
hidden = hsz if hsz is not None else dsz
if rnntype == 'blstm':
self.lstm_forward = dy.VanillaLSTMBuilder(layers, dsz, hidden // 2, self.pc)
self.lstm_backward = dy.VanillaLSTMBuilder(layers, dsz, hidden // 2, self.pc)
else:
self.lstm_forward = dy.VanillaLSTMBuilder(layers, dsz, hidden, self.pc)
self.lstm_backward = None
self.src_mask_fn = sequence_mask if create_src_mask else lambda x, y: (None, None)
self.pdrop = pdrop
def dropout(self, train):
if train:
self.lstm_forward.set_dropout(self.pdrop)
if self.lstm_backward is not None:
self.lstm_forward.set_dropout(self.pdrop)
else:
self.lstm_forward.disable_dropout()
if self.lstm_backward is not None:
self.lstm_forward.disable_dropout()
def __call__(self, embed_in, src_len, train=False, **kwargs):
"""Input Shape: ((T, H), B). Output Shape: [((H,), B)] * T"""
embed_in = list(embed_in)
self.dropout(train)
forward, forward_state = rnn_forward_with_state(self.lstm_forward, embed_in, src_len)
if self.lstm_backward is not None:
backward, backward_state = rnn_forward_with_state(self.lstm_backward, embed_in)
output = [dy.concatenate([f, b]) for f, b in zip(forward, backward)]
hidden = [dy.concatenate([f, b]) for f, b in zip(forward_state, backward_state)]
else:
output = forward
hidden = forward_state
return RNNEncoderOutput(
output=[o + e for o, e in zip(output, embed_in)] if self.residual else output,
hidden=hidden,
src_mask=self.src_mask_fn(src_len, len(output))
)
@register_encoder(name='transformer')
class TransformerEncoderWrapper(EncoderBase):
def __init__(self, dsz, pc, hsz=None, num_heads=4, layers=1, dropout=0.5, name='transformer-encoder-wrapper', **kwargs):
pc = pc.add_subcollection(name=name)
super(TransformerEncoderWrapper, self).__init__(pc)
if hsz is None:
hsz = dsz
self.proj = Linear(hsz, dsz, pc) if hsz != dsz else lambda x: x
self.transformer = TransformerEncoderStack(num_heads, d_model=hsz, pc=pc, pdrop=dropout, scale=True, layers=layers)
def __call__(self, embed_in, src_len, train=False, **kwargs):
"""Input shape: ((T, H), B) Output Shape: [((H,), B)] * T"""
T = embed_in.dim()[0][0]
embed_in = dy.transpose(embed_in)
src_mask = sequence_mask(src_len, T)
src_mask = [unsqueeze(m, 2) for m in src_mask]
x = self.proj(embed_in)
output = self.transformer(x, src_mask, train=train)
output = [out for out in dy.transpose(output)]
return TransformerEncoderOutput(output=output, src_mask=src_mask)
|
en
| 0.385317
|
Input Shape: ((T, H), B). Output Shape: [((H,), B)] * T Input shape: ((T, H), B) Output Shape: [((H,), B)] * T
| 2.316501
| 2
|
gsoc/aman/get_properties.py
|
ashutosh16399/NSpM
| 78
|
6625713
|
<reponame>ashutosh16399/NSpM<filename>gsoc/aman/get_properties.py
import urllib2, urllib, httplib, json, sys, csv, io
import argparse
from bs4 import BeautifulSoup
parser = argparse.ArgumentParser()
requiredNamed = parser.add_argument_group('Required Arguments');
requiredNamed.add_argument('--url', dest='url', metavar='url', help='Webpage URL: eg-http://mappings.dbpedia.org/server/ontology/classes/Place', required=True)
args = parser.parse_args()
quote_page = args.url
page = urllib2.urlopen(quote_page)
soup = BeautifulSoup(page, "html.parser")
# print type(soup)
fl = 0
for rows in soup.find_all("tr"):
x = rows.find_all("td");
if len(x) <= 2:
fl = 1
continue
if fl == 1:
fl = 2
continue
name = rows.find_all("td")[0].get_text().replace(" (edit)","")
label = rows.find_all("td")[1].get_text()
dom = rows.find_all("td")[2].get_text()
rng = rows.find_all("td")[3].get_text()
final = name + "," + label + "," + dom + "," + rng
print final.encode('utf-8')
# with io.open("test.csv", mode='w', encoding='utf-8') as toWrite:
# writer = csv.writer(toWrite)
# writer.writerows(props)
|
import urllib2, urllib, httplib, json, sys, csv, io
import argparse
from bs4 import BeautifulSoup
parser = argparse.ArgumentParser()
requiredNamed = parser.add_argument_group('Required Arguments');
requiredNamed.add_argument('--url', dest='url', metavar='url', help='Webpage URL: eg-http://mappings.dbpedia.org/server/ontology/classes/Place', required=True)
args = parser.parse_args()
quote_page = args.url
page = urllib2.urlopen(quote_page)
soup = BeautifulSoup(page, "html.parser")
# print type(soup)
fl = 0
for rows in soup.find_all("tr"):
x = rows.find_all("td");
if len(x) <= 2:
fl = 1
continue
if fl == 1:
fl = 2
continue
name = rows.find_all("td")[0].get_text().replace(" (edit)","")
label = rows.find_all("td")[1].get_text()
dom = rows.find_all("td")[2].get_text()
rng = rows.find_all("td")[3].get_text()
final = name + "," + label + "," + dom + "," + rng
print final.encode('utf-8')
# with io.open("test.csv", mode='w', encoding='utf-8') as toWrite:
# writer = csv.writer(toWrite)
# writer.writerows(props)
|
en
| 0.86698
|
# print type(soup) # with io.open("test.csv", mode='w', encoding='utf-8') as toWrite: # writer = csv.writer(toWrite) # writer.writerows(props)
| 3.080496
| 3
|
statham/schema/parser.py
|
george-fry/statham-schema
| 23
|
6625714
|
# pylint: disable=too-many-lines
"""Parsing tools to convert JSON Schema dictionaries to Element instances.
Some JSON Schema documents will be converted to an equivalent but structurally
differing representation. In particular, those that combine composition
keywords or use multiple types will be recomposed using ``"allOf"`` and
``"anyOf"`` respectively. See full docs for more
details.
"""
from collections import defaultdict
from functools import partial
import inspect
from itertools import chain
import operator as op
import re
import string
from typing import Any, Callable, DefaultDict, Dict, Iterable, List, Type, Union
import unicodedata
from statham.schema.constants import (
COMPOSITION_KEYWORDS,
NotPassed,
UNSUPPORTED_SCHEMA_KEYWORDS,
)
from statham.schema.elements import (
AllOf,
AnyOf,
Array,
Boolean,
CompositionElement,
Integer,
Not,
Nothing,
Null,
Number,
Object,
OneOf,
Element,
String,
)
from statham.schema.elements.meta import (
ObjectClassDict,
ObjectMeta,
RESERVED_PROPERTIES,
)
from statham.schema.exceptions import (
FeatureNotImplementedError,
SchemaParseError,
)
from statham.schema.helpers import expand, reraise, split_dict
from statham.schema.property import _Property
_TYPE_MAPPING = {
"array": Array,
"boolean": Boolean,
"integer": Integer,
"null": Null,
"number": Number,
"string": String,
}
class _ParseState:
    """Recursive parsing state.
    De-duplicates models that are traversed more than once and gives
    distinct models sharing a name unique, suffixed names.
    """
    def __init__(self):
        self.seen: DefaultDict[str, List[ObjectMeta]] = defaultdict(list)
    def dedupe(self, object_type: ObjectMeta):
        """Deduplicate a parsed model.
        Return the previously-recorded model when an identical one exists;
        otherwise make the new model's name unique among same-named models
        and record it.
        """
        name = object_type.__name__
        known = self.seen[name]
        duplicate = next(
            (existing for existing in known if object_type == existing), None
        )
        if duplicate is not None:
            return duplicate
        if known:
            object_type.__name__ = f"{name}_{len(known)}"
        known.append(object_type)
        return object_type
def parse(schema: Dict[str, Any]) -> List[Element]:
    """Parse a JSON Schema document to Element format.
    References must already be resolved, and every ``"object"`` (sub-)schema
    must carry either a ``"title"`` or an ``"_x_autotitle"`` annotation. See
    `json-ref-dict <https://pypi.org/project/json-ref-dict/0.6.0/>`_ for
    reference resolution and annotation tools.
    :return: A list of schema elements: the top-level element first, then
        one element per entry of the top-level ``"definitions"``.
    """
    state = _ParseState()
    elements = [parse_element(schema, state)]
    for definition in schema.get("definitions", {}).values():
        # Skip malformed definition values.
        if isinstance(definition, (dict, bool, Element)):
            elements.append(parse_element(definition, state))
    return elements
@reraise(
    RecursionError,
    FeatureNotImplementedError,
    "Could not parse cyclical dependencies of this schema.",
)
def parse_element(
    schema: Union[bool, Dict[str, Any]], state: _ParseState = None
) -> Element:
    """Parse a single JSON Schema element to an Element object.
    Called by :func:`parse` when parsing entire documents.
    >>> parse_element({"type": "string", "minLength": 3})
    String(minLength=3)
    :raises: :exc:`~statham.schema.exceptions.FeatureNotImplementedError` if
    recursive cycles are detected.
    :raises: :exc:`statham.schema.exceptions.SchemaParseError` if problems are
    found in the provided schema.
    :return: A single :class:`~statham.schema.elements.Element` object
    equivalent to the schema described by :paramref:`parse_element.schema`.
    """
    # Boolean schemas: `true` accepts any instance, `false` accepts none.
    if isinstance(schema, bool):
        return Element() if schema else Nothing()
    state = state or _ParseState()
    # Already-parsed elements pass straight through (parsing is re-entrant).
    if isinstance(schema, Element):
        return schema
    if set(schema) & UNSUPPORTED_SCHEMA_KEYWORDS:
        raise FeatureNotImplementedError.unsupported_keywords(
            set(schema) & UNSUPPORTED_SCHEMA_KEYWORDS
        )
    # Literal-valued keywords hold plain data, not schemas: strip the
    # annotations injected during reference resolution.
    for literal_key in ("default", "const", "enum"):
        if literal_key in schema:
            schema[literal_key] = _parse_literal(schema[literal_key])
    # NOTE: `schema` is mutated in place — each keyword below is replaced
    # with its parsed form so downstream constructors (_parse_object,
    # _parse_array, _parse_composition) receive parsed sub-elements.
    for keyword, parser in (
        ("properties", _parse_properties),
        ("items", _parse_items),
        ("patternProperties", _parse_pattern_properties),
        ("propertyNames", _parse_property_names),
        ("contains", _parse_contains),
        ("dependencies", _parse_dependencies),
    ):
        if keyword in schema:
            schema[keyword] = parser(schema, state)  # type: ignore
    # The additional* keywords default to True when absent, so they are
    # always (re)assigned here rather than guarded by membership.
    schema["additionalProperties"] = _parse_additional_properties(schema, state)
    schema["additionalItems"] = _parse_additional_items(schema, state)
    if set(COMPOSITION_KEYWORDS) & set(schema):
        return _parse_composition(schema, state)
    # No "type" keyword: an untyped element carrying whatever applies.
    if "type" not in schema:
        return Element(**_keyword_filter(Element)(schema))
    return _parse_typed(schema["type"], schema, state)
def _parse_literal(literal: Any) -> Any:
    """Strip schema annotations from literal values.
    Keywords such as `const`, `enum` and `default` hold plain data rather
    than schemas, so the `_x_autotitle` annotation must be removed
    recursively to avoid side effects.
    """
    if isinstance(literal, list):
        return [_parse_literal(item) for item in literal]
    if isinstance(literal, dict):
        return {
            key: _parse_literal(value)
            for key, value in literal.items()
            if key != "_x_autotitle"
        }
    return literal
def _parse_contains(
    schema: Dict[str, Any], state: _ParseState = None
) -> Element:
    """Parse the ``contains`` keyword of *schema* into an Element."""
    return parse_element(schema["contains"], state or _ParseState())
def _parse_composition(
    schema: Dict[str, Any], state: _ParseState = None
) -> Element:
    """Parse a schema with composition keywords.
    Handles multiple composition keywords by wrapping them in an AllOf
    element. Similarly, non-keyword elements are parsed as usual and
    included in an allOf element.
    For example:
    ```python
    schema = {
        "type": "integer",
        "oneOf": [{"minimum": 3}, {"maximum": 5}],
        "anyOf": [{"multipleOf": 2}, {"multipleOf": 3}],
    }
    _parse_composition(schema) == AllOf(
        Integer(),
        OneOf(Element(minimum=3), Element(maximum=5)),
        AnyOf(Element(multipleOf=2), Element(multipleOf=3)),
    )
    ```
    """
    state = state or _ParseState()
    # Separate composition keywords (plus "default", which must not be
    # parsed as part of the base element) from the vanilla keywords.
    composition, other = split_dict(set(COMPOSITION_KEYWORDS) | {"default"})(
        schema
    )
    base_element = parse_element(other, state)
    for key in set(COMPOSITION_KEYWORDS) - {"not"}:
        composition[key] = [
            parse_element(sub_schema, state)
            for sub_schema in composition.get(key, [])
        ]
    all_of = [base_element] + composition["allOf"]
    all_of.append(_compose_elements(OneOf, composition["oneOf"]))
    all_of.append(_compose_elements(AnyOf, composition["anyOf"]))
    if "not" in composition:
        all_of.append(Not(parse_element(schema["not"], state)))
    # Trivial (empty) elements contribute nothing; drop them before
    # composing the remainder into a single element.
    element = _compose_elements(
        AllOf, filter(partial(op.ne, Element()), all_of)
    )
    default = schema.get("default", NotPassed())
    if isinstance(element, ObjectMeta):
        return AllOf(element, default=default)
    # Bug fix: this was `element.default = default or element.default`,
    # which silently discarded explicitly-declared falsy defaults such as
    # 0, False or "". Only skip assignment when no default was passed.
    if not isinstance(default, NotPassed):
        element.default = default
    return element
def _parse_typed(
    type_value: Any, schema: Dict[str, Any], state: _ParseState = None
) -> Element:
    """Parse a typed schema with no composition keywords."""
    state = state or _ParseState()
    # A list of types is recomposed as an anyOf over each single type.
    if isinstance(type_value, list):
        return _parse_multi_typed(type_value, schema, state)
    if not isinstance(type_value, str):
        raise SchemaParseError.invalid_type(type_value)
    # Objects and arrays need structural construction; everything else
    # maps directly onto a primitive element type.
    if type_value == "object":
        return _parse_object(schema, state)
    if type_value == "array":
        return _parse_array(schema, state)
    element_type = _TYPE_MAPPING[type_value]
    return element_type(**_keyword_filter(element_type)(schema))
def _parse_multi_typed(
    type_list: List[str], schema: Dict[str, Any], state: _ParseState = None
) -> Element:
    """Parse a schema with multiple type values.
    Converts schema to an equivalent representation using "anyOf". For example:
    ```python
    {"type": ["string", "integer"]}
    ```
    becomes
    ```
    {"anyOf": [{"type": "string"}, {"type": "integer"}]}
    ```
    """
    state = state or _ParseState()
    # The default belongs to the composed element, not to each branch, so
    # it is stripped from the per-type schemas and re-applied on the AnyOf.
    default = schema.get("default", NotPassed())
    schema = {key: val for key, val in schema.items() if key != "default"}
    # Single-entry type lists degenerate to an ordinary typed schema.
    # NOTE(review): on this path a declared "default" is dropped entirely —
    # confirm whether that is intended.
    if len(type_list) == 1:
        return parse_element({**schema, "type": type_list[0]}, state)
    return AnyOf(
        *(
            parse_element({**schema, "type": type_value}, state)
            for type_value in type_list
        ),
        default=default,
    )
def _parse_object(
    schema: Dict[str, Any], state: _ParseState = None
) -> ObjectMeta:
    """Parse an object schema element to an `Object` subclass.
    The name of the generated class is derived from the following keys
    in precedence order:
    - "title"
    - "_x_autotitle"
    Also requires that "properties" and "additionalProperties" values have
    already been parsed.
    :raises SchemaParseError: if no keys exist from which to derive the class
    title.
    :return: The ``Object`` model equivalent to the schema.
    """
    state = state or _ParseState()
    title = schema.get("title", schema.get("_x_autotitle"))
    if not title:
        raise SchemaParseError.missing_title(schema)
    title = _title_format(title)
    properties = schema.get("properties", {})
    # Names listed in "required" but not declared under "properties" still
    # constrain validation: give each an untyped, required property.
    properties.update(
        {
            _parse_attribute_name(key): _Property(
                Element(), required=True, source=key
            )
            for key in schema.get("required", [])
            if _parse_attribute_name(key) not in properties
        }
    )
    # ObjectClassDict collects the properties as a class body would, for
    # consumption by the ObjectMeta metaclass below.
    class_dict = ObjectClassDict()
    for key, value in properties.items():
        class_dict[key] = value
    cls_args = dict(additionalProperties=schema["additionalProperties"])
    # Optional keywords are forwarded to the metaclass only when present.
    for key in [
        "patternProperties",
        "minProperties",
        "maxProperties",
        "propertyNames",
        "dependencies",
        "const",
        "enum",
        "default",
    ]:
        if key in schema:
            cls_args[key] = schema[key]
    object_type = ObjectMeta(title, (Object,), class_dict, **cls_args)
    # Collapse identical models parsed twice; distinct same-named models
    # get a numeric suffix (see _ParseState.dedupe).
    return state.dedupe(object_type)
def _parse_properties(
    schema: Dict[str, Any], state: _ParseState = None
) -> Dict[str, _Property]:
    """Parse properties from a schema element."""
    state = state or _ParseState()
    required = set(schema.get("required", []))
    properties = schema.get("properties", {})
    parsed: Dict[str, _Property] = {}
    # First pass: schema values (dicts/booleans) are parsed into
    # properties; malformed values are ignored.
    for key, value in properties.items():
        if isinstance(value, (dict, bool)):
            parsed[_parse_attribute_name(key)] = _Property(
                parse_element(value, state),
                required=key in required,
                source=key,
            )
    # Second pass: already-constructed properties pass through unchanged,
    # taking precedence over the parsed entries.
    for key, value in properties.items():
        if isinstance(value, _Property):
            parsed[_parse_attribute_name(key)] = value
    return parsed
def _parse_attribute_name(name: str) -> str:
    """Convert an attribute name to a valid python attribute.
    Special characters are replaced with their unicode names, whitespace
    and hyphens become underscores, and reserved/illegal results are
    prefixed or suffixed with underscores.
    """
    def _char_map(idx: int, char: str) -> str:
        if char.isalnum() or char in ("_", "-", " "):
            return char
        if char in string.whitespace:
            return "_"
        label = unicodedata.name(char, "unknown").lower()
        if idx != 0 and name[idx - 1] != "_":
            label = "_" + label
        if idx != len(name) - 1 and name[idx + 1] != "_":
            label = label + "_"
        return label
    converted = "".join(
        _char_map(idx, char) for idx, char in enumerate(name)
    )
    name = converted.replace(" ", "_").replace("-", "_")
    if not name:
        return "blank"
    # Identifiers must start with a letter or underscore.
    if name[0] not in set(string.ascii_letters) | {"_"}:
        name = f"_{name}"
    # Avoid clobbering names reserved by the Object machinery.
    if name in RESERVED_PROPERTIES:
        name = f"{name}_"
    return name
def _parse_pattern_properties(
    schema: Dict[str, Any], state: _ParseState = None
) -> Dict[str, Element]:
    """Parse schema patternProperties keyword."""
    state = state or _ParseState()
    pattern_properties = schema["patternProperties"]
    result: Dict[str, Element] = {}
    # Schema values (dicts/booleans) are parsed; other values are ignored.
    for pattern, value in pattern_properties.items():
        if isinstance(value, (dict, bool)):
            result[pattern] = parse_element(value, state)
    # Already-parsed elements pass through, taking precedence.
    for pattern, value in pattern_properties.items():
        if isinstance(value, Element):
            result[pattern] = value
    return result
def _parse_property_names(
    schema: Dict[str, Any], state: _ParseState = None
) -> Element:
    """Parse the ``propertyNames`` keyword of *schema* into an Element."""
    return parse_element(schema["propertyNames"], state or _ParseState())
def _parse_additional(
    key: str, schema: Dict[str, Any], state: _ParseState = None
) -> Union[Element, bool]:
    """Parse additional items or properties.
    Unlike general schemas, boolean values are kept as-is for these
    keywords, since they are more semantically meaningful there.
    """
    additional = schema.get(key, True)
    if isinstance(additional, bool):
        return additional
    return parse_element(additional, state or _ParseState())
def _parse_additional_properties(
    schema: Dict[str, Any], state: _ParseState = None
) -> Union[Element, bool]:
    """Parse additionalProperties from a schema element.
    If key is not present, defaults to `True`.
    """
    return _parse_additional("additionalProperties", schema, state)
def _parse_additional_items(
    schema: Dict[str, Any], state: _ParseState = None
) -> Union[Element, bool]:
    """Parse additionalItems from a schema element.
    If key is not present, defaults to `True`.
    """
    return _parse_additional("additionalItems", schema, state)
def _parse_array(schema: Dict[str, Any], state: _ParseState = None) -> Array:
    """Parse an array schema element."""
    state = state or _ParseState()
    keywords = _keyword_filter(Array)(schema)
    # "items" defaults to the trivial element when absent.
    keywords["items"] = schema.get("items", Element())
    return Array(**keywords)
def _parse_items(
    schema: Dict[str, Any], state: _ParseState = None
) -> Union[Element, List[Element]]:
    """Parse array items keyword to a schema Element.
    Defaults to an empty schema when absent; tuple-form (list) items are
    parsed element by element.
    """
    state = state or _ParseState()
    items = schema.get("items", {})
    if not isinstance(items, list):
        return parse_element(items, state)
    return [parse_element(item, state) for item in items]
def _parse_dependencies(
    schema: Dict[str, Any], state: _ParseState = None
) -> Dict[str, Union[List[str], Element]]:
    """Parse dependencies keyword from schema."""
    state = state or _ParseState()
    dependencies = schema["dependencies"]
    result: Dict[str, Union[List[str], Element]] = {}
    # Property-dependency lists and pre-parsed elements pass through.
    for key, value in dependencies.items():
        if isinstance(value, (list, Element)):
            result[key] = value
    # Schema-dependency values are parsed into elements.
    for key, value in dependencies.items():
        if isinstance(value, (dict, bool)):
            result[key] = parse_element(value, state)
    return result
def _compose_elements(
    element_type: Type[CompositionElement], elements: Iterable[Element]
) -> Element:
    """Build a composition element from a type and its component elements.
    Trivial cases are simplified: no components yields an untyped Element,
    and a single component is returned unwrapped.
    """
    components = list(elements)
    if len(components) == 1:
        return components[0]
    if not components:
        return Element()
    return element_type(*components)
def _keyword_filter(type_: Type) -> Callable[[Dict[str, Any]], Dict[str, Any]]:
    """Create a filter keeping only the keywords a given type accepts.
    The accepted keywords are read from the signature of the type's
    ``__init__``.
    """
    signature = inspect.signature(type_.__init__)
    accepted = {param.name for param in signature.parameters.values()}
    def _filter(schema: Dict[str, Any]) -> Dict[str, Any]:
        return {key: value for key, value in schema.items() if key in accepted}
    return _filter
def _title_format(name: str) -> str:
    """Convert titles in schemas to PascalCase class names.
    Splits on non-alphanumeric characters, then further on camelCase
    boundaries, and title-cases each resulting segment.
    """
    words = [word for word in re.split(r"[^a-zA-Z0-9]", name) if word]
    segments = []
    for word in words:
        capitalised = word[0].upper() + word[1:]
        segments.extend(re.findall(r"[A-Z][^A-Z]*", capitalised))
    return "".join(segment.title() for segment in segments)
|
# pylint: disable=too-many-lines
"""Parsing tools to convert JSON Schema dictionaries to Element instances.
Some JSON Schema documents will be converted to an equivalent but structurally
differing representation. In particular, those that combine composition
keywords or use multiple types will be recomposed using ``"allOf"`` and
``"anyOf"`` respectively. See full docs for more
details.
"""
from collections import defaultdict
from functools import partial
import inspect
from itertools import chain
import operator as op
import re
import string
from typing import Any, Callable, DefaultDict, Dict, Iterable, List, Type, Union
import unicodedata
from statham.schema.constants import (
COMPOSITION_KEYWORDS,
NotPassed,
UNSUPPORTED_SCHEMA_KEYWORDS,
)
from statham.schema.elements import (
AllOf,
AnyOf,
Array,
Boolean,
CompositionElement,
Integer,
Not,
Nothing,
Null,
Number,
Object,
OneOf,
Element,
String,
)
from statham.schema.elements.meta import (
ObjectClassDict,
ObjectMeta,
RESERVED_PROPERTIES,
)
from statham.schema.exceptions import (
FeatureNotImplementedError,
SchemaParseError,
)
from statham.schema.helpers import expand, reraise, split_dict
from statham.schema.property import _Property
_TYPE_MAPPING = {
"array": Array,
"boolean": Boolean,
"integer": Integer,
"null": Null,
"number": Number,
"string": String,
}
class _ParseState:
"""Recusive state.
Used to de-duplicate models which are traversed multiple times, and to
rename distinct models with the same name.
"""
def __init__(self):
self.seen: DefaultDict[str, List[ObjectMeta]] = defaultdict(list)
def dedupe(self, object_type: ObjectMeta):
"""Deduplicate a parsed model.
If it has been seen before, then return the existing one. Otherwise
ensure the model's name is distinct from other models and keep store
it.
"""
name = object_type.__name__
for existing in self.seen[name]:
if object_type == existing:
return existing
count = len(self.seen[name])
if count:
object_type.__name__ = name + f"_{count}"
self.seen[name].append(object_type)
return object_type
def parse(schema: Dict[str, Any]) -> List[Element]:
"""Parse a JSON Schema document to Element format.
Assumes references are already resolved, and that any ``"object"`` schemas
or sub-schemas contain either a ``"title"`` annotation or an
``"_x_autotitle"`` annotation. See
`json-ref-dict <https://pypi.org/project/json-ref-dict/0.6.0/>`_ for
reference resolution and annotation tools.
:return: A list of schema elements, starting with the top level element,
followed by each element in the top-level schema ``"definitions"``.
"""
state = _ParseState()
return [parse_element(schema, state)] + [
parse_element(definition, state)
for definition in schema.get("definitions", {}).values()
if isinstance(definition, (dict, bool, Element))
]
@reraise(
RecursionError,
FeatureNotImplementedError,
"Could not parse cyclical dependencies of this schema.",
)
def parse_element(
schema: Union[bool, Dict[str, Any]], state: _ParseState = None
) -> Element:
"""Parse a single JSON Schema element to an Element object.
Called by :func:`parse` when parsing entire documents.
>>> parse_element({"type": "string", "minLength": 3})
String(minLength=3)
:raises: :exc:`~statham.schema.exceptions.FeatureNotImplementedError` if
recursive cycles are detected.
:raises: :exc:`statham.schema.exceptions.SchemaParseError` if problems are
found in the provided schema.
:return: A single :class:`~statham.schema.elements.Element` object
equivalent to the schema described by :paramref:`parse_element.schema`.
"""
if isinstance(schema, bool):
return Element() if schema else Nothing()
state = state or _ParseState()
if isinstance(schema, Element):
return schema
if set(schema) & UNSUPPORTED_SCHEMA_KEYWORDS:
raise FeatureNotImplementedError.unsupported_keywords(
set(schema) & UNSUPPORTED_SCHEMA_KEYWORDS
)
for literal_key in ("default", "const", "enum"):
if literal_key in schema:
schema[literal_key] = _parse_literal(schema[literal_key])
for keyword, parser in (
("properties", _parse_properties),
("items", _parse_items),
("patternProperties", _parse_pattern_properties),
("propertyNames", _parse_property_names),
("contains", _parse_contains),
("dependencies", _parse_dependencies),
):
if keyword in schema:
schema[keyword] = parser(schema, state) # type: ignore
schema["additionalProperties"] = _parse_additional_properties(schema, state)
schema["additionalItems"] = _parse_additional_items(schema, state)
if set(COMPOSITION_KEYWORDS) & set(schema):
return _parse_composition(schema, state)
if "type" not in schema:
return Element(**_keyword_filter(Element)(schema))
return _parse_typed(schema["type"], schema, state)
def _parse_literal(literal: Any) -> Any:
"""Parse literal values from schema.
Keywords like `const`, `enum` and `default` refer to non-schema values.
Annotations should be removed to prevent side effects.
"""
if not isinstance(literal, (dict, list)):
return literal
if isinstance(literal, list):
return [_parse_literal(val) for val in literal]
return {
key: _parse_literal(val)
for key, val in literal.items()
if key != "_x_autotitle"
}
def _parse_contains(
    schema: Dict[str, Any], state: _ParseState = None
) -> Element:
    """Parse the ``contains`` keyword of a schema into an Element."""
    return parse_element(schema["contains"], state or _ParseState())
def _parse_composition(
    schema: Dict[str, Any], state: _ParseState = None
) -> Element:
    """Parse a schema with composition keywords.

    Handles multiple composition keywords by wrapping them in an AllOf
    element. Similarly, non-keyword elements are parsed as usual and
    included in an allOf element.

    For example:
    ```python
    schema = {
        "type": "integer",
        "oneOf": [{"minimum": 3}, {"maximum": 5}],
        "anyOf": [{"multipleOf": 2}, {"multipleOf": 3}],
    }
    _parse_composition(schema) == AllOf(
        Integer(),
        OneOf(Element(minimum=3), Element(maximum=5)),
        AnyOf(Element(multipleOf=2), Element(multipleOf=3)),
    )
    ```
    """
    state = state or _ParseState()
    # Split composition keywords (plus "default") from the remaining
    # vanilla keywords; the latter are parsed as a standalone element.
    composition, other = split_dict(set(COMPOSITION_KEYWORDS) | {"default"})(
        schema
    )
    base_element = parse_element(other, state)
    # Parse each list-valued composition keyword ("not" is single-valued).
    for key in set(COMPOSITION_KEYWORDS) - {"not"}:
        composition[key] = [
            parse_element(sub_schema, state)
            for sub_schema in composition.get(key, [])
        ]
    all_of = [base_element] + composition["allOf"]
    all_of.append(_compose_elements(OneOf, composition["oneOf"]))
    all_of.append(_compose_elements(AnyOf, composition["anyOf"]))
    if "not" in composition:
        all_of.append(Not(parse_element(schema["not"], state)))
    # Trivial (blank) elements are filtered out before composing.
    element = _compose_elements(
        AllOf, filter(partial(op.ne, Element()), all_of)
    )
    default = schema.get("default", NotPassed())
    # Object classes cannot carry an instance default directly, so wrap.
    if isinstance(element, ObjectMeta):
        return AllOf(element, default=default)
    # NOTE(review): ``default or element.default`` discards a falsy
    # default such as ``0`` or ``False`` — confirm this is intended.
    element.default = default or element.default
    return element
def _parse_typed(
    type_value: Any, schema: Dict[str, Any], state: _ParseState = None
) -> Element:
    """Dispatch a typed schema (no composition keywords) to its parser."""
    state = state or _ParseState()
    if isinstance(type_value, list):
        return _parse_multi_typed(type_value, schema, state)
    if not isinstance(type_value, str):
        raise SchemaParseError.invalid_type(type_value)
    if schema["type"] == "object":
        return _parse_object(schema, state)
    if schema["type"] == "array":
        return _parse_array(schema, state)
    element_type = _TYPE_MAPPING[type_value]
    return element_type(**_keyword_filter(element_type)(schema))
def _parse_multi_typed(
    type_list: List[str], schema: Dict[str, Any], state: _ParseState = None
) -> CompositionElement:
    """Parse a schema whose ``type`` is a list of type names.

    Rewritten as the equivalent ``anyOf`` of single-typed schemas, e.g.
    ``{"type": ["string", "integer"]}`` becomes
    ``{"anyOf": [{"type": "string"}, {"type": "integer"}]}``.
    """
    state = state or _ParseState()
    default = schema.get("default", NotPassed())
    without_default = {
        key: value for key, value in schema.items() if key != "default"
    }
    if len(type_list) == 1:
        return parse_element({**without_default, "type": type_list[0]}, state)
    alternatives = (
        parse_element({**without_default, "type": type_name}, state)
        for type_name in type_list
    )
    return AnyOf(*alternatives, default=default)
def _parse_object(
    schema: Dict[str, Any], state: _ParseState = None
) -> ObjectMeta:
    """Construct an ``Object`` subclass from an object schema element.

    The class name is derived from ``title``, falling back to
    ``_x_autotitle``. Requires ``properties`` and ``additionalProperties``
    values to have been parsed already.

    :raises SchemaParseError: if no title can be derived.
    :return: The ``Object`` model equivalent to the schema.
    """
    state = state or _ParseState()
    raw_title = schema.get("title", schema.get("_x_autotitle"))
    if not raw_title:
        raise SchemaParseError.missing_title(schema)
    class_name = _title_format(raw_title)
    properties = schema.get("properties", {})
    # Required names with no declared schema still become required
    # properties accepting any value.
    missing_required = {
        _parse_attribute_name(name): _Property(
            Element(), required=True, source=name
        )
        for name in schema.get("required", [])
        if _parse_attribute_name(name) not in properties
    }
    properties.update(missing_required)
    class_dict = ObjectClassDict()
    for attr_name, prop in properties.items():
        class_dict[attr_name] = prop
    cls_args = {"additionalProperties": schema["additionalProperties"]}
    optional_keywords = (
        "patternProperties",
        "minProperties",
        "maxProperties",
        "propertyNames",
        "dependencies",
        "const",
        "enum",
        "default",
    )
    cls_args.update(
        {key: schema[key] for key in optional_keywords if key in schema}
    )
    object_type = ObjectMeta(class_name, (Object,), class_dict, **cls_args)
    # De-duplicate against previously-seen models via the parse state.
    return state.dedupe(object_type)
def _parse_properties(
    schema: Dict[str, Any], state: _ParseState = None
) -> Dict[str, _Property]:
    """Parse the ``properties`` keyword into a name -> property mapping."""
    state = state or _ParseState()
    required = set(schema.get("required", []))
    properties = schema.get("properties", {})
    parsed: Dict[str, _Property] = {}
    for key, value in properties.items():
        if isinstance(value, (dict, bool)):
            parsed[_parse_attribute_name(key)] = _Property(
                parse_element(value, state),
                required=key in required,
                source=key,
            )
    # Pre-constructed properties win over freshly parsed ones.
    for key, value in properties.items():
        if isinstance(value, _Property):
            parsed[_parse_attribute_name(key)] = value
    # Values of any other type are malformed and silently dropped.
    return parsed
def _parse_attribute_name(name: str) -> str:
    """Convert an attribute name to a valid python attribute name.

    Special characters are replaced with their unicode names where
    possible; reserved names and invalid leading characters are escaped
    with underscores.
    """
    def _char_map(idx: int, char: str) -> str:
        if char.isalnum() or char in ("_", "-", " "):
            return char
        if char in string.whitespace:
            return "_"
        label = unicodedata.name(char, "unknown").lower()
        # Pad the unicode label with underscores unless a neighbouring
        # character already provides one.
        if idx != 0 and name[idx - 1] != "_":
            label = "_" + label
        if idx != len(name) - 1 and name[idx + 1] != "_":
            label = label + "_"
        return label
    mapped = map(expand(_char_map), enumerate(name))
    name = "".join(mapped).replace(" ", "_").replace("-", "_")
    if not name:
        return "blank"
    if name[0] not in set(string.ascii_letters) | {"_"}:
        name = f"_{name}"
    if name in RESERVED_PROPERTIES:
        name = f"{name}_"
    return name
def _parse_pattern_properties(
    schema: Dict[str, Any], state: _ParseState = None
) -> Dict[str, Element]:
    """Parse the ``patternProperties`` keyword (pattern -> Element)."""
    state = state or _ParseState()
    result: Dict[str, Element] = {}
    for pattern, sub_schema in schema["patternProperties"].items():
        if isinstance(sub_schema, (dict, bool)):
            result[pattern] = parse_element(sub_schema, state)
    # Pre-parsed Elements win over freshly parsed ones.
    for pattern, sub_schema in schema["patternProperties"].items():
        if isinstance(sub_schema, Element):
            result[pattern] = sub_schema
    return result
def _parse_property_names(
    schema: Dict[str, Any], state: _ParseState = None
) -> Element:
    """Parse the ``propertyNames`` keyword into an Element."""
    return parse_element(schema["propertyNames"], state or _ParseState())
def _parse_additional(
    key: str, schema: Dict[str, Any], state: _ParseState = None
) -> Union[Element, bool]:
    """Parse ``additionalItems``/``additionalProperties`` values.

    Booleans are retained for these keywords, as they are more
    semantically meaningful than in general schemas. A missing key
    defaults to ``True``.
    """
    value = schema.get(key, True)
    if isinstance(value, bool):
        return value
    return parse_element(value, state or _ParseState())
def _parse_additional_properties(
    schema: Dict[str, Any], state: _ParseState = None
) -> Union[Element, bool]:
    """Parse ``additionalProperties`` from a schema element.

    If the key is not present, defaults to ``True``.
    """
    return _parse_additional("additionalProperties", schema, state)
def _parse_additional_items(
    schema: Dict[str, Any], state: _ParseState = None
) -> Union[Element, bool]:
    """Parse ``additionalItems`` from a schema element.

    If the key is not present, defaults to ``True``.
    """
    # Docstring fix: this parses additionalItems, not additionalProperties
    # (the previous docstring was a copy-paste of the sibling helper).
    return _parse_additional("additionalItems", schema, state)
def _parse_array(schema: Dict[str, Any], state: _ParseState = None) -> Array:
    """Parse an array schema element into an ``Array``."""
    keywords = _keyword_filter(Array)(schema)
    keywords["items"] = schema.get("items", Element())
    return Array(**keywords)
def _parse_items(
    schema: Dict[str, Any], state: _ParseState = None
) -> Union[Element, List[Element]]:
    """Parse the ``items`` keyword (single schema or tuple form).

    Defaults to ``Element()`` when absent.
    """
    state = state or _ParseState()
    items = schema.get("items", {})
    if not isinstance(items, list):
        return parse_element(items, state)
    return [parse_element(entry, state) for entry in items]
def _parse_dependencies(
    schema: Dict[str, Any], state: _ParseState = None
) -> Dict[str, Union[List[str], Element]]:
    """Parse the ``dependencies`` keyword from a schema."""
    state = state or _ParseState()
    result: Dict[str, Union[List[str], Element]] = {}
    for key, value in schema["dependencies"].items():
        if isinstance(value, (list, Element)):
            result[key] = value
    # Dict/bool sub-schemas are parsed, overriding duplicates above.
    for key, value in schema["dependencies"].items():
        if isinstance(value, (dict, bool)):
            result[key] = parse_element(value, state)
    return result
def _compose_elements(
element_type: Type[CompositionElement], elements: Iterable[Element]
) -> Element:
"""Create a composition element from a type and list of component elements.
Filters out trivial elements, and simplifies compositions with only one
composed element.
"""
elements = list(elements)
if not elements:
return Element()
if len(elements) == 1:
return elements[0]
return element_type(*elements)
def _keyword_filter(type_: Type) -> Callable[[Dict[str, Any]], Dict[str, Any]]:
"""Create a filter to pull out only relevant keywords for a given type."""
params = inspect.signature(type_.__init__).parameters.values()
args = {param.name for param in params}
def _filter(schema: Dict[str, Any]) -> Dict[str, Any]:
return {key: value for key, value in schema.items() if key in args}
return _filter
def _title_format(name: str) -> str:
"""Convert titles in schemas to class names."""
words = list(filter(None, re.split("[^a-zA-Z0-9]", name)))
segments = chain.from_iterable(
[
re.findall("[A-Z][^A-Z]*", word[0].upper() + word[1:])
for word in words
]
)
return "".join(segment.title() for segment in segments)
|
en
| 0.693054
|
# pylint: disable=too-many-lines Parsing tools to convert JSON Schema dictionaries to Element instances. Some JSON Schema documents will be converted to an equivalent but structurally differing representation. In particular, those that combine composition keywords or use multiple types will be recomposed using ``"allOf"`` and ``"anyOf"`` respectively. See full docs for more details. Recusive state. Used to de-duplicate models which are traversed multiple times, and to rename distinct models with the same name. Deduplicate a parsed model. If it has been seen before, then return the existing one. Otherwise ensure the model's name is distinct from other models and keep store it. Parse a JSON Schema document to Element format. Assumes references are already resolved, and that any ``"object"`` schemas or sub-schemas contain either a ``"title"`` annotation or an ``"_x_autotitle"`` annotation. See `json-ref-dict <https://pypi.org/project/json-ref-dict/0.6.0/>`_ for reference resolution and annotation tools. :return: A list of schema elements, starting with the top level element, followed by each element in the top-level schema ``"definitions"``. Parse a single JSON Schema element to an Element object. Called by :func:`parse` when parsing entire documents. >>> parse_element({"type": "string", "minLength": 3}) String(minLength=3) :raises: :exc:`~statham.schema.exceptions.FeatureNotImplementedError` if recursive cycles are detected. :raises: :exc:`statham.schema.exceptions.SchemaParseError` if problems are found in the provided schema. :return: A single :class:`~statham.schema.elements.Element` object equivalent to the schema described by :paramref:`parse_element.schema`. # type: ignore Parse literal values from schema. Keywords like `const`, `enum` and `default` refer to non-schema values. Annotations should be removed to prevent side effects. Parse schema contains keyword. Parse a schema with composition keywords. 
Handles multiple composition keywords by wrapping them in an AllOf element. Similarly, non-keyword elements are parsed as usual and included in an allOf element. For example: ```python schema = { "type": "integer", "oneOf": [{"minimum": 3}, {"maximum": 5}], "anyOf": [{"multipleOf": 2}, {"multipleOf": 3}], } _parse_composition(schema) == AllOf( Integer(), OneOf(Element(minimum=3), Element(maximum=5)), AnyOf(Element(multipleOf=2), Element(multipleOf=3)), ) ``` Parse a typed schema with no composition keywords. Parse a schema with multiple type values. Converts schema to an equivalent representation using "anyOf". For example: ```python {"type": ["string", "integer"]} ``` becomes ``` {"anyOf": [{"type": "string"}, {"type": "integer"}]} ``` Parse an object schema element to an `Object` subclass. The name of the generated class is derived from the following keys in precedence order: - "title" - "_x_autotitle" Also requires that "properties" and "additionalProperties" values have already been parsed. :raises SchemaParseError: if keys exist from which to derive the class title. :return: The ``Object`` model equivalent to the schema. Parse properties from a schema element. # Ignore malformed values. Convert attibute name to valid python attribute. Attempts to replace special characters with their unicode names. Parse schema patternProperties keyword. Parse schema propertyNames keyword. Parse additional items or properties. Booleans are retained for these values, as they are more semantically meaningful than in general schemas. Parse additionalProperties from a schema element. If key is not present, defaults to `True`. Parse additionalProperties from a schema element. If key is not present, defaults to `True`. Parse an array schema element. Parse array items keyword to a schema Element. If not present, defaults to `Element()`. Parse dependencies keyword from schema. Create a composition element from a type and list of component elements. 
Filters out trivial elements, and simplifies compositions with only one composed element. Create a filter to pull out only relevant keywords for a given type. Convert titles in schemas to class names.
| 2.289284
| 2
|
requisitos/a-zerinho-o-um/resolucao_zerinho.py
|
robsoncartes/projeto-javalin
| 0
|
6625715
|
<reponame>robsoncartes/projeto-javalin
def get_vencedor(index):
    """Map the position (0, 1, 2) of the odd player out to their name."""
    nomes = {0: "A", 1: "B", 2: "C"}
    return nomes.get(index)
# Read the three plays ("0" or "1") separated by spaces.
jogadas = input().split(" ")
zeros = jogadas.count("0")
uns = jogadas.count("1")
# The winner is the single player whose number differs from the others;
# otherwise the round is a draw ("*").
if zeros == 1:
    print(get_vencedor(jogadas.index("0")))
elif uns == 1:
    print(get_vencedor(jogadas.index("1")))
else:
    print("*")
|
def get_vencedor(index):
    """Map the position (0, 1, 2) of the odd player out to their name."""
    nomes = {0: "A", 1: "B", 2: "C"}
    return nomes.get(index)
# Read the three plays ("0" or "1") separated by spaces.
jogadas = input().split(" ")
zeros = jogadas.count("0")
uns = jogadas.count("1")
# The winner is the single player whose number differs from the others;
# otherwise the round is a draw ("*").
if zeros == 1:
    print(get_vencedor(jogadas.index("0")))
elif uns == 1:
    print(get_vencedor(jogadas.index("1")))
else:
    print("*")
|
none
| 1
| 3.676899
| 4
|
|
src/rsactftool/RsaCtfTool.py
|
borari/RsaCtfTool
| 0
|
6625716
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
----------------------------------------------------------------------------
"THE BEER-WARE LICENSE" (Revision 42):
ganapati (@G4N4P4T1) wrote this file. As long as you retain this notice you
can do whatever you want with this stuff. If we meet some day, and you think
this stuff is worth it, you can buy me a beer in return.
----------------------------------------------------------------------------
"""
import sys
import logging
import argparse
import requests
import urllib3
import tempfile
from glob import glob
from Crypto.PublicKey import RSA
from rsactftool.lib.rsa_attack import RSAAttack
from rsactftool.lib.rsalibnum import n2s, invmod
from rsactftool.lib.utils import get_numeric_value, print_results
from os.path import dirname, basename, isfile, join
from urllib3.exceptions import InsecureRequestWarning
from rsactftool.lib.customlogger import CustomFormatter, logger_levels
from rsactftool.lib.keys_wrapper import (
generate_pq_from_n_and_p_or_q,
generate_keys_from_p_q_e_n,
PrivateKey,
)
def main():
    """Command line entry point for RsaCtfTool.

    Parses arguments, optionally dumps or constructs keys, decrypts
    ciphertexts when a private key is available, and otherwise runs the
    configured attacks against the provided public key(s).
    """
    # Remove insecure warning for factordb.com
    urllib3.disable_warnings(InsecureRequestWarning)
    # Change recursion limit for... you know, factorizing stuff...
    sys.setrecursionlimit(5000)
    logger = logging.getLogger("global_logger")
    parser = argparse.ArgumentParser(description="RSA CTF Tool")
    parser.add_argument(
        "--publickey", help="public key file. You can use wildcards for multiple keys."
    )
    parser.add_argument(
        "--output", help="output file for results (privates keys, plaintext data)."
    )
    parser.add_argument(
        "--timeout", help="Timeout for long attacks.", default=30, type=int
    )
    parser.add_argument(
        "--createpub",
        help="Take n and e from cli and just print a public key then exit",
        action="store_true",
    )
    parser.add_argument(
        "--dumpkey",
        help="Just dump the RSA variables from a key - n,e,d,p,q",
        action="store_true",
    )
    parser.add_argument(
        "--ext",
        help="Extended dump of RSA private variables in --dumpkey mode - dp,dq,pinv,qinv).",
        action="store_true",
    )
    parser.add_argument("--uncipherfile", help="uncipher a file", default=None)
    parser.add_argument("--uncipher", help="uncipher a cipher", default=None)
    parser.add_argument(
        "--verbosity", help="verbose mode", choices=logger_levels.keys(), default="INFO"
    )
    parser.add_argument(
        "--private", help="Display private key if recovered", action="store_true"
    )
    parser.add_argument(
        "--ecmdigits",
        type=int,
        help="Optionally an estimate as to how long one of the primes is for ECM method",
        default=None,
    )
    parser.add_argument("-n", help="Specify the modulus. format : int or 0xhex")
    parser.add_argument(
        "-p", help="Specify the first prime number. format : int or 0xhex"
    )
    parser.add_argument(
        "-q", help="Specify the second prime number. format : int or 0xhex"
    )
    parser.add_argument("-e", help="Specify the public exponent. format : int or 0xhex")
    parser.add_argument("--key", help="Specify the private key file.")
    parser.add_argument("--password", help="Private key password if needed.")
    # Dynamically discover all attack modules as valid --attack choices
    attacks = glob(join(dirname(__file__), "attacks", "single_key", "*.py"))
    attacks += glob(join(dirname(__file__), "attacks", "multi_keys", "*.py"))
    attacks_filtered = [
        basename(f)[:-3] for f in attacks if isfile(f) and not f.endswith("__init__.py")
    ]
    attacks_list = [_ for _ in attacks_filtered if _ != "nullattack"] + ["all"]
    parser.add_argument(
        "--attack", help="Specify the attack mode.", default="all", choices=attacks_list
    )
    args = parser.parse_args()
    unciphers = []
    # Set logger level
    logging.basicConfig(level=logger_levels[args.verbosity])
    ch = logging.StreamHandler()
    ch.setFormatter(CustomFormatter())
    logger = logging.getLogger("global_logger")
    logger.propagate = False
    logger.addHandler(ch)
    # If no arguments, display help and exit
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    # Warn that recovered private keys will be hidden without --private
    if not args.private:
        logger.warning(
            "private argument is not set, the private key will not be displayed, even if recovered."
        )
    # Parse numeric arguments (int or 0xhex) if given
    if args.p is not None:
        args.p = get_numeric_value(args.p)
    if args.q is not None:
        args.q = get_numeric_value(args.q)
    if args.e is not None:
        args.e = get_numeric_value(args.e)
    # get n if we can
    if args.n is not None:
        args.n = get_numeric_value(args.n)
    elif args.p is not None and args.q is not None:
        args.n = args.p * args.q
    # Normalise --uncipher into a list of ciphertext byte strings
    if args.uncipher is not None:
        uncipher_array = []
        for uncipher in args.uncipher.split(","):
            uncipher = get_numeric_value(uncipher)
            uncipher_array.append(n2s(uncipher))
        args.uncipher = uncipher_array
    # Read ciphertexts from the files given via --uncipherfile
    if args.uncipherfile is not None:
        uncipher_array = []
        for uncipher in args.uncipherfile.split(","):
            try:
                with open(uncipher, "rb") as cipherfile_fd:
                    uncipher_array.append(cipherfile_fd.read())
            except OSError:
                logger.info("--uncipherfile : file not found or not readable.")
                exit(1)
        args.uncipher = uncipher_array
    # With a private key and ciphertexts, decrypt directly and exit
    if args.key and args.uncipher:
        priv_key = PrivateKey(filename=args.key, password=args.password)
        for uncipher in args.uncipher:
            # Bug fix: decrypt the loop's ciphertext; ``args.u`` never existed.
            unciphers.append(priv_key.decrypt(uncipher))
        print_results(args, None, priv_key, unciphers)
        exit(0)
    # If we have n and one of p and q, calculate the other
    if args.n and (args.p or args.q):
        args.p, args.q = generate_pq_from_n_and_p_or_q(args.n, args.p, args.q)
    # Create pubkey if requested
    if args.createpub:
        pub_key, priv_key = generate_keys_from_p_q_e_n(args.p, args.q, args.e, args.n)
        print(pub_key.decode("utf-8"))
        exit(0)
    # Load keys: build a temporary public key from n/e, or expand the
    # --publickey argument (wildcards / comma-separated list)
    tmpfile = None
    if args.publickey is None and args.e is not None and args.n is not None:
        tmpfile = tempfile.NamedTemporaryFile()
        with open(tmpfile.name, "wb") as tmpfd:
            tmpfd.write(RSA.construct((args.n, args.e)).publickey().exportKey())
        args.publickey = [tmpfile.name]
    elif args.publickey is not None:
        if "*" in args.publickey or "?" in args.publickey:
            args.publickey = glob(args.publickey)
        elif "," in args.publickey:
            args.publickey = args.publickey.split(",")
        else:
            args.publickey = [args.publickey]
    # If all RSA components are known, reconstruct the key pair directly
    if (
        args.p is not None
        and args.q is not None
        and args.e is not None
        and args.n is not None
    ):
        pub_key, priv_key = generate_keys_from_p_q_e_n(args.p, args.q, args.e, args.n)
        if args.createpub:
            print(pub_key)
        if args.uncipher is not None:
            for uncipher in args.uncipher:
                # Bug fix: decrypt each ciphertext rather than the whole list.
                unciphers.append(priv_key.decrypt(uncipher))
        print_results(args, args.publickey[0], priv_key, unciphers)
        exit(0)
    # Dump public key information and exit
    if (
        args.dumpkey
        and not args.private
        and args.uncipher is None
        and args.uncipherfile is None
        and args.publickey is not None
    ):
        for publickey in args.publickey:
            logger.info("Details for %s:" % publickey)
            with open(publickey, "rb") as key_data_fd:
                key = RSA.importKey(key_data_fd.read())
            print("n: " + str(key.n))
            print("e: " + str(key.e))
        exit(0)
    # if dumpkey mode dump the key components then quit
    if args.key is not None and args.dumpkey:
        # Bug fix: close the key file instead of leaking the handle.
        with open(args.key, "rb") as key_fd:
            key_data = key_fd.read()
        key = RSA.importKey(key_data)
        print("n: " + str(key.n))
        print("e: " + str(key.e))
        if key.has_private():
            print("d: " + str(key.d))
            print("p: " + str(key.p))
            print("q: " + str(key.q))
            if args.ext:
                dp = key.d % (key.p - 1)
                dq = key.d % (key.q - 1)
                pinv = invmod(key.p, key.q)
                qinv = invmod(key.q, key.p)
                print("dp: " + str(dp))
                print("dq: " + str(dq))
                print("pinv: " + str(pinv))
                print("qinv: " + str(qinv))
        exit(0)
    # Run attacks
    found = False
    attackobj = RSAAttack(args)
    if len(args.publickey) > 1:
        found = attackobj.attack_multiple_keys(args.publickey, attacks_list)
    if not found:
        for publickey in args.publickey:
            attackobj.implemented_attacks = []
            logger.info("\n[*] Testing key %s." % publickey)
            attackobj.attack_single_key(publickey, attacks_list)
            attackobj.unciphered = []
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
----------------------------------------------------------------------------
"THE BEER-WARE LICENSE" (Revision 42):
ganapati (@G4N4P4T1) wrote this file. As long as you retain this notice you
can do whatever you want with this stuff. If we meet some day, and you think
this stuff is worth it, you can buy me a beer in return.
----------------------------------------------------------------------------
"""
import sys
import logging
import argparse
import requests
import urllib3
import tempfile
from glob import glob
from Crypto.PublicKey import RSA
from rsactftool.lib.rsa_attack import RSAAttack
from rsactftool.lib.rsalibnum import n2s, invmod
from rsactftool.lib.utils import get_numeric_value, print_results
from os.path import dirname, basename, isfile, join
from urllib3.exceptions import InsecureRequestWarning
from rsactftool.lib.customlogger import CustomFormatter, logger_levels
from rsactftool.lib.keys_wrapper import (
generate_pq_from_n_and_p_or_q,
generate_keys_from_p_q_e_n,
PrivateKey,
)
def main():
    """Command line entry point for RsaCtfTool.

    Parses arguments, optionally dumps or constructs keys, decrypts
    ciphertexts when a private key is available, and otherwise runs the
    configured attacks against the provided public key(s).
    """
    # Remove insecure warning for factordb.com
    urllib3.disable_warnings(InsecureRequestWarning)
    # Change recursion limit for... you know, factorizing stuff...
    sys.setrecursionlimit(5000)
    logger = logging.getLogger("global_logger")
    parser = argparse.ArgumentParser(description="RSA CTF Tool")
    parser.add_argument(
        "--publickey", help="public key file. You can use wildcards for multiple keys."
    )
    parser.add_argument(
        "--output", help="output file for results (privates keys, plaintext data)."
    )
    parser.add_argument(
        "--timeout", help="Timeout for long attacks.", default=30, type=int
    )
    parser.add_argument(
        "--createpub",
        help="Take n and e from cli and just print a public key then exit",
        action="store_true",
    )
    parser.add_argument(
        "--dumpkey",
        help="Just dump the RSA variables from a key - n,e,d,p,q",
        action="store_true",
    )
    parser.add_argument(
        "--ext",
        help="Extended dump of RSA private variables in --dumpkey mode - dp,dq,pinv,qinv).",
        action="store_true",
    )
    parser.add_argument("--uncipherfile", help="uncipher a file", default=None)
    parser.add_argument("--uncipher", help="uncipher a cipher", default=None)
    parser.add_argument(
        "--verbosity", help="verbose mode", choices=logger_levels.keys(), default="INFO"
    )
    parser.add_argument(
        "--private", help="Display private key if recovered", action="store_true"
    )
    parser.add_argument(
        "--ecmdigits",
        type=int,
        help="Optionally an estimate as to how long one of the primes is for ECM method",
        default=None,
    )
    parser.add_argument("-n", help="Specify the modulus. format : int or 0xhex")
    parser.add_argument(
        "-p", help="Specify the first prime number. format : int or 0xhex"
    )
    parser.add_argument(
        "-q", help="Specify the second prime number. format : int or 0xhex"
    )
    parser.add_argument("-e", help="Specify the public exponent. format : int or 0xhex")
    parser.add_argument("--key", help="Specify the private key file.")
    parser.add_argument("--password", help="Private key password if needed.")
    # Dynamically discover all attack modules as valid --attack choices
    attacks = glob(join(dirname(__file__), "attacks", "single_key", "*.py"))
    attacks += glob(join(dirname(__file__), "attacks", "multi_keys", "*.py"))
    attacks_filtered = [
        basename(f)[:-3] for f in attacks if isfile(f) and not f.endswith("__init__.py")
    ]
    attacks_list = [_ for _ in attacks_filtered if _ != "nullattack"] + ["all"]
    parser.add_argument(
        "--attack", help="Specify the attack mode.", default="all", choices=attacks_list
    )
    args = parser.parse_args()
    unciphers = []
    # Set logger level
    logging.basicConfig(level=logger_levels[args.verbosity])
    ch = logging.StreamHandler()
    ch.setFormatter(CustomFormatter())
    logger = logging.getLogger("global_logger")
    logger.propagate = False
    logger.addHandler(ch)
    # If no arguments, display help and exit
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    # Warn that recovered private keys will be hidden without --private
    if not args.private:
        logger.warning(
            "private argument is not set, the private key will not be displayed, even if recovered."
        )
    # Parse numeric arguments (int or 0xhex) if given
    if args.p is not None:
        args.p = get_numeric_value(args.p)
    if args.q is not None:
        args.q = get_numeric_value(args.q)
    if args.e is not None:
        args.e = get_numeric_value(args.e)
    # get n if we can
    if args.n is not None:
        args.n = get_numeric_value(args.n)
    elif args.p is not None and args.q is not None:
        args.n = args.p * args.q
    # Normalise --uncipher into a list of ciphertext byte strings
    if args.uncipher is not None:
        uncipher_array = []
        for uncipher in args.uncipher.split(","):
            uncipher = get_numeric_value(uncipher)
            uncipher_array.append(n2s(uncipher))
        args.uncipher = uncipher_array
    # Read ciphertexts from the files given via --uncipherfile
    if args.uncipherfile is not None:
        uncipher_array = []
        for uncipher in args.uncipherfile.split(","):
            try:
                with open(uncipher, "rb") as cipherfile_fd:
                    uncipher_array.append(cipherfile_fd.read())
            except OSError:
                logger.info("--uncipherfile : file not found or not readable.")
                exit(1)
        args.uncipher = uncipher_array
    # With a private key and ciphertexts, decrypt directly and exit
    if args.key and args.uncipher:
        priv_key = PrivateKey(filename=args.key, password=args.password)
        for uncipher in args.uncipher:
            # Bug fix: decrypt the loop's ciphertext; ``args.u`` never existed.
            unciphers.append(priv_key.decrypt(uncipher))
        print_results(args, None, priv_key, unciphers)
        exit(0)
    # If we have n and one of p and q, calculate the other
    if args.n and (args.p or args.q):
        args.p, args.q = generate_pq_from_n_and_p_or_q(args.n, args.p, args.q)
    # Create pubkey if requested
    if args.createpub:
        pub_key, priv_key = generate_keys_from_p_q_e_n(args.p, args.q, args.e, args.n)
        print(pub_key.decode("utf-8"))
        exit(0)
    # Load keys: build a temporary public key from n/e, or expand the
    # --publickey argument (wildcards / comma-separated list)
    tmpfile = None
    if args.publickey is None and args.e is not None and args.n is not None:
        tmpfile = tempfile.NamedTemporaryFile()
        with open(tmpfile.name, "wb") as tmpfd:
            tmpfd.write(RSA.construct((args.n, args.e)).publickey().exportKey())
        args.publickey = [tmpfile.name]
    elif args.publickey is not None:
        if "*" in args.publickey or "?" in args.publickey:
            args.publickey = glob(args.publickey)
        elif "," in args.publickey:
            args.publickey = args.publickey.split(",")
        else:
            args.publickey = [args.publickey]
    # If all RSA components are known, reconstruct the key pair directly
    if (
        args.p is not None
        and args.q is not None
        and args.e is not None
        and args.n is not None
    ):
        pub_key, priv_key = generate_keys_from_p_q_e_n(args.p, args.q, args.e, args.n)
        if args.createpub:
            print(pub_key)
        if args.uncipher is not None:
            for uncipher in args.uncipher:
                # Bug fix: decrypt each ciphertext rather than the whole list.
                unciphers.append(priv_key.decrypt(uncipher))
        print_results(args, args.publickey[0], priv_key, unciphers)
        exit(0)
    # Dump public key information and exit
    if (
        args.dumpkey
        and not args.private
        and args.uncipher is None
        and args.uncipherfile is None
        and args.publickey is not None
    ):
        for publickey in args.publickey:
            logger.info("Details for %s:" % publickey)
            with open(publickey, "rb") as key_data_fd:
                key = RSA.importKey(key_data_fd.read())
            print("n: " + str(key.n))
            print("e: " + str(key.e))
        exit(0)
    # if dumpkey mode dump the key components then quit
    if args.key is not None and args.dumpkey:
        # Bug fix: close the key file instead of leaking the handle.
        with open(args.key, "rb") as key_fd:
            key_data = key_fd.read()
        key = RSA.importKey(key_data)
        print("n: " + str(key.n))
        print("e: " + str(key.e))
        if key.has_private():
            print("d: " + str(key.d))
            print("p: " + str(key.p))
            print("q: " + str(key.q))
            if args.ext:
                dp = key.d % (key.p - 1)
                dq = key.d % (key.q - 1)
                pinv = invmod(key.p, key.q)
                qinv = invmod(key.q, key.p)
                print("dp: " + str(dp))
                print("dq: " + str(dq))
                print("pinv: " + str(pinv))
                print("qinv: " + str(qinv))
        exit(0)
    # Run attacks
    found = False
    attackobj = RSAAttack(args)
    if len(args.publickey) > 1:
        found = attackobj.attack_multiple_keys(args.publickey, attacks_list)
    if not found:
        for publickey in args.publickey:
            attackobj.implemented_attacks = []
            logger.info("\n[*] Testing key %s." % publickey)
            attackobj.attack_single_key(publickey, attacks_list)
            attackobj.unciphered = []
|
en
| 0.724527
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*- ---------------------------------------------------------------------------- "THE BEER-WARE LICENSE" (Revision 42): ganapati (@G4N4P4T1) wrote this file. As long as you retain this notice you can do whatever you want with this stuff. If we meet some day, and you think this stuff is worth it, you can buy me a beer in return. ---------------------------------------------------------------------------- # Remove insecure warning for factordb.com # Change recursion limit for... you know, factorizing stuff... # Dynamic load all attacks for choices in argparse # Set logger level # If no arguments, diplay help and exit # Add information # Parse longs if exists # get n if we can # if we have uncipher but no uncipherfile # if we have uncipherfile # If we have a private key in input and uncipher in args (or uncipherfile) # If we have n and one of p and q, calculated the other # Create pubkey if requested # Load keys # If we already have all informations # Dump public key informations # if dumpkey mode dump the key components then quit # Run attacks
| 1.97197
| 2
|
neurovault/settings.py
|
chrisgorgo/NeuroVault
| 0
|
6625717
|
<gh_stars>0
# Django settings for neurovault project.
import os
import sys
import tempfile
from datetime import timedelta
import matplotlib
from kombu import Exchange, Queue

# Use a non-interactive matplotlib backend; servers have no display.
matplotlib.use('Agg')

BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# NOTE(review): DEBUG (and TEMPLATE_DEBUG) must be False in production.
DEBUG = True
DOMAIN_NAME = "https://neurovault.org"
TEMPLATE_DEBUG = DEBUG

# (full name, email) pairs that receive error notifications.
# Fixed: the original (('Chris', '<EMAIL>')) collapsed to a single 2-tuple of
# strings because plain parentheses do not nest; Django iterates ADMINS as
# (name, email) pairs, so a trailing comma is required to make it a 1-tuple
# of pairs.
ADMINS = (
    ('Chris', '<EMAIL>'),
)
MANAGERS = ADMINS
SITE_ID = 1
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'postgres',
        # The following settings are not used with sqlite3:
        'USER': 'postgres',
        'HOST': 'db', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '5432', # Set to empty string for default.
    }
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
# NOTE(review): "*" disables Django's Host-header validation; restrict in production.
ALLOWED_HOSTS = ["*"]
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Berlin'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# NOTE(review): SITE_ID was already set above; this reassignment is redundant.
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
MEDIA_URL = '/public/media/'
PRIVATE_MEDIA_ROOT = '/var/www/image_data'
PRIVATE_MEDIA_URL = '/media/images'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = '/var/www/static'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'neurovault.apps.statmaps.middleware.CollectionRedirectMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'neurovault.urls'
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'neurovault.wsgi.application'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': (),
        'OPTIONS': {'context_processors': ("django.contrib.auth.context_processors.auth",
                                           "django.core.context_processors.debug",
                                           "django.core.context_processors.i18n",
                                           "django.core.context_processors.media",
                                           "django.core.context_processors.static",
                                           "django.core.context_processors.tz",
                                           "django.contrib.messages.context_processors.messages",
                                           'django.core.context_processors.request'),
                    'loaders': ('hamlpy.template.loaders.HamlPyFilesystemLoader',
                                'hamlpy.template.loaders.HamlPyAppDirectoriesLoader',
                                'django.template.loaders.filesystem.Loader',
                                'django.template.loaders.app_directories.Loader',
                                )}
    }
]
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'neurovault.apps.main',
    'neurovault.apps.statmaps',
    'neurovault.apps.users',
    'django.contrib.sitemaps',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    #'django.contrib.admindocs',
    'social.apps.django_app.default',
    'rest_framework',
    'taggit',
    'crispy_forms',
    'taggit_templatetags',
    #'south',
    'dbbackup',
    'polymorphic',
    'djcelery',
    'django_cleanup',
    'file_resubmit',
    'django_mailgun',
    'django_hstore',
    'guardian',
    'oauth2_provider',
    'fixture_media',
    'raven.contrib.django.raven_compat',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# LOGGING = {
#     'version': 1,
#     'disable_existing_loggers': False,
#     'filters': {
#         'require_debug_false': {
#             '()': 'django.utils.log.RequireDebugFalse'
#         }
#     },
#     'handlers': {
#         'mail_admins': {
#             'level': 'ERROR',
#             'filters': ['require_debug_false'],
#             'class': 'django.utils.log.AdminEmailHandler'
#         }
#     },
#     'loggers': {
#         'django.request': {
#             'handlers': ['mail_admins'],
#             'level': 'ERROR',
#             'propagate': True,
#         },
#     }
# }
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'social.backends.facebook.FacebookOAuth2',
    'social.backends.google.GoogleOAuth2',
    'guardian.backends.ObjectPermissionBackend',
)
SOCIAL_AUTH_PIPELINE = (
    'social.pipeline.social_auth.social_details',
    'social.pipeline.social_auth.social_uid',
    'social.pipeline.social_auth.auth_allowed',
    'social.pipeline.social_auth.social_user',
    'social.pipeline.user.get_username',
    'social.pipeline.social_auth.associate_by_email', # <--- enable this one
    'social.pipeline.user.create_user',
    'social.pipeline.social_auth.associate_user',
    'social.pipeline.social_auth.load_extra_data',
    'social.pipeline.user.user_details'
)
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
# NOTE(review): pickle-based sessions execute arbitrary code if SECRET_KEY
# leaks; django.contrib.sessions.serializers.JSONSerializer is safer.
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
REST_FRAMEWORK = {
    # Use hyperlinked styles by default.
    # Only used if the `serializer_class` attribute is not set on a view.
    'DEFAULT_MODEL_SERIALIZER_CLASS':
        'rest_framework.serializers.HyperlinkedModelSerializer',
    # LimitOffsetPagination will allow to set a ?limit= and ?offset=
    # variable in the URL.
    'DEFAULT_PAGINATION_CLASS':
        'neurovault.api.pagination.StandardResultPagination',
    # Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
    ],
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'oauth2_provider.ext.rest_framework.OAuth2Authentication',
    ),
    'DEFAULT_RENDERER_CLASSES': (
        'neurovault.api.utils.ExplicitUnicodeJSONRenderer',
        'rest_framework.renderers.BrowsableAPIRenderer',
    ),
    'UNICODE_JSON': True,
}
OAUTH2_PROVIDER = {
    'REQUEST_APPROVAL_PROMPT': 'auto'
}
LOGIN_REDIRECT_URL = '/my_collections/'
#LOGIN_URL = '/login-form/'
#LOGIN_ERROR_URL = '/login-error/'
CRISPY_TEMPLATE_PACK = 'bootstrap3'
DBBACKUP_STORAGE = 'dbbackup.storage.dropbox_storage'
DBBACKUP_TOKENS_FILEPATH = '/home/filo/dbtokens'
# NOTE(review): embeds the Postgres password in the command string; prefer a
# .pgpass file or environment injection at deploy time.
DBBACKUP_POSTGRES_BACKUP_COMMAND = 'export PGPASSWORD=neurovault\n pg_dump --username={adminuser} --host={host} --port={port} {databasename} >'
# For Apache, use 'sendfile.backends.xsendfile'
# For Nginx, use 'sendfile.backends.nginx'
# For Devserver, use 'sendfile.backends.development'
SENDFILE_BACKEND = 'sendfile.backends.development'
PRIVATE_MEDIA_REDIRECT_HEADER = 'X-Accel-Redirect'
PYCORTEX_DATASTORE = os.path.join(BASE_DIR,'pycortex_data')
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    },
    "file_resubmit": {
        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        "LOCATION": '/tmp/file_resubmit/'
    }
}
# Mandrill config
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = '<KEY>' # replace with a real key in production
MAILGUN_SERVER_NAME = 'samples.mailgun.org'# replace with 'neurovault.org' in production
DEFAULT_FROM_EMAIL = "<EMAIL>"
if os.path.exists('/usr/local/share/pycortex/db/fsaverage'):
    STATICFILES_DIRS = (
        ('pycortex-resources', '/usr/local/lib/python2.7/site-packages/cortex/webgl/resources'),
        ('pycortex-ctmcache', '/usr/local/share/pycortex/db/fsaverage/cache')
    )
# Celery config
BROKER_URL = 'redis://redis:6379/0'
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_DEFAULT_QUEUE = 'default'
CELERY_QUEUES = (
    Queue('default', Exchange('default'), routing_key='default'),
)
CELERY_IMPORTS = ('neurovault.apps.statmaps.tasks', )
CELERYBEAT_SCHEDULE = {
    'anima_crawl_every day': {
        'task': 'crawl_anima',
        'schedule': timedelta(days=1)
    },
}
CELERY_TIMEZONE = 'Europe/Berlin'
ANONYMOUS_USER_ID = -1
DEFAULT_OAUTH_APPLICATION_ID = -1
DEFAULT_OAUTH_APP_NAME = 'DefaultOAuthApp'
DEFAULT_OAUTH_APP_OWNER_ID = -2
DEFAULT_OAUTH_APP_OWNER_USERNAME = 'DefaultAppOwner'
OAUTH_PERSONAL_TOKEN_LENGTH = 40
# Bogus secret key.
try:
    from secrets import *
except ImportError:
    from bogus_secrets import *
try:
    from local_settings import *
except ImportError:
    pass
# freesurfer/pycortex environment
os.environ["XDG_CONFIG_HOME"] = PYCORTEX_DATASTORE
os.environ["FREESURFER_HOME"] = "/opt/freesurfer"
os.environ["SUBJECTS_DIR"] = os.path.join(os.environ["FREESURFER_HOME"],"subjects")
os.environ["FSLOUTPUTTYPE"] = "NIFTI_GZ"
# provToolbox path
os.environ["PATH"] += os.pathsep + '/path/to/lib/provToolbox/bin'
#CELERYBEAT_SCHEDULE = {
#    'run_make_correlation_df': {
#        'task': 'neurovault.apps.statmaps.tasks...',
#        'schedule': timedelta(minutes=30),
#    },
#}
# or manage periodic schedule in django admin
#CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
if "test" in sys.argv or "benchmark" in sys.argv:
    # Isolate uploaded media in a throwaway directory and run Celery
    # tasks synchronously so tests do not need a broker.
    test_media_root = os.path.join(tempfile.mkdtemp(prefix="neurovault_test_"))
    PRIVATE_MEDIA_ROOT = test_media_root
    CELERY_ALWAYS_EAGER = True
    CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
    TAGGIT_CASE_INSENSITIVE=True
    FIXTURE_DIRS = (
        'apps/statmaps/fixtures/',
    )
    MEDIA_ROOT = PRIVATE_MEDIA_ROOT
|
# Django settings for neurovault project.
import os
import sys
import tempfile
from datetime import timedelta
import matplotlib
from kombu import Exchange, Queue

# Use a non-interactive matplotlib backend; servers have no display.
matplotlib.use('Agg')

BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# NOTE(review): DEBUG (and TEMPLATE_DEBUG) must be False in production.
DEBUG = True
DOMAIN_NAME = "https://neurovault.org"
TEMPLATE_DEBUG = DEBUG

# (full name, email) pairs that receive error notifications.
# Fixed: the original (('Chris', '<EMAIL>')) collapsed to a single 2-tuple of
# strings because plain parentheses do not nest; Django iterates ADMINS as
# (name, email) pairs, so a trailing comma is required to make it a 1-tuple
# of pairs.
ADMINS = (
    ('Chris', '<EMAIL>'),
)
MANAGERS = ADMINS
SITE_ID = 1
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'postgres',
        # The following settings are not used with sqlite3:
        'USER': 'postgres',
        'HOST': 'db', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '5432', # Set to empty string for default.
    }
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
# NOTE(review): "*" disables Django's Host-header validation; restrict in production.
ALLOWED_HOSTS = ["*"]
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Berlin'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# NOTE(review): SITE_ID was already set above; this reassignment is redundant.
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
MEDIA_URL = '/public/media/'
PRIVATE_MEDIA_ROOT = '/var/www/image_data'
PRIVATE_MEDIA_URL = '/media/images'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = '/var/www/static'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'neurovault.apps.statmaps.middleware.CollectionRedirectMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'neurovault.urls'
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'neurovault.wsgi.application'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': (),
        'OPTIONS': {'context_processors': ("django.contrib.auth.context_processors.auth",
                                           "django.core.context_processors.debug",
                                           "django.core.context_processors.i18n",
                                           "django.core.context_processors.media",
                                           "django.core.context_processors.static",
                                           "django.core.context_processors.tz",
                                           "django.contrib.messages.context_processors.messages",
                                           'django.core.context_processors.request'),
                    'loaders': ('hamlpy.template.loaders.HamlPyFilesystemLoader',
                                'hamlpy.template.loaders.HamlPyAppDirectoriesLoader',
                                'django.template.loaders.filesystem.Loader',
                                'django.template.loaders.app_directories.Loader',
                                )}
    }
]
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'neurovault.apps.main',
    'neurovault.apps.statmaps',
    'neurovault.apps.users',
    'django.contrib.sitemaps',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    #'django.contrib.admindocs',
    'social.apps.django_app.default',
    'rest_framework',
    'taggit',
    'crispy_forms',
    'taggit_templatetags',
    #'south',
    'dbbackup',
    'polymorphic',
    'djcelery',
    'django_cleanup',
    'file_resubmit',
    'django_mailgun',
    'django_hstore',
    'guardian',
    'oauth2_provider',
    'fixture_media',
    'raven.contrib.django.raven_compat',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# LOGGING = {
#     'version': 1,
#     'disable_existing_loggers': False,
#     'filters': {
#         'require_debug_false': {
#             '()': 'django.utils.log.RequireDebugFalse'
#         }
#     },
#     'handlers': {
#         'mail_admins': {
#             'level': 'ERROR',
#             'filters': ['require_debug_false'],
#             'class': 'django.utils.log.AdminEmailHandler'
#         }
#     },
#     'loggers': {
#         'django.request': {
#             'handlers': ['mail_admins'],
#             'level': 'ERROR',
#             'propagate': True,
#         },
#     }
# }
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'social.backends.facebook.FacebookOAuth2',
    'social.backends.google.GoogleOAuth2',
    'guardian.backends.ObjectPermissionBackend',
)
SOCIAL_AUTH_PIPELINE = (
    'social.pipeline.social_auth.social_details',
    'social.pipeline.social_auth.social_uid',
    'social.pipeline.social_auth.auth_allowed',
    'social.pipeline.social_auth.social_user',
    'social.pipeline.user.get_username',
    'social.pipeline.social_auth.associate_by_email', # <--- enable this one
    'social.pipeline.user.create_user',
    'social.pipeline.social_auth.associate_user',
    'social.pipeline.social_auth.load_extra_data',
    'social.pipeline.user.user_details'
)
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
# NOTE(review): pickle-based sessions execute arbitrary code if SECRET_KEY
# leaks; django.contrib.sessions.serializers.JSONSerializer is safer.
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
REST_FRAMEWORK = {
    # Use hyperlinked styles by default.
    # Only used if the `serializer_class` attribute is not set on a view.
    'DEFAULT_MODEL_SERIALIZER_CLASS':
        'rest_framework.serializers.HyperlinkedModelSerializer',
    # LimitOffsetPagination will allow to set a ?limit= and ?offset=
    # variable in the URL.
    'DEFAULT_PAGINATION_CLASS':
        'neurovault.api.pagination.StandardResultPagination',
    # Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
    ],
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'oauth2_provider.ext.rest_framework.OAuth2Authentication',
    ),
    'DEFAULT_RENDERER_CLASSES': (
        'neurovault.api.utils.ExplicitUnicodeJSONRenderer',
        'rest_framework.renderers.BrowsableAPIRenderer',
    ),
    'UNICODE_JSON': True,
}
OAUTH2_PROVIDER = {
    'REQUEST_APPROVAL_PROMPT': 'auto'
}
LOGIN_REDIRECT_URL = '/my_collections/'
#LOGIN_URL = '/login-form/'
#LOGIN_ERROR_URL = '/login-error/'
CRISPY_TEMPLATE_PACK = 'bootstrap3'
DBBACKUP_STORAGE = 'dbbackup.storage.dropbox_storage'
DBBACKUP_TOKENS_FILEPATH = '/home/filo/dbtokens'
# NOTE(review): embeds the Postgres password in the command string; prefer a
# .pgpass file or environment injection at deploy time.
DBBACKUP_POSTGRES_BACKUP_COMMAND = 'export PGPASSWORD=neurovault\n pg_dump --username={adminuser} --host={host} --port={port} {databasename} >'
# For Apache, use 'sendfile.backends.xsendfile'
# For Nginx, use 'sendfile.backends.nginx'
# For Devserver, use 'sendfile.backends.development'
SENDFILE_BACKEND = 'sendfile.backends.development'
PRIVATE_MEDIA_REDIRECT_HEADER = 'X-Accel-Redirect'
PYCORTEX_DATASTORE = os.path.join(BASE_DIR,'pycortex_data')
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    },
    "file_resubmit": {
        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        "LOCATION": '/tmp/file_resubmit/'
    }
}
# Mandrill config
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = '<KEY>' # replace with a real key in production
MAILGUN_SERVER_NAME = 'samples.mailgun.org'# replace with 'neurovault.org' in production
DEFAULT_FROM_EMAIL = "<EMAIL>"
if os.path.exists('/usr/local/share/pycortex/db/fsaverage'):
    STATICFILES_DIRS = (
        ('pycortex-resources', '/usr/local/lib/python2.7/site-packages/cortex/webgl/resources'),
        ('pycortex-ctmcache', '/usr/local/share/pycortex/db/fsaverage/cache')
    )
# Celery config
BROKER_URL = 'redis://redis:6379/0'
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_DEFAULT_QUEUE = 'default'
CELERY_QUEUES = (
    Queue('default', Exchange('default'), routing_key='default'),
)
CELERY_IMPORTS = ('neurovault.apps.statmaps.tasks', )
CELERYBEAT_SCHEDULE = {
    'anima_crawl_every day': {
        'task': 'crawl_anima',
        'schedule': timedelta(days=1)
    },
}
CELERY_TIMEZONE = 'Europe/Berlin'
ANONYMOUS_USER_ID = -1
DEFAULT_OAUTH_APPLICATION_ID = -1
DEFAULT_OAUTH_APP_NAME = 'DefaultOAuthApp'
DEFAULT_OAUTH_APP_OWNER_ID = -2
DEFAULT_OAUTH_APP_OWNER_USERNAME = 'DefaultAppOwner'
OAUTH_PERSONAL_TOKEN_LENGTH = 40
# Bogus secret key.
try:
    from secrets import *
except ImportError:
    from bogus_secrets import *
try:
    from local_settings import *
except ImportError:
    pass
# freesurfer/pycortex environment
os.environ["XDG_CONFIG_HOME"] = PYCORTEX_DATASTORE
os.environ["FREESURFER_HOME"] = "/opt/freesurfer"
os.environ["SUBJECTS_DIR"] = os.path.join(os.environ["FREESURFER_HOME"],"subjects")
os.environ["FSLOUTPUTTYPE"] = "NIFTI_GZ"
# provToolbox path
os.environ["PATH"] += os.pathsep + '/path/to/lib/provToolbox/bin'
#CELERYBEAT_SCHEDULE = {
#    'run_make_correlation_df': {
#        'task': 'neurovault.apps.statmaps.tasks...',
#        'schedule': timedelta(minutes=30),
#    },
#}
# or manage periodic schedule in django admin
#CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
if "test" in sys.argv or "benchmark" in sys.argv:
    # Isolate uploaded media in a throwaway directory and run Celery
    # tasks synchronously so tests do not need a broker.
    test_media_root = os.path.join(tempfile.mkdtemp(prefix="neurovault_test_"))
    PRIVATE_MEDIA_ROOT = test_media_root
    CELERY_ALWAYS_EAGER = True
    CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
    TAGGIT_CASE_INSENSITIVE=True
    FIXTURE_DIRS = (
        'apps/statmaps/fixtures/',
    )
    MEDIA_ROOT = PRIVATE_MEDIA_ROOT
|
en
| 0.574025
|
# Django settings for neurovault project. # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. # The following settings are not used with sqlite3: # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP. # Set to empty string for default. # Hosts/domain names that are valid for this site; required if DEBUG is False # See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # In a Windows environment this must be set to your system time zone. # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. # If you set this to False, Django will not use timezone-aware datetimes. # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/var/www/example.com/media/" # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/var/www/example.com/static/" # URL prefix for static files. # Example: "http://example.com/static/", "http://static.example.com/" # List of finder classes that know how to find static files in # various locations. # 'django.contrib.staticfiles.finders.DefaultStorageFinder', # Uncomment the next line for simple clickjacking protection: # 'django.middleware.clickjacking.XFrameOptionsMiddleware', # Python dotted path to the WSGI application used by Django's runserver. 
# Uncomment the next line to enable the admin: # Uncomment the next line to enable admin documentation: #'django.contrib.admindocs', #'south', # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. # LOGGING = { # 'version': 1, # 'disable_existing_loggers': False, # 'filters': { # 'require_debug_false': { # '()': 'django.utils.log.RequireDebugFalse' # } # }, # 'handlers': { # 'mail_admins': { # 'level': 'ERROR', # 'filters': ['require_debug_false'], # 'class': 'django.utils.log.AdminEmailHandler' # } # }, # 'loggers': { # 'django.request': { # 'handlers': ['mail_admins'], # 'level': 'ERROR', # 'propagate': True, # }, # } # } # <--- enable this one # Use hyperlinked styles by default. # Only used if the `serializer_class` attribute is not set on a view. # LimitOffsetPagination will allow to set a ?limit= and ?offset= # variable in the URL. # Use Django's standard `django.contrib.auth` permissions, # or allow read-only access for unauthenticated users. #LOGIN_URL = '/login-form/' #LOGIN_ERROR_URL = '/login-error/' # For Apache, use 'sendfile.backends.xsendfile' # For Nginx, use 'sendfile.backends.nginx' # For Devserver, use 'sendfile.backends.development' # Mandrill config # replace with a real key in production # Celery config # Bogus secret key. # freesurfer/pycortex environment # provToolbox path #CELERYBEAT_SCHEDULE = { # 'run_make_correlation_df': { # 'task': 'neurovault.apps.statmaps.tasks...', # 'schedule': timedelta(minutes=30), # }, #} # or manage periodic schedule in django admin #CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
| 1.95738
| 2
|
src/data/common.py
|
praeclarumjj3/AOT-GAN-Experiments
| 3
|
6625718
|
import zipfile
class ZipReader(object):
    """Process-wide cache of open zip archives with image-reading helpers.

    Archives are opened lazily on first access and the handles are shared
    across all callers via the class-level ``file_dict`` cache.
    """

    # Shared cache: archive path -> open zipfile.ZipFile handle.
    file_dict = dict()

    def __init__(self):
        super(ZipReader, self).__init__()

    @staticmethod
    def build_file_dict(path):
        """Return the cached ZipFile for *path*, opening it on first use."""
        cache = ZipReader.file_dict
        if path not in cache:
            cache[path] = zipfile.ZipFile(path, mode='r', allowZip64=True)
        return cache[path]

    @staticmethod
    def imread(path, image_name):
        """Read member *image_name* from the archive at *path* as an image.

        NOTE(review): relies on ``io`` and PIL's ``Image`` being imported at
        module level elsewhere in this file -- confirm those imports exist.
        """
        archive = ZipReader.build_file_dict(path)
        raw = archive.read(image_name)
        return Image.open(io.BytesIO(raw))
|
import zipfile
class ZipReader(object):
    """Process-wide cache of open zip archives with image-reading helpers.

    Archives are opened lazily on first access and the handles are shared
    across all callers via the class-level ``file_dict`` cache.
    """

    # Shared cache: archive path -> open zipfile.ZipFile handle.
    file_dict = dict()

    def __init__(self):
        super(ZipReader, self).__init__()

    @staticmethod
    def build_file_dict(path):
        """Return the cached ZipFile for *path*, opening it on first use."""
        cache = ZipReader.file_dict
        if path not in cache:
            cache[path] = zipfile.ZipFile(path, mode='r', allowZip64=True)
        return cache[path]

    @staticmethod
    def imread(path, image_name):
        """Read member *image_name* from the archive at *path* as an image.

        NOTE(review): relies on ``io`` and PIL's ``Image`` being imported at
        module level elsewhere in this file -- confirm those imports exist.
        """
        archive = ZipReader.build_file_dict(path)
        raw = archive.read(image_name)
        return Image.open(io.BytesIO(raw))
|
none
| 1
| 3.090914
| 3
|
|
Python_Challenge_115/7/C.py
|
LIkelion-at-KOREATECH/LikeLion_Django_Study_Summary
| 28
|
6625719
|
'''
Statement
It is possible to place 8 queens on an 8×8 chessboard so that no two queens threaten each other. Thus, it requires that no two queens share the same row, column, or diagonal.
Given a placement of 8 queens on the chessboard. If there is a pair of queens that violates this rule, print YES, otherwise print NO. The input consists of eight coordinate pairs, one pair per line, with each pair giving the position of a queen on a standard chessboard with rows and columns numbered from 1 to 8.
Example input
1 5
2 3
3 1
4 7
5 2
6 8
7 6
8 4
(shown on the picture)
Example output
NO
'''


def has_conflict(queens):
    """Return True if any two queens attack each other.

    queens: list of [row, col] pairs (1-indexed board coordinates).

    Two queens conflict when they share a row, a column, or a diagonal
    (equal absolute row and column distance).  Each unordered pair is
    checked exactly once.

    Fixes two defects in the original script:
    * the diagonal test was guarded by ``i[1] != j[0]``, so any pair where
      one queen's column equalled the other's row (e.g. (1, 3) vs (3, 1))
      was never diagonal-checked in either iteration order;
    * the verdict was printed as "Yes"/"No" although the problem statement
      requires uppercase "YES"/"NO".
    """
    for idx, (row_a, col_a) in enumerate(queens):
        for row_b, col_b in queens[idx + 1:]:
            if row_a == row_b or col_a == col_b:
                return True  # shared row or column
            if abs(row_a - row_b) == abs(col_a - col_b):
                return True  # shared diagonal
    return False


if __name__ == "__main__":
    # Read the eight "row col" pairs from standard input.
    placement = [list(map(int, input().split())) for _ in range(8)]
    print("YES" if has_conflict(placement) else "NO")
|
'''
Statement
It is possible to place 8 queens on an 8×8 chessboard so that no two queens threaten each other. Thus, it requires that no two queens share the same row, column, or diagonal.
Given a placement of 8 queens on the chessboard. If there is a pair of queens that violates this rule, print YES, otherwise print NO. The input consists of eight coordinate pairs, one pair per line, with each pair giving the position of a queen on a standard chessboard with rows and columns numbered from 1 to 8.
Example input
1 5
2 3
3 1
4 7
5 2
6 8
7 6
8 4
(shown on the picture)
Example output
NO
'''


def has_conflict(queens):
    """Return True if any two queens attack each other.

    queens: list of [row, col] pairs (1-indexed board coordinates).

    Two queens conflict when they share a row, a column, or a diagonal
    (equal absolute row and column distance).  Each unordered pair is
    checked exactly once.

    Fixes two defects in the original script:
    * the diagonal test was guarded by ``i[1] != j[0]``, so any pair where
      one queen's column equalled the other's row (e.g. (1, 3) vs (3, 1))
      was never diagonal-checked in either iteration order;
    * the verdict was printed as "Yes"/"No" although the problem statement
      requires uppercase "YES"/"NO".
    """
    for idx, (row_a, col_a) in enumerate(queens):
        for row_b, col_b in queens[idx + 1:]:
            if row_a == row_b or col_a == col_b:
                return True  # shared row or column
            if abs(row_a - row_b) == abs(col_a - col_b):
                return True  # shared diagonal
    return False


if __name__ == "__main__":
    # Read the eight "row col" pairs from standard input.
    placement = [list(map(int, input().split())) for _ in range(8)]
    print("YES" if has_conflict(placement) else "NO")
|
en
| 0.817594
|
Statement It is possible to place 8 queens on an 8×8 chessboard so that no two queens threaten each other. Thus, it requires that no two queens share the same row, column, or diagonal. Given a placement of 8 queens on the chessboard. If there is a pair of queens that violates this rule, print YES, otherwise print NO. The input consists of eight coordinate pairs, one pair per line, with each pair giving the position of a queen on a standard chessboard with rows and columns numbered from 1 to 8. Example input 1 5 2 3 3 1 4 7 5 2 6 8 7 6 8 4 (shown on the picture) Example output NO
| 3.922832
| 4
|
tests/test_cmdline.py
|
hemanthmantri/coveragepy
| 0
|
6625720
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Test cmdline.py for coverage.py."""
import pprint
import re
import sys
import textwrap
from unittest import mock
import pytest
import coverage
import coverage.cmdline
from coverage import env
from coverage.control import DEFAULT_DATAFILE
from coverage.config import CoverageConfig
from coverage.exceptions import _ExceptionDuringRun
from coverage.version import __url__
from tests.coveragetest import CoverageTest, OK, ERR, command_line
from tests.helpers import os_sep
class BaseCmdLineTest(CoverageTest):
"""Tests of execution paths through the command line interpreter."""
run_in_temp_dir = False
# Make a dict mapping function names to the default values that cmdline.py
# uses when calling the function.
_defaults = mock.Mock()
_defaults.Coverage().annotate(
directory=None, ignore_errors=None, include=None, omit=None, morfs=[],
contexts=None,
)
_defaults.Coverage().html_report(
directory=None, ignore_errors=None, include=None, omit=None, morfs=[],
skip_covered=None, show_contexts=None, title=None, contexts=None,
skip_empty=None, precision=None,
)
_defaults.Coverage().report(
ignore_errors=None, include=None, omit=None, morfs=[],
show_missing=None, skip_covered=None, contexts=None, skip_empty=None, precision=None,
sort=None,
)
_defaults.Coverage().xml_report(
ignore_errors=None, include=None, omit=None, morfs=[], outfile=None,
contexts=None, skip_empty=None,
)
_defaults.Coverage().json_report(
ignore_errors=None, include=None, omit=None, morfs=[], outfile=None,
contexts=None, pretty_print=None, show_contexts=None,
)
_defaults.Coverage().lcov_report(
ignore_errors=None, include=None, omit=None, morfs=[], outfile=None,
contexts=None,
)
_defaults.Coverage(
data_file=DEFAULT_DATAFILE,
cover_pylib=None, data_suffix=None, timid=None, branch=None,
config_file=True, source=None, include=None, omit=None, debug=None,
concurrency=None, check_preimported=True, context=None, messages=True,
)
DEFAULT_KWARGS = {name: kw for name, _, kw in _defaults.mock_calls}
def model_object(self):
"""Return a Mock suitable for use in CoverageScript."""
mk = mock.Mock()
cov = mk.Coverage.return_value
# The mock needs options.
mk.config = CoverageConfig()
cov.get_option = mk.config.get_option
cov.set_option = mk.config.set_option
# Get the type right for the result of reporting.
cov.report.return_value = 50.0
cov.html_report.return_value = 50.0
cov.xml_report.return_value = 50.0
cov.json_report.return_value = 50.0
cov.lcov_report.return_value = 50.0
return mk
# Global names in cmdline.py that will be mocked during the tests.
MOCK_GLOBALS = ['Coverage', 'PyRunner', 'show_help']
def mock_command_line(self, args, options=None):
"""Run `args` through the command line, with a Mock.
`options` is a dict of names and values to pass to `set_option`.
Returns the Mock it used and the status code returned.
"""
mk = self.model_object()
if options is not None:
for name, value in options.items():
mk.config.set_option(name, value)
patchers = [
mock.patch("coverage.cmdline."+name, getattr(mk, name))
for name in self.MOCK_GLOBALS
]
for patcher in patchers:
patcher.start()
try:
ret = command_line(args)
finally:
for patcher in patchers:
patcher.stop()
return mk, ret
    def cmd_executes(self, args, code, ret=OK, options=None):
        """Assert that the `args` end up executing the sequence in `code`.

        `code` is a Python snippet that performs the expected calls against
        the mocked globals (Coverage, PyRunner, show_help); executing it
        against a second model mock records the expected call sequence.
        """
        called, status = self.mock_command_line(args, options=options)
        assert status == ret, f"Wrong status: got {status!r}, wanted {ret!r}"

        # Remove all indentation, and execute with mock globals
        code = textwrap.dedent(code)
        expected = self.model_object()
        globs = {n: getattr(expected, n) for n in self.MOCK_GLOBALS}
        code_obj = compile(code, "<code>", "exec")
        # Running the snippet records the expected calls on `expected`.
        eval(code_obj, globs, {})  # pylint: disable=eval-used

        # Many of our functions take a lot of arguments, and cmdline.py
        # calls them with many. But most of them are just the defaults, which
        # we don't want to have to repeat in all tests. For each call, apply
        # the defaults. This lets the tests just mention the interesting ones.
        for name, _, kwargs in expected.mock_calls:
            for k, v in self.DEFAULT_KWARGS.get(name, {}).items():
                kwargs.setdefault(k, v)

        self.assert_same_mock_calls(expected, called)
def cmd_executes_same(self, args1, args2):
"""Assert that the `args1` executes the same as `args2`."""
m1, r1 = self.mock_command_line(args1)
m2, r2 = self.mock_command_line(args2)
assert r1 == r2
self.assert_same_mock_calls(m1, m2)
def assert_same_mock_calls(self, m1, m2):
"""Assert that `m1.mock_calls` and `m2.mock_calls` are the same."""
# Use a real equality comparison, but if it fails, use a nicer assert
# so we can tell what's going on. We have to use the real == first due
# to CmdOptionParser.__eq__
if m1.mock_calls != m2.mock_calls:
pp1 = pprint.pformat(m1.mock_calls)
pp2 = pprint.pformat(m2.mock_calls)
assert pp1+'\n' == pp2+'\n'
    def cmd_help(self, args, help_msg=None, topic=None, ret=ERR):
        """Run a command line, and check that it prints the right help.

        Only the last function call in the mock is checked, which should be the
        help message that we want to see.
        """
        mk, status = self.mock_command_line(args)
        assert status == ret, f"Wrong status: got {status}, wanted {ret}"
        # show_help is invoked either with a literal message (help_msg) or a
        # named help topic; check whichever form the caller specified.
        if help_msg:
            assert mk.mock_calls[-1] == ('show_help', (help_msg,), {})
        else:
            assert mk.mock_calls[-1] == ('show_help', (), {'topic': topic})
class BaseCmdLineTestTest(BaseCmdLineTest):
    """Tests that our BaseCmdLineTest helpers work."""

    def test_cmd_executes_same(self):
        # All the other tests here use self.cmd_executes_same in successful
        # ways, so here we just check that it fails.
        # "run" and "debug" drive different code paths, so comparing their
        # recorded mock calls must raise.
        with pytest.raises(AssertionError):
            self.cmd_executes_same("run", "debug")
class CmdLineTest(BaseCmdLineTest):
"""Tests of the coverage.py command line."""
    def test_annotate(self):
        # coverage annotate [-d DIR] [-i] [--omit DIR,...] [FILE1 FILE2 ...]
        # Each snippet below is the exact call sequence cmdline.py must make.
        self.cmd_executes("annotate", """\
            cov = Coverage()
            cov.load()
            cov.annotate()
            """)
        self.cmd_executes("annotate -d dir1", """\
            cov = Coverage()
            cov.load()
            cov.annotate(directory="dir1")
            """)
        self.cmd_executes("annotate -i", """\
            cov = Coverage()
            cov.load()
            cov.annotate(ignore_errors=True)
            """)
        self.cmd_executes("annotate --omit fooey", """\
            cov = Coverage(omit=["fooey"])
            cov.load()
            cov.annotate(omit=["fooey"])
            """)
        self.cmd_executes("annotate --omit fooey,booey", """\
            cov = Coverage(omit=["fooey", "booey"])
            cov.load()
            cov.annotate(omit=["fooey", "booey"])
            """)
        self.cmd_executes("annotate mod1", """\
            cov = Coverage()
            cov.load()
            cov.annotate(morfs=["mod1"])
            """)
        self.cmd_executes("annotate mod1 mod2 mod3", """\
            cov = Coverage()
            cov.load()
            cov.annotate(morfs=["mod1", "mod2", "mod3"])
            """)
    def test_combine(self):
        # Note: combine without --append does NOT call cov.load() first.
        # coverage combine with args
        self.cmd_executes("combine datadir1", """\
            cov = Coverage()
            cov.combine(["datadir1"], strict=True, keep=False)
            cov.save()
            """)
        # coverage combine, appending
        self.cmd_executes("combine --append datadir1", """\
            cov = Coverage()
            cov.load()
            cov.combine(["datadir1"], strict=True, keep=False)
            cov.save()
            """)
        # coverage combine without args
        self.cmd_executes("combine", """\
            cov = Coverage()
            cov.combine(None, strict=True, keep=False)
            cov.save()
            """)
        # coverage combine quietly
        self.cmd_executes("combine -q", """\
            cov = Coverage(messages=False)
            cov.combine(None, strict=True, keep=False)
            cov.save()
            """)
        self.cmd_executes("combine --quiet", """\
            cov = Coverage(messages=False)
            cov.combine(None, strict=True, keep=False)
            cov.save()
            """)
        self.cmd_executes("combine --data-file=foo.cov", """\
            cov = Coverage(data_file="foo.cov")
            cov.combine(None, strict=True, keep=False)
            cov.save()
            """)
    def test_combine_doesnt_confuse_options_with_args(self):
        # An option's value must not be swallowed as a data-dir argument.
        # https://github.com/nedbat/coveragepy/issues/385
        self.cmd_executes("combine --rcfile cov.ini", """\
            cov = Coverage(config_file='cov.ini')
            cov.combine(None, strict=True, keep=False)
            cov.save()
            """)
        self.cmd_executes("combine --rcfile cov.ini data1 data2/more", """\
            cov = Coverage(config_file='cov.ini')
            cov.combine(["data1", "data2/more"], strict=True, keep=False)
            cov.save()
            """)
    # Bad "debug" invocations produce a help message, not a topic dump.
    @pytest.mark.parametrize("cmd, output", [
        ("debug", "What information would you like: config, data, sys, premain?"),
        ("debug foo", "Don't know what you mean by 'foo'"),
        ("debug sys config", "Only one topic at a time, please"),
    ])
    def test_debug(self, cmd, output):
        self.cmd_help(cmd, output)
    def test_debug_sys(self):
        # Spot-check that the "sys" topic dump includes key fields.
        self.command_line("debug sys")
        out = self.stdout()
        assert "version:" in out
        assert "data_file:" in out
    def test_debug_config(self):
        # Spot-check that the "config" topic dump includes known settings.
        self.command_line("debug config")
        out = self.stdout()
        assert "cover_pylib:" in out
        assert "skip_covered:" in out
        assert "skip_empty:" in out
    def test_debug_premain(self):
        # "debug premain" dumps the stack that led to coverage's entry point.
        # Spot-check a few frames we know must appear, without pinning paths.
        self.command_line("debug premain")
        out = self.stdout()
        # The output looks like this (elided):
        # ... many lines ...
        #   pytest_pyfunc_call : /Users/ned/cov/trunk/.tox/py39/site-packages/_pytest/python.py:183
        #   test_debug_premain : /Users/ned/cov/trunk/tests/test_cmdline.py:284
        #   command_line : /Users/ned/cov/trunk/tests/coveragetest.py:309
        #   command_line : /Users/ned/cov/trunk/tests/coveragetest.py:472
        #   command_line : /Users/ned/cov/trunk/coverage/cmdline.py:592
        #   do_debug : /Users/ned/cov/trunk/coverage/cmdline.py:804
        assert re.search(r"(?m)^\s+test_debug_premain : .*[/\\]tests[/\\]test_cmdline.py:\d+$", out)
        assert re.search(r"(?m)^\s+command_line : .*[/\\]coverage[/\\]cmdline.py:\d+$", out)
        assert re.search(r"(?m)^\s+do_debug : .*[/\\]coverage[/\\]cmdline.py:\d+$", out)
    def test_erase(self):
        # coverage erase
        self.cmd_executes("erase", """\
            cov = Coverage()
            cov.erase()
            """)
        # --data-file selects which data file gets erased.
        self.cmd_executes("erase --data-file=foo.cov", """\
            cov = Coverage(data_file="foo.cov")
            cov.erase()
            """)
    def test_version(self):
        # coverage --version
        self.cmd_help("--version", topic="version", ret=OK)

    def test_help_option(self):
        # coverage -h / --help both show the "help" topic and exit OK.
        self.cmd_help("-h", topic="help", ret=OK)
        self.cmd_help("--help", topic="help", ret=OK)

    def test_help_command(self):
        # "coverage help" with no argument shows the general help topic.
        self.cmd_executes("help", "show_help(topic='help')")

    def test_cmd_help(self):
        # Per-command help; "help run" and "run --help" are equivalent.
        self.cmd_executes("run --help", "show_help(parser='<CmdOptionParser:run>')")
        self.cmd_executes_same("help run", "run --help")
    def test_html(self):
        # coverage html -d DIR [-i] [--omit DIR,...] [FILE1 FILE2 ...]
        # Each snippet is the exact call sequence cmdline.py must make.
        self.cmd_executes("html", """\
            cov = Coverage()
            cov.load()
            cov.html_report()
            """)
        self.cmd_executes("html -d dir1", """\
            cov = Coverage()
            cov.load()
            cov.html_report(directory="dir1")
            """)
        self.cmd_executes("html -i", """\
            cov = Coverage()
            cov.load()
            cov.html_report(ignore_errors=True)
            """)
        self.cmd_executes("html --omit fooey", """\
            cov = Coverage(omit=["fooey"])
            cov.load()
            cov.html_report(omit=["fooey"])
            """)
        self.cmd_executes("html --omit fooey,booey", """\
            cov = Coverage(omit=["fooey", "booey"])
            cov.load()
            cov.html_report(omit=["fooey", "booey"])
            """)
        self.cmd_executes("html mod1", """\
            cov = Coverage()
            cov.load()
            cov.html_report(morfs=["mod1"])
            """)
        self.cmd_executes("html mod1 mod2 mod3", """\
            cov = Coverage()
            cov.load()
            cov.html_report(morfs=["mod1", "mod2", "mod3"])
            """)
        self.cmd_executes("html --precision=3", """\
            cov = Coverage()
            cov.load()
            cov.html_report(precision=3)
            """)
        self.cmd_executes("html --title=Hello_there", """\
            cov = Coverage()
            cov.load()
            cov.html_report(title='Hello_there')
            """)
        self.cmd_executes("html -q", """\
            cov = Coverage(messages=False)
            cov.load()
            cov.html_report()
            """)
        self.cmd_executes("html --quiet", """\
            cov = Coverage(messages=False)
            cov.load()
            cov.html_report()
            """)
    def test_json(self):
        # coverage json [-i] [--omit DIR,...] [FILE1 FILE2 ...]
        # Each snippet is the exact call sequence cmdline.py must make.
        self.cmd_executes("json", """\
            cov = Coverage()
            cov.load()
            cov.json_report()
            """)
        self.cmd_executes("json --pretty-print", """\
            cov = Coverage()
            cov.load()
            cov.json_report(pretty_print=True)
            """)
        self.cmd_executes("json --pretty-print --show-contexts", """\
            cov = Coverage()
            cov.load()
            cov.json_report(pretty_print=True, show_contexts=True)
            """)
        self.cmd_executes("json -i", """\
            cov = Coverage()
            cov.load()
            cov.json_report(ignore_errors=True)
            """)
        self.cmd_executes("json -o myjson.foo", """\
            cov = Coverage()
            cov.load()
            cov.json_report(outfile="myjson.foo")
            """)
        self.cmd_executes("json -o -", """\
            cov = Coverage()
            cov.load()
            cov.json_report(outfile="-")
            """)
        self.cmd_executes("json --omit fooey", """\
            cov = Coverage(omit=["fooey"])
            cov.load()
            cov.json_report(omit=["fooey"])
            """)
        self.cmd_executes("json --omit fooey,booey", """\
            cov = Coverage(omit=["fooey", "booey"])
            cov.load()
            cov.json_report(omit=["fooey", "booey"])
            """)
        self.cmd_executes("json mod1", """\
            cov = Coverage()
            cov.load()
            cov.json_report(morfs=["mod1"])
            """)
        self.cmd_executes("json mod1 mod2 mod3", """\
            cov = Coverage()
            cov.load()
            cov.json_report(morfs=["mod1", "mod2", "mod3"])
            """)
        self.cmd_executes("json -q", """\
            cov = Coverage(messages=False)
            cov.load()
            cov.json_report()
            """)
        self.cmd_executes("json --quiet", """\
            cov = Coverage(messages=False)
            cov.load()
            cov.json_report()
            """)
    def test_lcov(self):
        # coverage lcov [-i] [--omit DIR,...] [FILE1 FILE2 ...]
        # Each snippet is the exact call sequence cmdline.py must make.
        self.cmd_executes("lcov", """\
            cov = Coverage()
            cov.load()
            cov.lcov_report()
            """)
        self.cmd_executes("lcov -i", """\
            cov = Coverage()
            cov.load()
            cov.lcov_report(ignore_errors=True)
            """)
        self.cmd_executes("lcov -o mylcov.foo", """\
            cov = Coverage()
            cov.load()
            cov.lcov_report(outfile="mylcov.foo")
            """)
        self.cmd_executes("lcov -o -", """\
            cov = Coverage()
            cov.load()
            cov.lcov_report(outfile="-")
            """)
        self.cmd_executes("lcov --omit fooey", """\
            cov = Coverage(omit=["fooey"])
            cov.load()
            cov.lcov_report(omit=["fooey"])
            """)
        self.cmd_executes("lcov --omit fooey,booey", """\
            cov = Coverage(omit=["fooey", "booey"])
            cov.load()
            cov.lcov_report(omit=["fooey", "booey"])
            """)
        self.cmd_executes("lcov -q", """\
            cov = Coverage(messages=False)
            cov.load()
            cov.lcov_report()
            """)
        self.cmd_executes("lcov --quiet", """\
            cov = Coverage(messages=False)
            cov.load()
            cov.lcov_report()
            """)
    def test_report(self):
        # coverage report [-m] [-i] [-o DIR,...] [FILE1 FILE2 ...]
        # Each snippet is the exact call sequence cmdline.py must make.
        self.cmd_executes("report", """\
            cov = Coverage()
            cov.load()
            cov.report(show_missing=None)
            """)
        self.cmd_executes("report -i", """\
            cov = Coverage()
            cov.load()
            cov.report(ignore_errors=True)
            """)
        self.cmd_executes("report -m", """\
            cov = Coverage()
            cov.load()
            cov.report(show_missing=True)
            """)
        self.cmd_executes("report --omit fooey", """\
            cov = Coverage(omit=["fooey"])
            cov.load()
            cov.report(omit=["fooey"])
            """)
        self.cmd_executes("report --omit fooey,booey", """\
            cov = Coverage(omit=["fooey", "booey"])
            cov.load()
            cov.report(omit=["fooey", "booey"])
            """)
        self.cmd_executes("report mod1", """\
            cov = Coverage()
            cov.load()
            cov.report(morfs=["mod1"])
            """)
        self.cmd_executes("report mod1 mod2 mod3", """\
            cov = Coverage()
            cov.load()
            cov.report(morfs=["mod1", "mod2", "mod3"])
            """)
        self.cmd_executes("report --precision=7", """\
            cov = Coverage()
            cov.load()
            cov.report(precision=7)
            """)
        self.cmd_executes("report --skip-covered", """\
            cov = Coverage()
            cov.load()
            cov.report(skip_covered=True)
            """)
        # When both are given, the later option wins.
        self.cmd_executes("report --skip-covered --no-skip-covered", """\
            cov = Coverage()
            cov.load()
            cov.report(skip_covered=False)
            """)
        self.cmd_executes("report --no-skip-covered", """\
            cov = Coverage()
            cov.load()
            cov.report(skip_covered=False)
            """)
        self.cmd_executes("report --skip-empty", """\
            cov = Coverage()
            cov.load()
            cov.report(skip_empty=True)
            """)
        self.cmd_executes("report --contexts=foo,bar", """\
            cov = Coverage()
            cov.load()
            cov.report(contexts=["foo", "bar"])
            """)
        self.cmd_executes("report --sort=-foo", """\
            cov = Coverage()
            cov.load()
            cov.report(sort='-foo')
            """)
        self.cmd_executes("report --data-file=foo.cov.2", """\
            cov = Coverage(data_file="foo.cov.2")
            cov.load()
            cov.report(show_missing=None)
            """)
    def test_run(self):
        # coverage run [-p] [-L] [--timid] MODULE.py [ARG1 ARG2 ...]

        # run calls coverage.erase first.
        self.cmd_executes("run foo.py", """\
            cov = Coverage()
            runner = PyRunner(['foo.py'], as_module=False)
            runner.prepare()
            cov.start()
            runner.run()
            cov.stop()
            cov.save()
            """)
        # run -a combines with an existing data file before saving.
        self.cmd_executes("run -a foo.py", """\
            cov = Coverage()
            runner = PyRunner(['foo.py'], as_module=False)
            runner.prepare()
            cov.load()
            cov.start()
            runner.run()
            cov.stop()
            cov.save()
            """)
        # --timid sets a flag, and program arguments get passed through.
        self.cmd_executes("run --timid foo.py abc 123", """\
            cov = Coverage(timid=True)
            runner = PyRunner(['foo.py', 'abc', '123'], as_module=False)
            runner.prepare()
            cov.start()
            runner.run()
            cov.stop()
            cov.save()
            """)
        # -L sets a flag, and flags for the program don't confuse us.
        self.cmd_executes("run -p -L foo.py -a -b", """\
            cov = Coverage(cover_pylib=True, data_suffix=True)
            runner = PyRunner(['foo.py', '-a', '-b'], as_module=False)
            runner.prepare()
            cov.start()
            runner.run()
            cov.stop()
            cov.save()
            """)
        self.cmd_executes("run --branch foo.py", """\
            cov = Coverage(branch=True)
            runner = PyRunner(['foo.py'], as_module=False)
            runner.prepare()
            cov.start()
            runner.run()
            cov.stop()
            cov.save()
            """)
        self.cmd_executes("run --rcfile=myrc.rc foo.py", """\
            cov = Coverage(config_file="myrc.rc")
            runner = PyRunner(['foo.py'], as_module=False)
            runner.prepare()
            cov.start()
            runner.run()
            cov.stop()
            cov.save()
            """)
        self.cmd_executes("run --include=pre1,pre2 foo.py", """\
            cov = Coverage(include=["pre1", "pre2"])
            runner = PyRunner(['foo.py'], as_module=False)
            runner.prepare()
            cov.start()
            runner.run()
            cov.stop()
            cov.save()
            """)
        self.cmd_executes("run --omit=opre1,opre2 foo.py", """\
            cov = Coverage(omit=["opre1", "opre2"])
            runner = PyRunner(['foo.py'], as_module=False)
            runner.prepare()
            cov.start()
            runner.run()
            cov.stop()
            cov.save()
            """)
        self.cmd_executes("run --include=pre1,pre2 --omit=opre1,opre2 foo.py", """\
            cov = Coverage(include=["pre1", "pre2"], omit=["opre1", "opre2"])
            runner = PyRunner(['foo.py'], as_module=False)
            runner.prepare()
            cov.start()
            runner.run()
            cov.stop()
            cov.save()
            """)
        self.cmd_executes("run --source=quux,hi.there,/home/bar foo.py", """\
            cov = Coverage(source=["quux", "hi.there", "/home/bar"])
            runner = PyRunner(['foo.py'], as_module=False)
            runner.prepare()
            cov.start()
            runner.run()
            cov.stop()
            cov.save()
            """)
        # --concurrency values become a list, split on commas.
        self.cmd_executes("run --concurrency=gevent foo.py", """\
            cov = Coverage(concurrency=['gevent'])
            runner = PyRunner(['foo.py'], as_module=False)
            runner.prepare()
            cov.start()
            runner.run()
            cov.stop()
            cov.save()
            """)
        self.cmd_executes("run --concurrency=multiprocessing foo.py", """\
            cov = Coverage(concurrency=['multiprocessing'])
            runner = PyRunner(['foo.py'], as_module=False)
            runner.prepare()
            cov.start()
            runner.run()
            cov.stop()
            cov.save()
            """)
        self.cmd_executes("run --concurrency=gevent,thread foo.py", """\
            cov = Coverage(concurrency=['gevent', 'thread'])
            runner = PyRunner(['foo.py'], as_module=False)
            runner.prepare()
            cov.start()
            runner.run()
            cov.stop()
            cov.save()
            """)
        self.cmd_executes("run --data-file=output.coverage foo.py", """\
            cov = Coverage(data_file="output.coverage")
            runner = PyRunner(['foo.py'], as_module=False)
            runner.prepare()
            cov.start()
            runner.run()
            cov.stop()
            cov.save()
            """)
    def test_multiprocessing_needs_config_file(self):
        # You can't use command-line args to add options to multiprocessing
        # runs, since they won't make it to the subprocesses. You need to use a
        # config file.
        self.command_line("run --concurrency=multiprocessing --branch foo.py", ret=ERR)
        msg = "Options affecting multiprocessing must only be specified in a configuration file."
        _, err = self.stdouterr()
        assert msg in err
        # The error names the offending switch explicitly.
        assert "Remove --branch from the command line." in err
    def test_run_debug(self):
        # --debug values become a list, split on commas.
        self.cmd_executes("run --debug=opt1 foo.py", """\
            cov = Coverage(debug=["opt1"])
            runner = PyRunner(['foo.py'], as_module=False)
            runner.prepare()
            cov.start()
            runner.run()
            cov.stop()
            cov.save()
            """)
        self.cmd_executes("run --debug=opt1,opt2 foo.py", """\
            cov = Coverage(debug=["opt1","opt2"])
            runner = PyRunner(['foo.py'], as_module=False)
            runner.prepare()
            cov.start()
            runner.run()
            cov.stop()
            cov.save()
            """)
    def test_run_module(self):
        # "run -m" runs a module (as_module=True) instead of a file path.
        self.cmd_executes("run -m mymodule", """\
            cov = Coverage()
            runner = PyRunner(['mymodule'], as_module=True)
            runner.prepare()
            cov.start()
            runner.run()
            cov.stop()
            cov.save()
            """)
        # Arguments after the module name, even dashed ones, go to the module.
        self.cmd_executes("run -m mymodule -qq arg1 arg2", """\
            cov = Coverage()
            runner = PyRunner(['mymodule', '-qq', 'arg1', 'arg2'], as_module=True)
            runner.prepare()
            cov.start()
            runner.run()
            cov.stop()
            cov.save()
            """)
        self.cmd_executes("run --branch -m mymodule", """\
            cov = Coverage(branch=True)
            runner = PyRunner(['mymodule'], as_module=True)
            runner.prepare()
            cov.start()
            runner.run()
            cov.stop()
            cov.save()
            """)
        self.cmd_executes_same("run -m mymodule", "run --module mymodule")
    def test_run_nothing(self):
        # "run" with no program and no configured command line is an error.
        self.command_line("run", ret=ERR)
        assert "Nothing to do" in self.stderr()
    def test_run_from_config(self):
        # [run] command_line in the config supplies the program to run,
        # with shell-style quoting honored.
        options = {"run:command_line": "myprog.py a 123 'a quoted thing' xyz"}
        self.cmd_executes("run", """\
            cov = Coverage()
            runner = PyRunner(['myprog.py', 'a', '123', 'a quoted thing', 'xyz'], as_module=False)
            runner.prepare()
            cov.start()
            runner.run()
            cov.stop()
            cov.save()
            """,
            options=options,
        )
    def test_run_module_from_config(self):
        # A configured command line may itself start with -m to run a module.
        self.cmd_executes("run", """\
            cov = Coverage()
            runner = PyRunner(['mymodule', 'thing1', 'thing2'], as_module=True)
            runner.prepare()
            cov.start()
            runner.run()
            cov.stop()
            cov.save()
            """,
            options={"run:command_line": "-m mymodule thing1 thing2"},
        )
    def test_run_from_config_but_empty(self):
        # An empty configured command line is the same as no program at all.
        self.cmd_executes("run", """\
            cov = Coverage()
            show_help('Nothing to do.')
            """,
            ret=ERR,
            options={"run:command_line": ""},
        )
    def test_run_dashm_only(self):
        # Bare "-m" with no module name is an error...
        self.cmd_executes("run -m", """\
            cov = Coverage()
            show_help('No module specified for -m')
            """,
            ret=ERR,
        )
        # ...even when the config supplies a command line of its own.
        self.cmd_executes("run -m", """\
            cov = Coverage()
            show_help('No module specified for -m')
            """,
            ret=ERR,
            options={"run:command_line": "myprog.py"}
        )
    def test_cant_append_parallel(self):
        # --append and --parallel-mode are mutually exclusive.
        self.command_line("run --append --parallel-mode foo.py", ret=ERR)
        assert "Can't append to data files in parallel mode." in self.stderr()
    def test_xml(self):
        # coverage xml [-i] [--omit DIR,...] [FILE1 FILE2 ...]
        # Each snippet is the exact call sequence cmdline.py must make.
        self.cmd_executes("xml", """\
            cov = Coverage()
            cov.load()
            cov.xml_report()
            """)
        self.cmd_executes("xml -i", """\
            cov = Coverage()
            cov.load()
            cov.xml_report(ignore_errors=True)
            """)
        self.cmd_executes("xml -o myxml.foo", """\
            cov = Coverage()
            cov.load()
            cov.xml_report(outfile="myxml.foo")
            """)
        self.cmd_executes("xml -o -", """\
            cov = Coverage()
            cov.load()
            cov.xml_report(outfile="-")
            """)
        self.cmd_executes("xml --omit fooey", """\
            cov = Coverage(omit=["fooey"])
            cov.load()
            cov.xml_report(omit=["fooey"])
            """)
        self.cmd_executes("xml --omit fooey,booey", """\
            cov = Coverage(omit=["fooey", "booey"])
            cov.load()
            cov.xml_report(omit=["fooey", "booey"])
            """)
        self.cmd_executes("xml mod1", """\
            cov = Coverage()
            cov.load()
            cov.xml_report(morfs=["mod1"])
            """)
        self.cmd_executes("xml mod1 mod2 mod3", """\
            cov = Coverage()
            cov.load()
            cov.xml_report(morfs=["mod1", "mod2", "mod3"])
            """)
        self.cmd_executes("xml -q", """\
            cov = Coverage(messages=False)
            cov.load()
            cov.xml_report()
            """)
        self.cmd_executes("xml --quiet", """\
            cov = Coverage(messages=False)
            cov.load()
            cov.xml_report()
            """)
    def test_no_arguments_at_all(self):
        # Bare "coverage" prints the minimal help and exits OK.
        self.cmd_help("", topic="minimum_help", ret=OK)

    def test_bad_command(self):
        # An unknown command is reported by name.
        self.cmd_help("xyzzy", "Unknown command: 'xyzzy'")
class CmdLineWithFilesTest(BaseCmdLineTest):
    """Test the command line in ways that need temp files."""

    run_in_temp_dir = True

    def test_debug_data(self):
        # "debug data" summarizes the data file: path, arcs flag, per-file
        # line counts, and any file tracer plugin in brackets.
        data = self.make_data_file(
            lines={
                "file1.py": range(1, 18),
                "file2.py": range(1, 24),
            },
            file_tracers={"file1.py": "a_plugin"},
        )
        self.command_line("debug data")
        assert self.stdout() == textwrap.dedent(f"""\
            -- data ------------------------------------------------------
            path: {data.data_filename()}
            has_arcs: False
            2 files:
            file1.py: 17 lines [a_plugin]
            file2.py: 23 lines
            """)

    def test_debug_data_with_no_data_file(self):
        # A data file that was never written reports "No data collected".
        data = self.make_data_file()
        self.command_line("debug data")
        assert self.stdout() == textwrap.dedent(f"""\
            -- data ------------------------------------------------------
            path: {data.data_filename()}
            No data collected: file doesn't exist
            """)

    def test_debug_combinable_data(self):
        # Parallel (suffixed) data files are each summarized, separated
        # by a "-----" divider.
        data1 = self.make_data_file(lines={"file1.py": range(1, 18), "file2.py": [1]})
        data2 = self.make_data_file(suffix="123", lines={"file2.py": range(1, 10)})
        self.command_line("debug data")
        assert self.stdout() == textwrap.dedent(f"""\
            -- data ------------------------------------------------------
            path: {data1.data_filename()}
            has_arcs: False
            2 files:
            file1.py: 17 lines
            file2.py: 1 line
            -----
            path: {data2.data_filename()}
            has_arcs: False
            1 file:
            file2.py: 9 lines
            """)
class CmdLineStdoutTest(BaseCmdLineTest):
    """Test the command line with real stdout output."""

    def test_minimum_help(self):
        # Bare invocation: a short (under four lines) pointer message.
        self.command_line("")
        out = self.stdout()
        assert "Code coverage for Python" in out
        assert out.count("\n") < 4

    def test_version(self):
        # "ersion " matches both "version" and "Version".
        self.command_line("--version")
        out = self.stdout()
        assert "ersion " in out
        if env.C_TRACER:
            assert "with C extension" in out
        else:
            assert "without C extension" in out
        assert out.count("\n") < 4

    @pytest.mark.skipif(env.JYTHON, reason="Jython gets mad if you patch sys.argv")
    def test_help_contains_command_name(self):
        # Command name should be present in help output.
        fake_command_path = os_sep("lorem/ipsum/dolor")
        expected_command_name = "dolor"
        fake_argv = [fake_command_path, "sit", "amet"]
        with mock.patch.object(sys, 'argv', new=fake_argv):
            self.command_line("help")
        out = self.stdout()
        assert expected_command_name in out

    @pytest.mark.skipif(env.JYTHON, reason="Jython gets mad if you patch sys.argv")
    def test_help_contains_command_name_from_package(self):
        # Command package name should be present in help output.
        #
        # When the main module is actually a package's `__main__` module, the resulting command line
        # has the `__main__.py` file's path as the command name. Instead, the command name should
        # be derived from the package name.
        fake_command_path = os_sep("lorem/ipsum/dolor/__main__.py")
        expected_command_name = "dolor"
        fake_argv = [fake_command_path, "sit", "amet"]
        with mock.patch.object(sys, 'argv', new=fake_argv):
            self.command_line("help")
        out = self.stdout()
        assert expected_command_name in out

    def test_help(self):
        self.command_line("help")
        lines = self.stdout().splitlines()
        assert len(lines) > 10
        assert lines[-1] == f"Full documentation is at {__url__}"

    def test_cmd_help(self):
        # Per-command help shows usage, the command's options, and the doc URL.
        self.command_line("help run")
        out = self.stdout()
        lines = out.splitlines()
        assert "<pyfile>" in lines[0]
        assert "--timid" in out
        assert len(lines) > 20
        assert lines[-1] == f"Full documentation is at {__url__}"

    def test_unknown_topic(self):
        # Should probably be an ERR return, but meh.
        self.command_line("help foobar")
        lines = self.stdout().splitlines()
        assert lines[0] == "Don't know topic 'foobar'"
        assert lines[-1] == f"Full documentation is at {__url__}"

    def test_error(self):
        # A bad command goes to stderr and suggests "help".
        self.command_line("fooey kablooey", ret=ERR)
        err = self.stderr()
        assert "fooey" in err
        assert "help" in err

    def test_doc_url(self):
        assert __url__.startswith("https://coverage.readthedocs.io")
class CmdMainTest(CoverageTest):
    """Tests of coverage.cmdline.main(), using mocking for isolation."""

    run_in_temp_dir = False

    class CoverageScriptStub:
        """A stub for coverage.cmdline.CoverageScript, used by CmdMainTest."""

        def command_line(self, argv):
            """Stub for command_line, the arg determines what it will do."""
            if argv[0] == 'hello':
                print("Hello, world!")
            elif argv[0] == 'raise':
                try:
                    raise Exception("oh noes!")
                # Deliberately bare: _ExceptionDuringRun wraps whatever
                # sys.exc_info() holds, mimicking cmdline.py's behavior.
                except:
                    raise _ExceptionDuringRun(*sys.exc_info())
            elif argv[0] == 'internalraise':
                raise ValueError("coverage is broken")
            elif argv[0] == 'exit':
                sys.exit(23)
            else:
                raise AssertionError(f"Bad CoverageScriptStub: {argv!r}")
            return 0

    def setUp(self):
        super().setUp()
        # Swap in the stub for the duration of each test; addCleanup restores
        # the real CoverageScript even if the test fails.
        old_CoverageScript = coverage.cmdline.CoverageScript
        coverage.cmdline.CoverageScript = self.CoverageScriptStub
        self.addCleanup(setattr, coverage.cmdline, 'CoverageScript', old_CoverageScript)

    def test_normal(self):
        ret = coverage.cmdline.main(['hello'])
        assert ret == 0
        assert self.stdout() == "Hello, world!\n"

    def test_raise(self):
        # An _ExceptionDuringRun is unwrapped: the user sees the original
        # traceback on stderr and main() returns 1.
        ret = coverage.cmdline.main(['raise'])
        assert ret == 1
        out, err = self.stdouterr()
        assert out == ""
        print(err)
        err = err.splitlines(keepends=True)
        assert err[0] == 'Traceback (most recent call last):\n'
        assert '    raise Exception("oh noes!")\n' in err
        assert err[-1] == 'Exception: oh noes!\n'

    def test_internalraise(self):
        # Exceptions from coverage itself propagate out of main().
        with pytest.raises(ValueError, match="coverage is broken"):
            coverage.cmdline.main(['internalraise'])

    def test_exit(self):
        # SystemExit's code becomes main()'s return value.
        ret = coverage.cmdline.main(['exit'])
        assert ret == 23
class CoverageReportingFake:
    """A Coverage test double that hands back canned reporting totals.

    Each ``*_report`` method ignores its arguments and returns the total
    supplied at construction, so --fail-under handling can be exercised
    without collecting any real data.
    """
    # pylint: disable=missing-function-docstring

    def __init__(self, report_result, html_result=0, xml_result=0, json_report=0, lcov_result=0):
        # A real config object so option round-trips behave normally.
        self.config = CoverageConfig()
        # Canned totals returned by the corresponding reporting methods.
        self.report_result = report_result
        self.html_result = html_result
        self.xml_result = xml_result
        self.json_result = json_report
        self.lcov_result = lcov_result

    def set_option(self, optname, optvalue):
        self.config.set_option(optname, optvalue)

    def get_option(self, optname):
        return self.config.get_option(optname)

    def load(self):
        # Nothing to load; the fake has no data file.
        pass

    def report(self, *_args, **_kwargs):
        return self.report_result

    def html_report(self, *_args, **_kwargs):
        return self.html_result

    def xml_report(self, *_args, **_kwargs):
        return self.xml_result

    def json_report(self, *_args, **_kwargs):
        return self.json_result

    def lcov_report(self, *_args, **_kwargs):
        return self.lcov_result
class FailUnderTest(CoverageTest):
    """Tests of the --fail-under handling in cmdline.py."""

    # `results` feeds CoverageReportingFake positionally:
    # (report, html, xml, json, lcov) totals.  ret 0 means pass, 2 means the
    # fail-under comparison failed.
    @pytest.mark.parametrize("results, fail_under, cmd, ret", [
        # Command-line switch properly checks the result of reporting functions.
        ((20, 30, 40, 50, 60), None, "report --fail-under=19", 0),
        ((20, 30, 40, 50, 60), None, "report --fail-under=21", 2),
        ((20, 30, 40, 50, 60), None, "html --fail-under=29", 0),
        ((20, 30, 40, 50, 60), None, "html --fail-under=31", 2),
        ((20, 30, 40, 50, 60), None, "xml --fail-under=39", 0),
        ((20, 30, 40, 50, 60), None, "xml --fail-under=41", 2),
        ((20, 30, 40, 50, 60), None, "json --fail-under=49", 0),
        ((20, 30, 40, 50, 60), None, "json --fail-under=51", 2),
        ((20, 30, 40, 50, 60), None, "lcov --fail-under=59", 0),
        ((20, 30, 40, 50, 60), None, "lcov --fail-under=61", 2),
        # Configuration file setting properly checks the result of reporting.
        ((20, 30, 40, 50, 60), 19, "report", 0),
        ((20, 30, 40, 50, 60), 21, "report", 2),
        ((20, 30, 40, 50, 60), 29, "html", 0),
        ((20, 30, 40, 50, 60), 31, "html", 2),
        ((20, 30, 40, 50, 60), 39, "xml", 0),
        ((20, 30, 40, 50, 60), 41, "xml", 2),
        ((20, 30, 40, 50, 60), 49, "json", 0),
        ((20, 30, 40, 50, 60), 51, "json", 2),
        ((20, 30, 40, 50, 60), 59, "lcov", 0),
        ((20, 30, 40, 50, 60), 61, "lcov", 2),
        # Command-line overrides configuration.
        ((20, 30, 40, 50, 60), 19, "report --fail-under=21", 2),
    ])
    def test_fail_under(self, results, fail_under, cmd, ret):
        cov = CoverageReportingFake(*results)
        if fail_under is not None:
            cov.set_option("report:fail_under", fail_under)
        with mock.patch("coverage.cmdline.Coverage", lambda *a,**kw: cov):
            self.command_line(cmd, ret)

    # The failure message renders both totals at the requested precision.
    @pytest.mark.parametrize("result, cmd, ret, msg", [
        (20.5, "report --fail-under=20.4 --precision=1", 0, ""),
        (20.5, "report --fail-under=20.6 --precision=1", 2,
            "Coverage failure: total of 20.5 is less than fail-under=20.6\n"),
        (20.12345, "report --fail-under=20.1235 --precision=5", 2,
            "Coverage failure: total of 20.12345 is less than fail-under=20.12350\n"),
    ])
    def test_fail_under_with_precision(self, result, cmd, ret, msg):
        cov = CoverageReportingFake(report_result=result)
        with mock.patch("coverage.cmdline.Coverage", lambda *a,**kw: cov):
            self.command_line(cmd, ret)
        assert self.stdout() == msg
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Test cmdline.py for coverage.py."""
import pprint
import re
import sys
import textwrap
from unittest import mock
import pytest
import coverage
import coverage.cmdline
from coverage import env
from coverage.control import DEFAULT_DATAFILE
from coverage.config import CoverageConfig
from coverage.exceptions import _ExceptionDuringRun
from coverage.version import __url__
from tests.coveragetest import CoverageTest, OK, ERR, command_line
from tests.helpers import os_sep
class BaseCmdLineTest(CoverageTest):
    """Tests of execution paths through the command line interpreter."""

    run_in_temp_dir = False

    # Make a dict mapping function names to the default values that cmdline.py
    # uses when calling the function.  The calls below replay, against a
    # throwaway Mock, the exact kwargs cmdline.py passes to each API method;
    # DEFAULT_KWARGS then harvests them from the recorded mock_calls.
    _defaults = mock.Mock()
    _defaults.Coverage().annotate(
        directory=None, ignore_errors=None, include=None, omit=None, morfs=[],
        contexts=None,
    )
    _defaults.Coverage().html_report(
        directory=None, ignore_errors=None, include=None, omit=None, morfs=[],
        skip_covered=None, show_contexts=None, title=None, contexts=None,
        skip_empty=None, precision=None,
    )
    _defaults.Coverage().report(
        ignore_errors=None, include=None, omit=None, morfs=[],
        show_missing=None, skip_covered=None, contexts=None, skip_empty=None, precision=None,
        sort=None,
    )
    _defaults.Coverage().xml_report(
        ignore_errors=None, include=None, omit=None, morfs=[], outfile=None,
        contexts=None, skip_empty=None,
    )
    _defaults.Coverage().json_report(
        ignore_errors=None, include=None, omit=None, morfs=[], outfile=None,
        contexts=None, pretty_print=None, show_contexts=None,
    )
    _defaults.Coverage().lcov_report(
        ignore_errors=None, include=None, omit=None, morfs=[], outfile=None,
        contexts=None,
    )
    _defaults.Coverage(
        data_file=DEFAULT_DATAFILE,
        cover_pylib=None, data_suffix=None, timid=None, branch=None,
        config_file=True, source=None, include=None, omit=None, debug=None,
        concurrency=None, check_preimported=True, context=None, messages=True,
    )
    DEFAULT_KWARGS = {name: kw for name, _, kw in _defaults.mock_calls}
    def model_object(self):
        """Return a Mock suitable for use in CoverageScript."""
        mk = mock.Mock()
        cov = mk.Coverage.return_value
        # The mock needs options: back get/set with a real CoverageConfig.
        mk.config = CoverageConfig()
        cov.get_option = mk.config.get_option
        cov.set_option = mk.config.set_option
        # Get the type right for the result of reporting: a float percentage.
        cov.report.return_value = 50.0
        cov.html_report.return_value = 50.0
        cov.xml_report.return_value = 50.0
        cov.json_report.return_value = 50.0
        cov.lcov_report.return_value = 50.0
        return mk
    # Global names in cmdline.py that will be mocked during the tests.
    MOCK_GLOBALS = ['Coverage', 'PyRunner', 'show_help']

    def mock_command_line(self, args, options=None):
        """Run `args` through the command line, with a Mock.

        `options` is a dict of names and values to pass to `set_option`.

        Returns the Mock it used and the status code returned.
        """
        mk = self.model_object()
        # Pre-seed options as if they had been read from a config file.
        if options is not None:
            for name, value in options.items():
                mk.config.set_option(name, value)
        # Start/stop patchers explicitly so every one is stopped even if
        # command_line() raises.
        patchers = [
            mock.patch("coverage.cmdline."+name, getattr(mk, name))
            for name in self.MOCK_GLOBALS
        ]
        for patcher in patchers:
            patcher.start()
        try:
            ret = command_line(args)
        finally:
            for patcher in patchers:
                patcher.stop()
        return mk, ret
def cmd_executes(self, args, code, ret=OK, options=None):
    """Assert that the `args` end up executing the sequence in `code`.

    `code` is Python source written in terms of the mocked globals
    (Coverage, PyRunner, show_help); it is executed against a fresh model
    mock and the resulting call sequence is compared with the calls made
    while running `args` through the command line.
    """
    called, status = self.mock_command_line(args, options=options)
    assert status == ret, f"Wrong status: got {status!r}, wanted {ret!r}"
    # Remove all indentation, and execute with mock globals
    code = textwrap.dedent(code)
    expected = self.model_object()
    globs = {n: getattr(expected, n) for n in self.MOCK_GLOBALS}
    code_obj = compile(code, "<code>", "exec")
    eval(code_obj, globs, {})  # pylint: disable=eval-used
    # Many of our functions take a lot of arguments, and cmdline.py
    # calls them with many. But most of them are just the defaults, which
    # we don't want to have to repeat in all tests. For each call, apply
    # the defaults. This lets the tests just mention the interesting ones.
    for name, _, kwargs in expected.mock_calls:
        for k, v in self.DEFAULT_KWARGS.get(name, {}).items():
            kwargs.setdefault(k, v)
    self.assert_same_mock_calls(expected, called)
def cmd_executes_same(self, args1, args2):
    """Assert that the `args1` executes the same as `args2`."""
    mock_a, ret_a = self.mock_command_line(args1)
    mock_b, ret_b = self.mock_command_line(args2)
    assert ret_a == ret_b
    self.assert_same_mock_calls(mock_a, mock_b)
def assert_same_mock_calls(self, m1, m2):
    """Assert that `m1.mock_calls` and `m2.mock_calls` are the same."""
    # Compare with real equality first (CmdOptionParser.__eq__ needs it),
    # then fall back to pretty-printed text so a failure shows a readable
    # diff of the two call lists.
    if m1.mock_calls == m2.mock_calls:
        return
    formatted_1 = pprint.pformat(m1.mock_calls)
    formatted_2 = pprint.pformat(m2.mock_calls)
    assert formatted_1 + '\n' == formatted_2 + '\n'
def cmd_help(self, args, help_msg=None, topic=None, ret=ERR):
    """Run a command line, and check that it prints the right help.

    Only the last function call in the mock is checked, which should be the
    help message that we want to see.  Either `help_msg` (a positional
    message) or `topic` (a keyword topic name) is expected, not both.
    """
    mk, status = self.mock_command_line(args)
    assert status == ret, f"Wrong status: got {status}, wanted {ret}"
    if help_msg:
        assert mk.mock_calls[-1] == ('show_help', (help_msg,), {})
    else:
        assert mk.mock_calls[-1] == ('show_help', (), {'topic': topic})
class BaseCmdLineTestTest(BaseCmdLineTest):
    """Tests that our BaseCmdLineTest helpers work."""

    def test_cmd_executes_same(self):
        # All the other tests here use self.cmd_executes_same in successful
        # ways, so here we just check that it fails.
        # "run" and "debug" produce different mock call sequences, so the
        # helper's internal assertion should trip.
        with pytest.raises(AssertionError):
            self.cmd_executes_same("run", "debug")
class CmdLineTest(BaseCmdLineTest):
    """Tests of the coverage.py command line."""
    # Each `cmd_executes` snippet below is executed against a model mock
    # and compared, call for call, with what the command line actually did.

    def test_annotate(self):
        # coverage annotate [-d DIR] [-i] [--omit DIR,...] [FILE1 FILE2 ...]
        self.cmd_executes("annotate", """\
            cov = Coverage()
            cov.load()
            cov.annotate()
            """)
        self.cmd_executes("annotate -d dir1", """\
            cov = Coverage()
            cov.load()
            cov.annotate(directory="dir1")
            """)
        self.cmd_executes("annotate -i", """\
            cov = Coverage()
            cov.load()
            cov.annotate(ignore_errors=True)
            """)
        # --omit is passed both to the constructor and the report call.
        self.cmd_executes("annotate --omit fooey", """\
            cov = Coverage(omit=["fooey"])
            cov.load()
            cov.annotate(omit=["fooey"])
            """)
        self.cmd_executes("annotate --omit fooey,booey", """\
            cov = Coverage(omit=["fooey", "booey"])
            cov.load()
            cov.annotate(omit=["fooey", "booey"])
            """)
        self.cmd_executes("annotate mod1", """\
            cov = Coverage()
            cov.load()
            cov.annotate(morfs=["mod1"])
            """)
        self.cmd_executes("annotate mod1 mod2 mod3", """\
            cov = Coverage()
            cov.load()
            cov.annotate(morfs=["mod1", "mod2", "mod3"])
            """)
def test_combine(self):
    # coverage combine with args
    self.cmd_executes("combine datadir1", """\
        cov = Coverage()
        cov.combine(["datadir1"], strict=True, keep=False)
        cov.save()
        """)
    # coverage combine, appending: load() is called before combine().
    self.cmd_executes("combine --append datadir1", """\
        cov = Coverage()
        cov.load()
        cov.combine(["datadir1"], strict=True, keep=False)
        cov.save()
        """)
    # coverage combine without args
    self.cmd_executes("combine", """\
        cov = Coverage()
        cov.combine(None, strict=True, keep=False)
        cov.save()
        """)
    # coverage combine quietly: -q/--quiet map to Coverage(messages=False).
    self.cmd_executes("combine -q", """\
        cov = Coverage(messages=False)
        cov.combine(None, strict=True, keep=False)
        cov.save()
        """)
    self.cmd_executes("combine --quiet", """\
        cov = Coverage(messages=False)
        cov.combine(None, strict=True, keep=False)
        cov.save()
        """)
    self.cmd_executes("combine --data-file=foo.cov", """\
        cov = Coverage(data_file="foo.cov")
        cov.combine(None, strict=True, keep=False)
        cov.save()
        """)

def test_combine_doesnt_confuse_options_with_args(self):
    # https://github.com/nedbat/coveragepy/issues/385
    # An option value like "cov.ini" must not be swallowed as a data path.
    self.cmd_executes("combine --rcfile cov.ini", """\
        cov = Coverage(config_file='cov.ini')
        cov.combine(None, strict=True, keep=False)
        cov.save()
        """)
    self.cmd_executes("combine --rcfile cov.ini data1 data2/more", """\
        cov = Coverage(config_file='cov.ini')
        cov.combine(["data1", "data2/more"], strict=True, keep=False)
        cov.save()
        """)
# Bad "debug" invocations should produce a help message, not a report.
@pytest.mark.parametrize("cmd, output", [
    ("debug", "What information would you like: config, data, sys, premain?"),
    ("debug foo", "Don't know what you mean by 'foo'"),
    ("debug sys config", "Only one topic at a time, please"),
])
def test_debug(self, cmd, output):
    self.cmd_help(cmd, output)

def test_debug_sys(self):
    # Spot-check a few labels from the "debug sys" dump.
    self.command_line("debug sys")
    out = self.stdout()
    assert "version:" in out
    assert "data_file:" in out

def test_debug_config(self):
    # Spot-check a few settings from the "debug config" dump.
    self.command_line("debug config")
    out = self.stdout()
    assert "cover_pylib:" in out
    assert "skip_covered:" in out
    assert "skip_empty:" in out

def test_debug_premain(self):
    self.command_line("debug premain")
    out = self.stdout()
    # ... many lines ...
    # pytest_pyfunc_call : /Users/ned/cov/trunk/.tox/py39/site-packages/_pytest/python.py:183
    # test_debug_premain : /Users/ned/cov/trunk/tests/test_cmdline.py:284
    # command_line : /Users/ned/cov/trunk/tests/coveragetest.py:309
    # command_line : /Users/ned/cov/trunk/tests/coveragetest.py:472
    # command_line : /Users/ned/cov/trunk/coverage/cmdline.py:592
    # do_debug : /Users/ned/cov/trunk/coverage/cmdline.py:804
    # The regexes tolerate either path separator so this works on Windows.
    assert re.search(r"(?m)^\s+test_debug_premain : .*[/\\]tests[/\\]test_cmdline.py:\d+$", out)
    assert re.search(r"(?m)^\s+command_line : .*[/\\]coverage[/\\]cmdline.py:\d+$", out)
    assert re.search(r"(?m)^\s+do_debug : .*[/\\]coverage[/\\]cmdline.py:\d+$", out)
def test_erase(self):
    # coverage erase
    self.cmd_executes("erase", """\
        cov = Coverage()
        cov.erase()
        """)
    self.cmd_executes("erase --data-file=foo.cov", """\
        cov = Coverage(data_file="foo.cov")
        cov.erase()
        """)

def test_version(self):
    # coverage --version
    self.cmd_help("--version", topic="version", ret=OK)

def test_help_option(self):
    # coverage -h
    self.cmd_help("-h", topic="help", ret=OK)
    self.cmd_help("--help", topic="help", ret=OK)

def test_help_command(self):
    self.cmd_executes("help", "show_help(topic='help')")

def test_cmd_help(self):
    # "help run" is equivalent to "run --help".
    self.cmd_executes("run --help", "show_help(parser='<CmdOptionParser:run>')")
    self.cmd_executes_same("help run", "run --help")
def test_html(self):
    # coverage html -d DIR [-i] [--omit DIR,...] [FILE1 FILE2 ...]
    self.cmd_executes("html", """\
        cov = Coverage()
        cov.load()
        cov.html_report()
        """)
    self.cmd_executes("html -d dir1", """\
        cov = Coverage()
        cov.load()
        cov.html_report(directory="dir1")
        """)
    self.cmd_executes("html -i", """\
        cov = Coverage()
        cov.load()
        cov.html_report(ignore_errors=True)
        """)
    # --omit goes both to the constructor and to the report call.
    self.cmd_executes("html --omit fooey", """\
        cov = Coverage(omit=["fooey"])
        cov.load()
        cov.html_report(omit=["fooey"])
        """)
    self.cmd_executes("html --omit fooey,booey", """\
        cov = Coverage(omit=["fooey", "booey"])
        cov.load()
        cov.html_report(omit=["fooey", "booey"])
        """)
    self.cmd_executes("html mod1", """\
        cov = Coverage()
        cov.load()
        cov.html_report(morfs=["mod1"])
        """)
    self.cmd_executes("html mod1 mod2 mod3", """\
        cov = Coverage()
        cov.load()
        cov.html_report(morfs=["mod1", "mod2", "mod3"])
        """)
    self.cmd_executes("html --precision=3", """\
        cov = Coverage()
        cov.load()
        cov.html_report(precision=3)
        """)
    self.cmd_executes("html --title=Hello_there", """\
        cov = Coverage()
        cov.load()
        cov.html_report(title='Hello_there')
        """)
    # -q/--quiet map to Coverage(messages=False).
    self.cmd_executes("html -q", """\
        cov = Coverage(messages=False)
        cov.load()
        cov.html_report()
        """)
    self.cmd_executes("html --quiet", """\
        cov = Coverage(messages=False)
        cov.load()
        cov.html_report()
        """)
def test_json(self):
    # coverage json [-i] [--omit DIR,...] [FILE1 FILE2 ...]
    self.cmd_executes("json", """\
        cov = Coverage()
        cov.load()
        cov.json_report()
        """)
    self.cmd_executes("json --pretty-print", """\
        cov = Coverage()
        cov.load()
        cov.json_report(pretty_print=True)
        """)
    self.cmd_executes("json --pretty-print --show-contexts", """\
        cov = Coverage()
        cov.load()
        cov.json_report(pretty_print=True, show_contexts=True)
        """)
    self.cmd_executes("json -i", """\
        cov = Coverage()
        cov.load()
        cov.json_report(ignore_errors=True)
        """)
    self.cmd_executes("json -o myjson.foo", """\
        cov = Coverage()
        cov.load()
        cov.json_report(outfile="myjson.foo")
        """)
    # "-o -" means write to stdout.
    self.cmd_executes("json -o -", """\
        cov = Coverage()
        cov.load()
        cov.json_report(outfile="-")
        """)
    self.cmd_executes("json --omit fooey", """\
        cov = Coverage(omit=["fooey"])
        cov.load()
        cov.json_report(omit=["fooey"])
        """)
    self.cmd_executes("json --omit fooey,booey", """\
        cov = Coverage(omit=["fooey", "booey"])
        cov.load()
        cov.json_report(omit=["fooey", "booey"])
        """)
    self.cmd_executes("json mod1", """\
        cov = Coverage()
        cov.load()
        cov.json_report(morfs=["mod1"])
        """)
    self.cmd_executes("json mod1 mod2 mod3", """\
        cov = Coverage()
        cov.load()
        cov.json_report(morfs=["mod1", "mod2", "mod3"])
        """)
    # -q/--quiet map to Coverage(messages=False).
    self.cmd_executes("json -q", """\
        cov = Coverage(messages=False)
        cov.load()
        cov.json_report()
        """)
    self.cmd_executes("json --quiet", """\
        cov = Coverage(messages=False)
        cov.load()
        cov.json_report()
        """)
def test_lcov(self):
    # coverage lcov [-i] [--omit DIR,...] [FILE1 FILE2 ...]
    self.cmd_executes("lcov", """\
        cov = Coverage()
        cov.load()
        cov.lcov_report()
        """)
    self.cmd_executes("lcov -i", """\
        cov = Coverage()
        cov.load()
        cov.lcov_report(ignore_errors=True)
        """)
    self.cmd_executes("lcov -o mylcov.foo", """\
        cov = Coverage()
        cov.load()
        cov.lcov_report(outfile="mylcov.foo")
        """)
    # "-o -" means write to stdout.
    self.cmd_executes("lcov -o -", """\
        cov = Coverage()
        cov.load()
        cov.lcov_report(outfile="-")
        """)
    self.cmd_executes("lcov --omit fooey", """\
        cov = Coverage(omit=["fooey"])
        cov.load()
        cov.lcov_report(omit=["fooey"])
        """)
    self.cmd_executes("lcov --omit fooey,booey", """\
        cov = Coverage(omit=["fooey", "booey"])
        cov.load()
        cov.lcov_report(omit=["fooey", "booey"])
        """)
    # -q/--quiet map to Coverage(messages=False).
    self.cmd_executes("lcov -q", """\
        cov = Coverage(messages=False)
        cov.load()
        cov.lcov_report()
        """)
    self.cmd_executes("lcov --quiet", """\
        cov = Coverage(messages=False)
        cov.load()
        cov.lcov_report()
        """)
def test_report(self):
    # coverage report [-m] [-i] [-o DIR,...] [FILE1 FILE2 ...]
    self.cmd_executes("report", """\
        cov = Coverage()
        cov.load()
        cov.report(show_missing=None)
        """)
    self.cmd_executes("report -i", """\
        cov = Coverage()
        cov.load()
        cov.report(ignore_errors=True)
        """)
    self.cmd_executes("report -m", """\
        cov = Coverage()
        cov.load()
        cov.report(show_missing=True)
        """)
    self.cmd_executes("report --omit fooey", """\
        cov = Coverage(omit=["fooey"])
        cov.load()
        cov.report(omit=["fooey"])
        """)
    self.cmd_executes("report --omit fooey,booey", """\
        cov = Coverage(omit=["fooey", "booey"])
        cov.load()
        cov.report(omit=["fooey", "booey"])
        """)
    self.cmd_executes("report mod1", """\
        cov = Coverage()
        cov.load()
        cov.report(morfs=["mod1"])
        """)
    self.cmd_executes("report mod1 mod2 mod3", """\
        cov = Coverage()
        cov.load()
        cov.report(morfs=["mod1", "mod2", "mod3"])
        """)
    self.cmd_executes("report --precision=7", """\
        cov = Coverage()
        cov.load()
        cov.report(precision=7)
        """)
    self.cmd_executes("report --skip-covered", """\
        cov = Coverage()
        cov.load()
        cov.report(skip_covered=True)
        """)
    # When both switches appear, the later one wins.
    self.cmd_executes("report --skip-covered --no-skip-covered", """\
        cov = Coverage()
        cov.load()
        cov.report(skip_covered=False)
        """)
    self.cmd_executes("report --no-skip-covered", """\
        cov = Coverage()
        cov.load()
        cov.report(skip_covered=False)
        """)
    self.cmd_executes("report --skip-empty", """\
        cov = Coverage()
        cov.load()
        cov.report(skip_empty=True)
        """)
    self.cmd_executes("report --contexts=foo,bar", """\
        cov = Coverage()
        cov.load()
        cov.report(contexts=["foo", "bar"])
        """)
    self.cmd_executes("report --sort=-foo", """\
        cov = Coverage()
        cov.load()
        cov.report(sort='-foo')
        """)
    self.cmd_executes("report --data-file=foo.cov.2", """\
        cov = Coverage(data_file="foo.cov.2")
        cov.load()
        cov.report(show_missing=None)
        """)
def test_run(self):
    # coverage run [-p] [-L] [--timid] MODULE.py [ARG1 ARG2 ...]
    # run calls coverage.erase first.
    self.cmd_executes("run foo.py", """\
        cov = Coverage()
        runner = PyRunner(['foo.py'], as_module=False)
        runner.prepare()
        cov.start()
        runner.run()
        cov.stop()
        cov.save()
        """)
    # run -a combines with an existing data file before saving.
    self.cmd_executes("run -a foo.py", """\
        cov = Coverage()
        runner = PyRunner(['foo.py'], as_module=False)
        runner.prepare()
        cov.load()
        cov.start()
        runner.run()
        cov.stop()
        cov.save()
        """)
    # --timid sets a flag, and program arguments get passed through.
    self.cmd_executes("run --timid foo.py abc 123", """\
        cov = Coverage(timid=True)
        runner = PyRunner(['foo.py', 'abc', '123'], as_module=False)
        runner.prepare()
        cov.start()
        runner.run()
        cov.stop()
        cov.save()
        """)
    # -L sets a flag, and flags for the program don't confuse us.
    self.cmd_executes("run -p -L foo.py -a -b", """\
        cov = Coverage(cover_pylib=True, data_suffix=True)
        runner = PyRunner(['foo.py', '-a', '-b'], as_module=False)
        runner.prepare()
        cov.start()
        runner.run()
        cov.stop()
        cov.save()
        """)
    self.cmd_executes("run --branch foo.py", """\
        cov = Coverage(branch=True)
        runner = PyRunner(['foo.py'], as_module=False)
        runner.prepare()
        cov.start()
        runner.run()
        cov.stop()
        cov.save()
        """)
    self.cmd_executes("run --rcfile=myrc.rc foo.py", """\
        cov = Coverage(config_file="myrc.rc")
        runner = PyRunner(['foo.py'], as_module=False)
        runner.prepare()
        cov.start()
        runner.run()
        cov.stop()
        cov.save()
        """)
    self.cmd_executes("run --include=pre1,pre2 foo.py", """\
        cov = Coverage(include=["pre1", "pre2"])
        runner = PyRunner(['foo.py'], as_module=False)
        runner.prepare()
        cov.start()
        runner.run()
        cov.stop()
        cov.save()
        """)
    self.cmd_executes("run --omit=opre1,opre2 foo.py", """\
        cov = Coverage(omit=["opre1", "opre2"])
        runner = PyRunner(['foo.py'], as_module=False)
        runner.prepare()
        cov.start()
        runner.run()
        cov.stop()
        cov.save()
        """)
    self.cmd_executes("run --include=pre1,pre2 --omit=opre1,opre2 foo.py", """\
        cov = Coverage(include=["pre1", "pre2"], omit=["opre1", "opre2"])
        runner = PyRunner(['foo.py'], as_module=False)
        runner.prepare()
        cov.start()
        runner.run()
        cov.stop()
        cov.save()
        """)
    self.cmd_executes("run --source=quux,hi.there,/home/bar foo.py", """\
        cov = Coverage(source=["quux", "hi.there", "/home/bar"])
        runner = PyRunner(['foo.py'], as_module=False)
        runner.prepare()
        cov.start()
        runner.run()
        cov.stop()
        cov.save()
        """)
    # --concurrency values are split on commas into a list.
    self.cmd_executes("run --concurrency=gevent foo.py", """\
        cov = Coverage(concurrency=['gevent'])
        runner = PyRunner(['foo.py'], as_module=False)
        runner.prepare()
        cov.start()
        runner.run()
        cov.stop()
        cov.save()
        """)
    self.cmd_executes("run --concurrency=multiprocessing foo.py", """\
        cov = Coverage(concurrency=['multiprocessing'])
        runner = PyRunner(['foo.py'], as_module=False)
        runner.prepare()
        cov.start()
        runner.run()
        cov.stop()
        cov.save()
        """)
    self.cmd_executes("run --concurrency=gevent,thread foo.py", """\
        cov = Coverage(concurrency=['gevent', 'thread'])
        runner = PyRunner(['foo.py'], as_module=False)
        runner.prepare()
        cov.start()
        runner.run()
        cov.stop()
        cov.save()
        """)
    self.cmd_executes("run --data-file=output.coverage foo.py", """\
        cov = Coverage(data_file="output.coverage")
        runner = PyRunner(['foo.py'], as_module=False)
        runner.prepare()
        cov.start()
        runner.run()
        cov.stop()
        cov.save()
        """)
def test_multiprocessing_needs_config_file(self):
    # You can't use command-line args to add options to multiprocessing
    # runs, since they won't make it to the subprocesses. You need to use a
    # config file.
    self.command_line("run --concurrency=multiprocessing --branch foo.py", ret=ERR)
    msg = "Options affecting multiprocessing must only be specified in a configuration file."
    _, err = self.stdouterr()
    assert msg in err
    assert "Remove --branch from the command line." in err

def test_run_debug(self):
    # --debug values are split on commas into a list.
    self.cmd_executes("run --debug=opt1 foo.py", """\
        cov = Coverage(debug=["opt1"])
        runner = PyRunner(['foo.py'], as_module=False)
        runner.prepare()
        cov.start()
        runner.run()
        cov.stop()
        cov.save()
        """)
    self.cmd_executes("run --debug=opt1,opt2 foo.py", """\
        cov = Coverage(debug=["opt1","opt2"])
        runner = PyRunner(['foo.py'], as_module=False)
        runner.prepare()
        cov.start()
        runner.run()
        cov.stop()
        cov.save()
        """)
def test_run_module(self):
    self.cmd_executes("run -m mymodule", """\
        cov = Coverage()
        runner = PyRunner(['mymodule'], as_module=True)
        runner.prepare()
        cov.start()
        runner.run()
        cov.stop()
        cov.save()
        """)
    # Options after the module name belong to the module, not to coverage.
    self.cmd_executes("run -m mymodule -qq arg1 arg2", """\
        cov = Coverage()
        runner = PyRunner(['mymodule', '-qq', 'arg1', 'arg2'], as_module=True)
        runner.prepare()
        cov.start()
        runner.run()
        cov.stop()
        cov.save()
        """)
    self.cmd_executes("run --branch -m mymodule", """\
        cov = Coverage(branch=True)
        runner = PyRunner(['mymodule'], as_module=True)
        runner.prepare()
        cov.start()
        runner.run()
        cov.stop()
        cov.save()
        """)
    self.cmd_executes_same("run -m mymodule", "run --module mymodule")

def test_run_nothing(self):
    # "run" with no program or module is an error.
    self.command_line("run", ret=ERR)
    assert "Nothing to do" in self.stderr()
def test_run_from_config(self):
    # The [run] command_line setting supplies the command when none is
    # given; quoted arguments are split shell-style.
    options = {"run:command_line": "myprog.py a 123 'a quoted thing' xyz"}
    self.cmd_executes("run", """\
        cov = Coverage()
        runner = PyRunner(['myprog.py', 'a', '123', 'a quoted thing', 'xyz'], as_module=False)
        runner.prepare()
        cov.start()
        runner.run()
        cov.stop()
        cov.save()
        """,
        options=options,
    )

def test_run_module_from_config(self):
    # "-m" works inside the configured command line too.
    self.cmd_executes("run", """\
        cov = Coverage()
        runner = PyRunner(['mymodule', 'thing1', 'thing2'], as_module=True)
        runner.prepare()
        cov.start()
        runner.run()
        cov.stop()
        cov.save()
        """,
        options={"run:command_line": "-m mymodule thing1 thing2"},
    )

def test_run_from_config_but_empty(self):
    # An empty configured command line still means there's nothing to run.
    self.cmd_executes("run", """\
        cov = Coverage()
        show_help('Nothing to do.')
        """,
        ret=ERR,
        options={"run:command_line": ""},
    )

def test_run_dashm_only(self):
    # "-m" with no module is an error, with or without a configured command.
    self.cmd_executes("run -m", """\
        cov = Coverage()
        show_help('No module specified for -m')
        """,
        ret=ERR,
    )
    self.cmd_executes("run -m", """\
        cov = Coverage()
        show_help('No module specified for -m')
        """,
        ret=ERR,
        options={"run:command_line": "myprog.py"}
    )

def test_cant_append_parallel(self):
    self.command_line("run --append --parallel-mode foo.py", ret=ERR)
    assert "Can't append to data files in parallel mode." in self.stderr()
def test_xml(self):
    # coverage xml [-i] [--omit DIR,...] [FILE1 FILE2 ...]
    self.cmd_executes("xml", """\
        cov = Coverage()
        cov.load()
        cov.xml_report()
        """)
    self.cmd_executes("xml -i", """\
        cov = Coverage()
        cov.load()
        cov.xml_report(ignore_errors=True)
        """)
    self.cmd_executes("xml -o myxml.foo", """\
        cov = Coverage()
        cov.load()
        cov.xml_report(outfile="myxml.foo")
        """)
    # "-o -" means write to stdout.
    self.cmd_executes("xml -o -", """\
        cov = Coverage()
        cov.load()
        cov.xml_report(outfile="-")
        """)
    self.cmd_executes("xml --omit fooey", """\
        cov = Coverage(omit=["fooey"])
        cov.load()
        cov.xml_report(omit=["fooey"])
        """)
    self.cmd_executes("xml --omit fooey,booey", """\
        cov = Coverage(omit=["fooey", "booey"])
        cov.load()
        cov.xml_report(omit=["fooey", "booey"])
        """)
    self.cmd_executes("xml mod1", """\
        cov = Coverage()
        cov.load()
        cov.xml_report(morfs=["mod1"])
        """)
    self.cmd_executes("xml mod1 mod2 mod3", """\
        cov = Coverage()
        cov.load()
        cov.xml_report(morfs=["mod1", "mod2", "mod3"])
        """)
    # -q/--quiet map to Coverage(messages=False).
    self.cmd_executes("xml -q", """\
        cov = Coverage(messages=False)
        cov.load()
        cov.xml_report()
        """)
    self.cmd_executes("xml --quiet", """\
        cov = Coverage(messages=False)
        cov.load()
        cov.xml_report()
        """)

def test_no_arguments_at_all(self):
    self.cmd_help("", topic="minimum_help", ret=OK)

def test_bad_command(self):
    self.cmd_help("xyzzy", "Unknown command: 'xyzzy'")
class CmdLineWithFilesTest(BaseCmdLineTest):
    """Test the command line in ways that need temp files."""

    run_in_temp_dir = True

    def test_debug_data(self):
        # NOTE(review): the relative indentation and blank lines inside the
        # expected-output strings were lost in extraction — verify against
        # the actual "debug data" output before relying on exact equality.
        data = self.make_data_file(
            lines={
                "file1.py": range(1, 18),
                "file2.py": range(1, 24),
            },
            file_tracers={"file1.py": "a_plugin"},
        )
        self.command_line("debug data")
        assert self.stdout() == textwrap.dedent(f"""\
            -- data ------------------------------------------------------
            path: {data.data_filename()}
            has_arcs: False
            2 files:
            file1.py: 17 lines [a_plugin]
            file2.py: 23 lines
            """)

    def test_debug_data_with_no_data_file(self):
        data = self.make_data_file()
        self.command_line("debug data")
        assert self.stdout() == textwrap.dedent(f"""\
            -- data ------------------------------------------------------
            path: {data.data_filename()}
            No data collected: file doesn't exist
            """)

    def test_debug_combinable_data(self):
        # Two data files (one suffixed, as parallel mode would create) are
        # both listed, separated by a divider.
        data1 = self.make_data_file(lines={"file1.py": range(1, 18), "file2.py": [1]})
        data2 = self.make_data_file(suffix="123", lines={"file2.py": range(1, 10)})
        self.command_line("debug data")
        assert self.stdout() == textwrap.dedent(f"""\
            -- data ------------------------------------------------------
            path: {data1.data_filename()}
            has_arcs: False
            2 files:
            file1.py: 17 lines
            file2.py: 1 line
            -----
            path: {data2.data_filename()}
            has_arcs: False
            1 file:
            file2.py: 9 lines
            """)
class CmdLineStdoutTest(BaseCmdLineTest):
    """Test the command line with real stdout output."""

    def test_minimum_help(self):
        # No arguments at all prints a terse (under four lines) banner.
        self.command_line("")
        out = self.stdout()
        assert "Code coverage for Python" in out
        assert out.count("\n") < 4

    def test_version(self):
        self.command_line("--version")
        out = self.stdout()
        # "ersion " matches both "Version" and "version".
        assert "ersion " in out
        if env.C_TRACER:
            assert "with C extension" in out
        else:
            assert "without C extension" in out
        assert out.count("\n") < 4

    @pytest.mark.skipif(env.JYTHON, reason="Jython gets mad if you patch sys.argv")
    def test_help_contains_command_name(self):
        # Command name should be present in help output.
        fake_command_path = os_sep("lorem/ipsum/dolor")
        expected_command_name = "dolor"
        fake_argv = [fake_command_path, "sit", "amet"]
        with mock.patch.object(sys, 'argv', new=fake_argv):
            self.command_line("help")
        out = self.stdout()
        assert expected_command_name in out

    @pytest.mark.skipif(env.JYTHON, reason="Jython gets mad if you patch sys.argv")
    def test_help_contains_command_name_from_package(self):
        # Command package name should be present in help output.
        #
        # When the main module is actually a package's `__main__` module, the resulting command line
        # has the `__main__.py` file's patch as the command name. Instead, the command name should
        # be derived from the package name.
        fake_command_path = os_sep("lorem/ipsum/dolor/__main__.py")
        expected_command_name = "dolor"
        fake_argv = [fake_command_path, "sit", "amet"]
        with mock.patch.object(sys, 'argv', new=fake_argv):
            self.command_line("help")
        out = self.stdout()
        assert expected_command_name in out

    def test_help(self):
        self.command_line("help")
        lines = self.stdout().splitlines()
        assert len(lines) > 10
        assert lines[-1] == f"Full documentation is at {__url__}"

    def test_cmd_help(self):
        self.command_line("help run")
        out = self.stdout()
        lines = out.splitlines()
        assert "<pyfile>" in lines[0]
        assert "--timid" in out
        assert len(lines) > 20
        assert lines[-1] == f"Full documentation is at {__url__}"

    def test_unknown_topic(self):
        # Should probably be an ERR return, but meh.
        self.command_line("help foobar")
        lines = self.stdout().splitlines()
        assert lines[0] == "Don't know topic 'foobar'"
        assert lines[-1] == f"Full documentation is at {__url__}"

    def test_error(self):
        self.command_line("fooey kablooey", ret=ERR)
        err = self.stderr()
        assert "fooey" in err
        assert "help" in err

    def test_doc_url(self):
        assert __url__.startswith("https://coverage.readthedocs.io")
class CmdMainTest(CoverageTest):
    """Tests of coverage.cmdline.main(), using mocking for isolation."""

    run_in_temp_dir = False

    class CoverageScriptStub:
        """A stub for coverage.cmdline.CoverageScript, used by CmdMainTest."""

        def command_line(self, argv):
            """Stub for command_line, the arg determines what it will do."""
            if argv[0] == 'hello':
                print("Hello, world!")
            elif argv[0] == 'raise':
                try:
                    raise Exception("oh noes!")
                # Deliberately bare: re-wrap whatever was raised, the same
                # way the real command line reports user-program errors.
                except:
                    raise _ExceptionDuringRun(*sys.exc_info())
            elif argv[0] == 'internalraise':
                raise ValueError("coverage is broken")
            elif argv[0] == 'exit':
                sys.exit(23)
            else:
                raise AssertionError(f"Bad CoverageScriptStub: {argv!r}")
            return 0

    def setUp(self):
        # Swap in the stub for the real CoverageScript, and restore it
        # when the test finishes.
        super().setUp()
        old_CoverageScript = coverage.cmdline.CoverageScript
        coverage.cmdline.CoverageScript = self.CoverageScriptStub
        self.addCleanup(setattr, coverage.cmdline, 'CoverageScript', old_CoverageScript)

    def test_normal(self):
        ret = coverage.cmdline.main(['hello'])
        assert ret == 0
        assert self.stdout() == "Hello, world!\n"

    def test_raise(self):
        # A user-program exception produces a traceback on stderr and a
        # status of 1.
        ret = coverage.cmdline.main(['raise'])
        assert ret == 1
        out, err = self.stdouterr()
        assert out == ""
        print(err)
        err = err.splitlines(keepends=True)
        assert err[0] == 'Traceback (most recent call last):\n'
        assert '    raise Exception("oh noes!")\n' in err
        assert err[-1] == 'Exception: oh noes!\n'

    def test_internalraise(self):
        # Coverage's own bugs propagate out of main().
        with pytest.raises(ValueError, match="coverage is broken"):
            coverage.cmdline.main(['internalraise'])

    def test_exit(self):
        # SystemExit is converted to a return code.
        ret = coverage.cmdline.main(['exit'])
        assert ret == 23
class CoverageReportingFake:
    """A fake Coverage.coverage test double for FailUnderTest methods."""

    # NOTE(review): the parameter is `json_report` while the attribute is
    # `json_result` — kept as-is since the keyword name is part of the
    # interface, but worth unifying at the call sites someday.
    def __init__(self, report_result, html_result=0, xml_result=0, json_report=0, lcov_result=0):
        """Remember the percentage each reporting method should return."""
        self.config = CoverageConfig()
        self.report_result = report_result
        self.html_result = html_result
        self.xml_result = xml_result
        self.json_result = json_report
        self.lcov_result = lcov_result

    def set_option(self, optname, optvalue):
        """Write an option through to the real config object."""
        self.config.set_option(optname, optvalue)

    def get_option(self, optname):
        """Read an option back from the real config object."""
        return self.config.get_option(optname)

    def load(self):
        """Pretend to load data; nothing to do."""
        pass

    def report(self, *args_unused, **kwargs_unused):
        """Return the canned total for the text report."""
        return self.report_result

    def html_report(self, *args_unused, **kwargs_unused):
        """Return the canned total for the HTML report."""
        return self.html_result

    def xml_report(self, *args_unused, **kwargs_unused):
        """Return the canned total for the XML report."""
        return self.xml_result

    def json_report(self, *args_unused, **kwargs_unused):
        """Return the canned total for the JSON report."""
        return self.json_result

    def lcov_report(self, *args_unused, **kwargs_unused):
        """Return the canned total for the LCOV report."""
        return self.lcov_result
class FailUnderTest(CoverageTest):
    """Tests of the --fail-under handling in cmdline.py."""

    # Each `results` tuple feeds CoverageReportingFake positionally:
    # (report, html, xml, json, lcov) totals.  Exit status 2 means the
    # fail-under threshold was not met.
    @pytest.mark.parametrize("results, fail_under, cmd, ret", [
        # Command-line switch properly checks the result of reporting functions.
        ((20, 30, 40, 50, 60), None, "report --fail-under=19", 0),
        ((20, 30, 40, 50, 60), None, "report --fail-under=21", 2),
        ((20, 30, 40, 50, 60), None, "html --fail-under=29", 0),
        ((20, 30, 40, 50, 60), None, "html --fail-under=31", 2),
        ((20, 30, 40, 50, 60), None, "xml --fail-under=39", 0),
        ((20, 30, 40, 50, 60), None, "xml --fail-under=41", 2),
        ((20, 30, 40, 50, 60), None, "json --fail-under=49", 0),
        ((20, 30, 40, 50, 60), None, "json --fail-under=51", 2),
        ((20, 30, 40, 50, 60), None, "lcov --fail-under=59", 0),
        ((20, 30, 40, 50, 60), None, "lcov --fail-under=61", 2),
        # Configuration file setting properly checks the result of reporting.
        ((20, 30, 40, 50, 60), 19, "report", 0),
        ((20, 30, 40, 50, 60), 21, "report", 2),
        ((20, 30, 40, 50, 60), 29, "html", 0),
        ((20, 30, 40, 50, 60), 31, "html", 2),
        ((20, 30, 40, 50, 60), 39, "xml", 0),
        ((20, 30, 40, 50, 60), 41, "xml", 2),
        ((20, 30, 40, 50, 60), 49, "json", 0),
        ((20, 30, 40, 50, 60), 51, "json", 2),
        ((20, 30, 40, 50, 60), 59, "lcov", 0),
        ((20, 30, 40, 50, 60), 61, "lcov", 2),
        # Command-line overrides configuration.
        ((20, 30, 40, 50, 60), 19, "report --fail-under=21", 2),
    ])
    def test_fail_under(self, results, fail_under, cmd, ret):
        cov = CoverageReportingFake(*results)
        if fail_under is not None:
            cov.set_option("report:fail_under", fail_under)
        with mock.patch("coverage.cmdline.Coverage", lambda *a,**kw: cov):
            self.command_line(cmd, ret)

    # The failure message reports both numbers at the configured precision.
    @pytest.mark.parametrize("result, cmd, ret, msg", [
        (20.5, "report --fail-under=20.4 --precision=1", 0, ""),
        (20.5, "report --fail-under=20.6 --precision=1", 2,
            "Coverage failure: total of 20.5 is less than fail-under=20.6\n"),
        (20.12345, "report --fail-under=20.1235 --precision=5", 2,
            "Coverage failure: total of 20.12345 is less than fail-under=20.12350\n"),
    ])
    def test_fail_under_with_precision(self, result, cmd, ret, msg):
        cov = CoverageReportingFake(report_result=result)
        with mock.patch("coverage.cmdline.Coverage", lambda *a,**kw: cov):
            self.command_line(cmd, ret)
            assert self.stdout() == msg
|
en
| 0.488508
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt Test cmdline.py for coverage.py. Tests of execution paths through the command line interpreter. # Make a dict mapping function names to the default values that cmdline.py # uses when calling the function. Return a Mock suitable for use in CoverageScript. # The mock needs options. # Get the type right for the result of reporting. # Global names in cmdline.py that will be mocked during the tests. Run `args` through the command line, with a Mock. `options` is a dict of names and values to pass to `set_option`. Returns the Mock it used and the status code returned. Assert that the `args` end up executing the sequence in `code`. # Remove all indentation, and execute with mock globals # pylint: disable=eval-used # Many of our functions take a lot of arguments, and cmdline.py # calls them with many. But most of them are just the defaults, which # we don't want to have to repeat in all tests. For each call, apply # the defaults. This lets the tests just mention the interesting ones. Assert that the `args1` executes the same as `args2`. Assert that `m1.mock_calls` and `m2.mock_calls` are the same. # Use a real equality comparison, but if it fails, use a nicer assert # so we can tell what's going on. We have to use the real == first due # to CmdOptionParser.__eq__ Run a command line, and check that it prints the right help. Only the last function call in the mock is checked, which should be the help message that we want to see. Tests that our BaseCmdLineTest helpers work. # All the other tests here use self.cmd_executes_same in successful # ways, so here we just check that it fails. Tests of the coverage.py command line. # coverage annotate [-d DIR] [-i] [--omit DIR,...] [FILE1 FILE2 ...] 
\ cov = Coverage() cov.load() cov.annotate() \ cov = Coverage() cov.load() cov.annotate(directory="dir1") \ cov = Coverage() cov.load() cov.annotate(ignore_errors=True) \ cov = Coverage(omit=["fooey"]) cov.load() cov.annotate(omit=["fooey"]) \ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.annotate(omit=["fooey", "booey"]) \ cov = Coverage() cov.load() cov.annotate(morfs=["mod1"]) \ cov = Coverage() cov.load() cov.annotate(morfs=["mod1", "mod2", "mod3"]) # coverage combine with args \ cov = Coverage() cov.combine(["datadir1"], strict=True, keep=False) cov.save() # coverage combine, appending \ cov = Coverage() cov.load() cov.combine(["datadir1"], strict=True, keep=False) cov.save() # coverage combine without args \ cov = Coverage() cov.combine(None, strict=True, keep=False) cov.save() # coverage combine quietly \ cov = Coverage(messages=False) cov.combine(None, strict=True, keep=False) cov.save() \ cov = Coverage(messages=False) cov.combine(None, strict=True, keep=False) cov.save() \ cov = Coverage(data_file="foo.cov") cov.combine(None, strict=True, keep=False) cov.save() # https://github.com/nedbat/coveragepy/issues/385 \ cov = Coverage(config_file='cov.ini') cov.combine(None, strict=True, keep=False) cov.save() \ cov = Coverage(config_file='cov.ini') cov.combine(["data1", "data2/more"], strict=True, keep=False) cov.save() # ... many lines ... # pytest_pyfunc_call : /Users/ned/cov/trunk/.tox/py39/site-packages/_pytest/python.py:183 # test_debug_premain : /Users/ned/cov/trunk/tests/test_cmdline.py:284 # command_line : /Users/ned/cov/trunk/tests/coveragetest.py:309 # command_line : /Users/ned/cov/trunk/tests/coveragetest.py:472 # command_line : /Users/ned/cov/trunk/coverage/cmdline.py:592 # do_debug : /Users/ned/cov/trunk/coverage/cmdline.py:804 # coverage erase \ cov = Coverage() cov.erase() \ cov = Coverage(data_file="foo.cov") cov.erase() # coverage --version # coverage -h # coverage html -d DIR [-i] [--omit DIR,...] [FILE1 FILE2 ...] 
\ cov = Coverage() cov.load() cov.html_report() \ cov = Coverage() cov.load() cov.html_report(directory="dir1") \ cov = Coverage() cov.load() cov.html_report(ignore_errors=True) \ cov = Coverage(omit=["fooey"]) cov.load() cov.html_report(omit=["fooey"]) \ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.html_report(omit=["fooey", "booey"]) \ cov = Coverage() cov.load() cov.html_report(morfs=["mod1"]) \ cov = Coverage() cov.load() cov.html_report(morfs=["mod1", "mod2", "mod3"]) \ cov = Coverage() cov.load() cov.html_report(precision=3) \ cov = Coverage() cov.load() cov.html_report(title='Hello_there') \ cov = Coverage(messages=False) cov.load() cov.html_report() \ cov = Coverage(messages=False) cov.load() cov.html_report() # coverage json [-i] [--omit DIR,...] [FILE1 FILE2 ...] \ cov = Coverage() cov.load() cov.json_report() \ cov = Coverage() cov.load() cov.json_report(pretty_print=True) \ cov = Coverage() cov.load() cov.json_report(pretty_print=True, show_contexts=True) \ cov = Coverage() cov.load() cov.json_report(ignore_errors=True) \ cov = Coverage() cov.load() cov.json_report(outfile="myjson.foo") \ cov = Coverage() cov.load() cov.json_report(outfile="-") \ cov = Coverage(omit=["fooey"]) cov.load() cov.json_report(omit=["fooey"]) \ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.json_report(omit=["fooey", "booey"]) \ cov = Coverage() cov.load() cov.json_report(morfs=["mod1"]) \ cov = Coverage() cov.load() cov.json_report(morfs=["mod1", "mod2", "mod3"]) \ cov = Coverage(messages=False) cov.load() cov.json_report() \ cov = Coverage(messages=False) cov.load() cov.json_report() # coverage lcov [-i] [--omit DIR,...] [FILE1 FILE2 ...] 
\ cov = Coverage() cov.load() cov.lcov_report() \ cov = Coverage() cov.load() cov.lcov_report(ignore_errors=True) \ cov = Coverage() cov.load() cov.lcov_report(outfile="mylcov.foo") \ cov = Coverage() cov.load() cov.lcov_report(outfile="-") \ cov = Coverage(omit=["fooey"]) cov.load() cov.lcov_report(omit=["fooey"]) \ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.lcov_report(omit=["fooey", "booey"]) \ cov = Coverage(messages=False) cov.load() cov.lcov_report() \ cov = Coverage(messages=False) cov.load() cov.lcov_report() # coverage report [-m] [-i] [-o DIR,...] [FILE1 FILE2 ...] \ cov = Coverage() cov.load() cov.report(show_missing=None) \ cov = Coverage() cov.load() cov.report(ignore_errors=True) \ cov = Coverage() cov.load() cov.report(show_missing=True) \ cov = Coverage(omit=["fooey"]) cov.load() cov.report(omit=["fooey"]) \ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.report(omit=["fooey", "booey"]) \ cov = Coverage() cov.load() cov.report(morfs=["mod1"]) \ cov = Coverage() cov.load() cov.report(morfs=["mod1", "mod2", "mod3"]) \ cov = Coverage() cov.load() cov.report(precision=7) \ cov = Coverage() cov.load() cov.report(skip_covered=True) \ cov = Coverage() cov.load() cov.report(skip_covered=False) \ cov = Coverage() cov.load() cov.report(skip_covered=False) \ cov = Coverage() cov.load() cov.report(skip_empty=True) \ cov = Coverage() cov.load() cov.report(contexts=["foo", "bar"]) \ cov = Coverage() cov.load() cov.report(sort='-foo') \ cov = Coverage(data_file="foo.cov.2") cov.load() cov.report(show_missing=None) # coverage run [-p] [-L] [--timid] MODULE.py [ARG1 ARG2 ...] # run calls coverage.erase first. \ cov = Coverage() runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() # run -a combines with an existing data file before saving. 
\ cov = Coverage() runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.load() cov.start() runner.run() cov.stop() cov.save() # --timid sets a flag, and program arguments get passed through. \ cov = Coverage(timid=True) runner = PyRunner(['foo.py', 'abc', '123'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() # -L sets a flag, and flags for the program don't confuse us. \ cov = Coverage(cover_pylib=True, data_suffix=True) runner = PyRunner(['foo.py', '-a', '-b'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() \ cov = Coverage(branch=True) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() \ cov = Coverage(config_file="myrc.rc") runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() \ cov = Coverage(include=["pre1", "pre2"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() \ cov = Coverage(omit=["opre1", "opre2"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() \ cov = Coverage(include=["pre1", "pre2"], omit=["opre1", "opre2"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() \ cov = Coverage(source=["quux", "hi.there", "/home/bar"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() \ cov = Coverage(concurrency=['gevent']) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() \ cov = Coverage(concurrency=['multiprocessing']) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() \ cov = Coverage(concurrency=['gevent', 'thread']) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() 
cov.stop() cov.save() \ cov = Coverage(data_file="output.coverage") runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() # You can't use command-line args to add options to multiprocessing # runs, since they won't make it to the subprocesses. You need to use a # config file. \ cov = Coverage(debug=["opt1"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() \ cov = Coverage(debug=["opt1","opt2"]) runner = PyRunner(['foo.py'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() \ cov = Coverage() runner = PyRunner(['mymodule'], as_module=True) runner.prepare() cov.start() runner.run() cov.stop() cov.save() \ cov = Coverage() runner = PyRunner(['mymodule', '-qq', 'arg1', 'arg2'], as_module=True) runner.prepare() cov.start() runner.run() cov.stop() cov.save() \ cov = Coverage(branch=True) runner = PyRunner(['mymodule'], as_module=True) runner.prepare() cov.start() runner.run() cov.stop() cov.save() \ cov = Coverage() runner = PyRunner(['myprog.py', 'a', '123', 'a quoted thing', 'xyz'], as_module=False) runner.prepare() cov.start() runner.run() cov.stop() cov.save() \ cov = Coverage() runner = PyRunner(['mymodule', 'thing1', 'thing2'], as_module=True) runner.prepare() cov.start() runner.run() cov.stop() cov.save() \ cov = Coverage() show_help('Nothing to do.') \ cov = Coverage() show_help('No module specified for -m') \ cov = Coverage() show_help('No module specified for -m') # coverage xml [-i] [--omit DIR,...] [FILE1 FILE2 ...] 
\ cov = Coverage() cov.load() cov.xml_report() \ cov = Coverage() cov.load() cov.xml_report(ignore_errors=True) \ cov = Coverage() cov.load() cov.xml_report(outfile="myxml.foo") \ cov = Coverage() cov.load() cov.xml_report(outfile="-") \ cov = Coverage(omit=["fooey"]) cov.load() cov.xml_report(omit=["fooey"]) \ cov = Coverage(omit=["fooey", "booey"]) cov.load() cov.xml_report(omit=["fooey", "booey"]) \ cov = Coverage() cov.load() cov.xml_report(morfs=["mod1"]) \ cov = Coverage() cov.load() cov.xml_report(morfs=["mod1", "mod2", "mod3"]) \ cov = Coverage(messages=False) cov.load() cov.xml_report() \ cov = Coverage(messages=False) cov.load() cov.xml_report() Test the command line in ways that need temp files. \ -- data ------------------------------------------------------ path: {data.data_filename()} has_arcs: False 2 files: file1.py: 17 lines [a_plugin] file2.py: 23 lines \ -- data ------------------------------------------------------ path: {data.data_filename()} No data collected: file doesn't exist \ -- data ------------------------------------------------------ path: {data1.data_filename()} has_arcs: False 2 files: file1.py: 17 lines file2.py: 1 line ----- path: {data2.data_filename()} has_arcs: False 1 file: file2.py: 9 lines Test the command line with real stdout output. # Command name should be present in help output. # Command package name should be present in help output. # # When the main module is actually a package's `__main__` module, the resulting command line # has the `__main__.py` file's patch as the command name. Instead, the command name should # be derived from the package name. # Should probably be an ERR return, but meh. Tests of coverage.cmdline.main(), using mocking for isolation. A stub for coverage.cmdline.CoverageScript, used by CmdMainTest. Stub for command_line, the arg determines what it will do. A fake Coverage.coverage test double for FailUnderTest methods. 
# pylint: disable=missing-function-docstring Tests of the --fail-under handling in cmdline.py. # Command-line switch properly checks the result of reporting functions. # Configuration file setting properly checks the result of reporting. # Command-line overrides configuration.
| 2.175381
| 2
|
Team.py
|
jrandj/FPL-draft-picker
| 0
|
6625721
|
import math
from collections import OrderedDict
from tabulate import tabulate
class Team:
    """
    A class that represents a team.

    Attributes
    ----------
    teamName : str
        The name of the team.
    teamID : str
        The unique identifier of the team.
    consolidatedData : object
        An instance of ConsolidatedData.
    playersInTeam : sequence
        A list of the players in the team.
    formations : sequence
        A list of the possible formations for the team and their projected points total in the next fixture.
    candidates_representation : str
        A subset of players with six game projections added, rendered as HTML.
    formations_representation : str
        A representation of the formations of the team.

    Methods
    -------
    get_players_for_team()
        Return the players in the team.
    get_formations_for_team()
        Return team formations in descending order with the highest scoring at the top.
    add_player_to_formation()
        Attempt to add a player to a formation.
    generate_candidates_representation()
        Generate a representation of the candidates.
    generate_formations_representation()
        Generate a representation of the formations of the team.
    """

    # The four outfield slot categories a formation is made of. Each formation
    # dict additionally carries a 'Score' bookkeeping key that is NOT a position.
    _POSITIONS = ('GKP', 'DEF', 'MID', 'FWD')

    def __init__(self, teamName, teamID, consolidatedData):
        self.teamName = teamName
        self.teamID = teamID
        self.consolidatedData = consolidatedData
        # Derived state: players drafted by this team and the scored formations.
        self.playersInTeam = self.get_players_for_team(self.teamID, self.consolidatedData)
        self.formations = self.get_formations_for_team(self.playersInTeam, self.consolidatedData)
        # Pre-rendered report fragments.
        self.candidates_representation = self.generate_candidates_representation()
        self.formations_representation = self.generate_formations_representation()

    @staticmethod
    def get_players_for_team(teamID, consolidatedData):
        """Return the players in the team.

        Parameters
        ----------
        teamID : str
            The unique identifier of the team.
        consolidatedData : object
            An instance of ConsolidatedData.

        Returns
        -------
        list
            The player dicts whose 'selected' field matches ``teamID``.
        """
        # A player's 'selected' field holds the ID of the team that drafted it.
        return [player for player in consolidatedData.officialAPIData.players['elements']
                if player['selected'] == teamID]

    @staticmethod
    def get_formations_for_team(playersInTeam, consolidatedData):
        """Return team formations in descending order with the highest scoring at the top.

        Parameters
        ----------
        playersInTeam : sequence
            A list of the players in the team. Sorted in place as a side effect.
        consolidatedData : object
            An instance of ConsolidatedData.

        Returns
        -------
        list
            The candidate formation dicts with their 'Score' filled in, best first.
        """
        formations = [{'GKP': 1, 'DEF': 5, 'MID': 3, 'FWD': 2, 'Score': 0},
                      {'GKP': 1, 'DEF': 5, 'MID': 4, 'FWD': 1, 'Score': 0},
                      {'GKP': 1, 'DEF': 5, 'MID': 2, 'FWD': 3, 'Score': 0},
                      {'GKP': 1, 'DEF': 4, 'MID': 3, 'FWD': 3, 'Score': 0},
                      {'GKP': 1, 'DEF': 4, 'MID': 5, 'FWD': 1, 'Score': 0},
                      {'GKP': 1, 'DEF': 4, 'MID': 4, 'FWD': 2, 'Score': 0},
                      {'GKP': 1, 'DEF': 3, 'MID': 5, 'FWD': 2, 'Score': 0},
                      {'GKP': 1, 'DEF': 3, 'MID': 4, 'FWD': 3, 'Score': 0}]
        # The column used as the next game week projection (8th from the end of
        # the six game projections frame).
        nextGameWeek = consolidatedData.projectionsData.sixGameProjections[0].columns.values[-8]
        # Sort by position, then by descending projection, so the greedy fill
        # below always sees the highest-projected eligible player first.
        playersInTeam.sort(key=lambda x: (x['position_name'], -x[nextGameWeek]))
        for formation in formations:
            team_copy = playersInTeam.copy()
            current_formation = dict.fromkeys(Team._POSITIONS, 0)
            total_points = 0
            player_index = 0
            # Greedily place players until every slot is filled or no players
            # remain. Bug fix: the previous condition compared
            # ``current_formation != formation`` directly, which was always true
            # because ``formation`` also carries a 'Score' key, so the loop
            # never stopped early; compare the position counts explicitly.
            while (any(current_formation[position] < formation[position]
                       for position in Team._POSITIONS)
                   and player_index < len(team_copy)):
                current_player = team_copy[player_index]
                # This approach assumes the team is sorted by projected points
                # in the next game week.
                if Team.add_player_to_formation(current_player, current_formation, formation):
                    total_points += current_player[nextGameWeek]
                    del team_copy[player_index]
                    player_index = 0
                else:
                    player_index += 1
            formation['Score'] = round(total_points, 2)
        formations.sort(key=lambda x: -x['Score'])
        return formations

    @staticmethod
    def add_player_to_formation(current_player, current_formation, formation):
        """Attempt to add a player to a formation.

        Parameters
        ----------
        current_player : dict
            The proposed player.
        current_formation : dict
            The current position counts; updated in place on success.
        formation : dict
            The target formation for which the player is proposed.

        Returns
        -------
        bool
            True if the player filled an open slot, False otherwise.
        """
        position = current_player['position_name']
        # Reject unknown positions and positions whose quota is already met.
        if position in current_formation and current_formation[position] < formation.get(position, 0):
            current_formation[position] += 1
            return True
        return False

    def generate_candidates_representation(self):
        """Generate a representation of the candidates.

        Returns
        -------
        str
            HTML tables of projected points and ICT index per player, plus a
            summary of merge failures against the projections data.
        """
        printListPoints = []
        printListIctIndex = []
        # Column -2 is the six game projection total; column -8 is the next game week.
        sixGameProjectionHeader = self.consolidatedData.projectionsData.sixGameProjections[0].columns.values[-2]
        nextGameWeekHeader = self.consolidatedData.projectionsData.sixGameProjections[0].columns.values[-8]
        for player in self.playersInTeam:
            printListPoints.append(OrderedDict((k, player[k]) for k in (
                'web_name', 'team_name', 'position_name', sixGameProjectionHeader, nextGameWeekHeader, 'candidates',
                'candidates_this_gw')))
            printListIctIndex.append(OrderedDict(
                (k, player[k]) for k in ('web_name', 'team_name', 'position_name', 'ict_index', 'ict_index_candidates')))
        sortedPrintListPoints = sorted(printListPoints, key=lambda x: (x['position_name'], -x[sixGameProjectionHeader]))
        sortedPrintListIctIndex = sorted(printListIctIndex, key=lambda x: (x['position_name'], -float(x['ict_index'])))
        elements = self.consolidatedData.officialAPIData.players['elements']
        # A status of 'u' marks unavailable players; they are excluded from all counts.
        expected_results = [i for i in elements if i['status'] != 'u']
        failed_merge = [i for i in elements if
                        i['merge_status_six_game'] != 'both' and i['status'] != 'u']
        no_projections = [i for i in elements if
                          math.isnan(i[sixGameProjectionHeader]) and i['status'] != 'u' and i[
                              'merge_status_six_game'] == 'both']
        failed_merge_player_info = [
            [i["web_name_clean"], i["team_name"], i["position_name"], i["merge_status_six_game"]]
            for i in failed_merge]
        no_projections_player_info = [
            [i["web_name_clean"], i["team_name"], i["position_name"], i["merge_status_six_game"]]
            for i in no_projections]
        candidates_representation = str(
            tabulate(sortedPrintListPoints, headers="keys", tablefmt="html", stralign="left", numalign="left",
                     colalign="left") + "<br>" +
            tabulate(sortedPrintListIctIndex, headers="keys", tablefmt="html", stralign="left", numalign="left",
                     colalign="left") +
            "<br>" + str(len(expected_results))
            + " active players from the Official Fantasy Premier League API have been matched to "
            + str(len(expected_results) - len(failed_merge) - len(no_projections))
            + " Fantasy Football Scout six game projections."
            + "<br>" + "The following merge failures occurred between the official Fantasy Premier League API and "
                       "the Fantasy Football Scout six game projections: "
            + str(failed_merge_player_info)
            + "<br> The following players were matched but have an invalid Fantasy Football Scout six game projection: "
            + str(no_projections_player_info)) + "<br>"
        return candidates_representation

    def generate_formations_representation(self):
        """Generate a representation of the formations of the team.

        Returns
        -------
        str
            The formations and their scores, highest score first.
        """
        return "Formations and their scores: " + str(
            sorted(self.formations, key=lambda x: x['Score'], reverse=True)) + "<br>"
|
import math
from collections import OrderedDict
from tabulate import tabulate
class Team:
    """
    A class that represents a team.
    Attributes
    ----------
    teamName : str
        The name of the team.
    teamID : str
        The unique identifier of the team.
    consolidatedData : object
        An instance of ConsolidatedData.
    playersInTeam : sequence
        A list of the players in the team.
    formations : sequence
        A list of the possible formations for the team and their projected points total in the next fixture.
    candidates_representation:
        A subset of players with six game projections added.
    formations_representation: str
        A representation of the formations of the team.
    Methods
    -------
    get_players_for_team()
        Return the players in the team.
    get_formations_for_team()
        Return team formations in descending order with the highest scoring at the top.
    add_player_to_formation
        Attempt to add a player to a formation.
    generate_candidates_representation():
        Generate a representation of the candidates.
    generate_formations_representation():
        Generate a representation of the formations of the team.
    """
    def __init__(self, teamName, teamID, consolidatedData):
        self.teamName = teamName
        self.teamID = teamID
        self.consolidatedData = consolidatedData
        # Derived state: the players drafted by this team and the scored formations.
        self.playersInTeam = self.get_players_for_team(self.teamID, self.consolidatedData)
        self.formations = self.get_formations_for_team(self.playersInTeam, self.consolidatedData)
        # Pre-rendered report fragments.
        self.candidates_representation = self.generate_candidates_representation()
        self.formations_representation = self.generate_formations_representation()
    @staticmethod
    def get_players_for_team(teamID, consolidatedData):
        """Return the players in the team.
        Parameters
        ----------
        teamID : str
            The unique identifier of the team.
        consolidatedData : object
            An instance of ConsolidatedData.
        Returns
        -------
        list
            The player dicts whose 'selected' field matches ``teamID``.
        """
        team = []
        # Linear scan of all players; 'selected' holds the owning team's ID.
        for i in range(len(consolidatedData.officialAPIData.players['elements'])):
            if consolidatedData.officialAPIData.players['elements'][i]['selected'] == teamID:
                team.append(consolidatedData.officialAPIData.players['elements'][i])
        return team
    @staticmethod
    def get_formations_for_team(playersInTeam, consolidatedData):
        """Return team formations in descending order with the highest scoring at the top.
        Parameters
        ----------
        playersInTeam : sequence
            A list of the players in the team. Sorted in place as a side effect.
        consolidatedData : object
            An instance of ConsolidatedData.
        Returns
        -------
        list
            The candidate formation dicts with their 'Score' filled in, best first.
        """
        formations = [{'GKP': 1, 'DEF': 5, 'MID': 3, 'FWD': 2, 'Score': 0},
                      {'GKP': 1, 'DEF': 5, 'MID': 4, 'FWD': 1, 'Score': 0},
                      {'GKP': 1, 'DEF': 5, 'MID': 2, 'FWD': 3, 'Score': 0},
                      {'GKP': 1, 'DEF': 4, 'MID': 3, 'FWD': 3, 'Score': 0},
                      {'GKP': 1, 'DEF': 4, 'MID': 5, 'FWD': 1, 'Score': 0},
                      {'GKP': 1, 'DEF': 4, 'MID': 4, 'FWD': 2, 'Score': 0},
                      {'GKP': 1, 'DEF': 3, 'MID': 5, 'FWD': 2, 'Score': 0},
                      {'GKP': 1, 'DEF': 3, 'MID': 4, 'FWD': 3, 'Score': 0}]
        player_index = 0
        total_points = 0
        current_formation = {'GKP': 0, 'DEF': 0, 'MID': 0, 'FWD': 0}
        # The column used as the next game week projection (8th from the end).
        nextGameWeek = consolidatedData.projectionsData.sixGameProjections[0].columns.values[-8]
        # Sort by position, then descending projection, so the greedy fill below
        # always sees the highest-projected eligible player first.
        playersInTeam.sort(key=lambda x: (x['position_name'], -x[nextGameWeek]))
        for formation in formations:
            team_copy = playersInTeam.copy()
            # NOTE(review): ``current_formation != formation`` is always true
            # because ``formation`` also carries a 'Score' key that
            # ``current_formation`` never has, so the loop only terminates when
            # players run out. The final Score is still correct (no player can
            # exceed its quota), but the early exit never fires — consider
            # comparing the four position counts explicitly instead.
            while current_formation != formation and len(team_copy) > player_index:
                current_player = team_copy[player_index]
                # This approach assumes the team is sorted by projected points in the next game week
                if Team.add_player_to_formation(current_player, current_formation, formation):
                    total_points += current_player[nextGameWeek]
                    # Player consumed: remove it and restart the scan from the top.
                    del team_copy[player_index]
                    player_index = 0
                else:
                    player_index = player_index + 1
            formation['Score'] = round(total_points, 2)
            # Reset the accumulators for the next formation.
            total_points = 0
            player_index = 0
            current_formation = {'GKP': 0, 'DEF': 0, 'MID': 0, 'FWD': 0}
        formations.sort(key=lambda x: (-x['Score']))
        return formations
    @staticmethod
    def add_player_to_formation(current_player, current_formation, formation):
        """Attempt to add a player to a formation.
        Parameters
        ----------
        current_player : dict
            The proposed player.
        current_formation : dict
            The current position counts; updated in place on success.
        formation : dict
            The target formation for which the player is proposed.
        Returns
        -------
        bool
            True if the player filled an open slot, False otherwise.
        """
        player_added = True
        # Accept the player only if their position still has an open slot.
        if current_player['position_name'] == 'GKP' and current_formation.get('GKP') + 1 <= formation.get('GKP'):
            current_formation['GKP'] = current_formation['GKP'] + 1
        elif current_player['position_name'] == 'DEF' and current_formation.get('DEF') + 1 <= formation.get('DEF'):
            current_formation['DEF'] = current_formation['DEF'] + 1
        elif current_player['position_name'] == 'MID' and current_formation.get('MID') + 1 <= formation.get('MID'):
            current_formation['MID'] = current_formation['MID'] + 1
        elif current_player['position_name'] == 'FWD' and current_formation.get('FWD') + 1 <= formation.get('FWD'):
            current_formation['FWD'] = current_formation['FWD'] + 1
        else:
            # Unknown position or quota already met.
            player_added = False
        return player_added
    def generate_candidates_representation(self):
        """Generate a representation of the candidates.
        Returns
        -------
        str
            HTML tables of projected points and ICT index per player, plus a
            summary of merge failures against the projections data.
        """
        printListPoints = []
        printListIctIndex = []
        # Column -2 is the six game projection total; column -8 is the next game week.
        sixGameProjectionHeader = self.consolidatedData.projectionsData.sixGameProjections[0].columns.values[-2]
        nextGameWeekHeader = self.consolidatedData.projectionsData.sixGameProjections[0].columns.values[-8]
        for i in self.playersInTeam:
            printDictPoints = OrderedDict((k, i[k]) for k in (
                'web_name', 'team_name', 'position_name', sixGameProjectionHeader, nextGameWeekHeader, 'candidates',
                'candidates_this_gw'))
            printListPoints.append(printDictPoints)
            printDictIctIndex = OrderedDict(
                (k, i[k]) for k in ('web_name', 'team_name', 'position_name', 'ict_index', 'ict_index_candidates'))
            printListIctIndex.append(printDictIctIndex)
        sortedPrintListPoints = sorted(printListPoints, key=lambda x: (x['position_name'], -x[sixGameProjectionHeader]))
        sortedPrintListIctIndex = sorted(printListIctIndex, key=lambda x: (x['position_name'], -float(x['ict_index'])))
        # print(tabulate(sortedPrintListPoints, headers="keys", tablefmt="github"))
        # print(tabulate(sortedPrintListIctIndex, headers="keys", tablefmt="github"))
        # A status of 'u' marks unavailable players; they are excluded from all counts.
        expected_results = [i for i in self.consolidatedData.officialAPIData.players['elements'] if i['status'] != 'u']
        failed_merge = [i for i in self.consolidatedData.officialAPIData.players['elements'] if
                        i['merge_status_six_game'] != 'both' and i['status'] != 'u']
        no_projections = [i for i in self.consolidatedData.officialAPIData.players['elements'] if
                          math.isnan(i[sixGameProjectionHeader]) and i['status'] != 'u' and i[
                              'merge_status_six_game'] == 'both']
        failed_merge_player_info = [
            [i["web_name_clean"], i["team_name"], i["position_name"], i["merge_status_six_game"]]
            for i in failed_merge]
        no_projections_player_info = [
            [i["web_name_clean"], i["team_name"], i["position_name"], i["merge_status_six_game"]]
            for i in no_projections]
        candidates_representation = str(
            tabulate(sortedPrintListPoints, headers="keys", tablefmt="html", stralign="left", numalign="left",
                     colalign="left") + "<br>" +
            tabulate(sortedPrintListIctIndex, headers="keys", tablefmt="html", stralign="left", numalign="left",
                     colalign="left") +
            "<br>" + str(len(expected_results))
            + " active players from the Official Fantasy Premier League API have been matched to "
            + str(len(expected_results) - len(failed_merge) - len(no_projections))
            + " Fantasy Football Scout six game projections."
            + "<br>" + "The following merge failures occurred between the official Fantasy Premier League API and "
                       "the Fantasy Football Scout six game projections: "
            + str(failed_merge_player_info)
            + "<br> The following players were matched but have an invalid Fantasy Football Scout six game projection: "
            + str(no_projections_player_info)) + "<br>"
        return candidates_representation
    def generate_formations_representation(self):
        """Generate a representation of the formations of the team.
        Returns
        -------
        str
            The formations and their scores, highest score first.
        """
        formations_representation = "Formations and their scores: " + str(
            sorted(self.formations, key=lambda x: (x['Score']), reverse=True)) + "<br>"
        return formations_representation
|
en
| 0.805427
|
A class that represents a team. Attributes ---------- teamName : str The name of the team. teamID : str The unique identifier of the team. consolidatedData : object An instance of ConsolidatedData. playersInTeam : sequence A list of the players in the team. formations : sequence A list of the possible formations for the team and their projected points total in the next fixture. candidates_representation: A subset of players with six game projections added. formations_representation: str A representation of the formations of the team. Methods ------- get_players_for_team() Return the players in the team. get_formations_for_team() Return team formations in descending order with the highest scoring at the top. add_player_to_formation Attempt to add a player to a formation. generate_candidates_representation(): Generate a representation of the candidates. generate_formations_representation(): Generate a representation of the formations of the team. Return the players in the team. Parameters ---------- teamID : str The unique identifier of the team. consolidatedData : object An instance of ConsolidatedData. Raises ------ Return team formations in descending order with the highest scoring at the top. Parameters ---------- playersInTeam : sequence A list of the players in the team. consolidatedData : object An instance of ConsolidatedData. Raises ------ # This approach assumes the team is sorted by projected points in the next game week Attempt to add a player to a formation. Parameters ---------- current_player : dict The proposed player. current_formation : dict The current formation. formation : dict The current formation for which the player is proposed. Raises ------ Generate a representation of the candidates. Parameters ---------- Raises ------ # print(tabulate(sortedPrintListPoints, headers="keys", tablefmt="github")) # print(tabulate(sortedPrintListIctIndex, headers="keys", tablefmt="github")) Generate a representation of the formations of the team. 
Parameters ---------- Raises ------
| 3.725908
| 4
|
setup.py
|
timeyyy/async_gui
| 38
|
6625722
|
<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging script for async_gui.

Besides the normal distutils/setuptools commands, ``python setup.py test``
runs every ``*.py`` file found in the ``tests`` directory and exits.
"""
import sys
import os
import glob
# Prefer setuptools; fall back to the stdlib distutils if it is unavailable.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
# Ad-hoc test runner: run each test script as a subprocess, then exit before
# setup() is ever called.
if sys.argv[-1] == 'test':
    os.chdir('tests')
    for test in glob.glob('*.py'):
        os.system('python %s' % test)
    sys.exit()
# concurrent.futures is stdlib from Python 3.2; older interpreters need the
# 'futures' backport.
if sys.version_info < (3, 2):
    install_requires = ['futures']
else:
    install_requires = []
setup(
    name='async_gui',
    packages=[
        'async_gui',
        'async_gui.toolkits',
    ],
    version='0.1.1',
    license='BSD License',
    description='Easy threading and multiprocessing for GUI applications',
    # Long description is the README plus the changelog, read at build time.
    long_description=open('README.rst').read() + '\n\n' +
                     open('HISTORY.rst').read(),
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/reclosedev/async_gui',
    keywords=['GUI', 'thread', 'coroutine', 'futures', 'async'],
    install_requires=install_requires,
    include_package_data=True,
    classifiers=[
        'License :: OSI Approved :: BSD License',
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
    ],
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Packaging script for the ``async_gui`` library.
import sys
import os
import glob
try:
    # Prefer setuptools; fall back to the stdlib distutils when unavailable.
    from setuptools import setup
except ImportError:
    from distutils.core import setup
# ``python setup.py test`` shortcut: run every script in tests/ and exit.
if sys.argv[-1] == 'test':
    os.chdir('tests')
    for test in glob.glob('*.py'):
        os.system('python %s' % test)
    sys.exit()
# concurrent.futures is stdlib from Python 3.2; older interpreters need the backport.
if sys.version_info < (3, 2):
    install_requires = ['futures']
else:
    install_requires = []
setup(
    name='async_gui',
    packages=[
        'async_gui',
        'async_gui.toolkits',
    ],
    version='0.1.1',
    license='BSD License',
    description='Easy threading and multiprocessing for GUI applications',
    # README plus changelog form the long description shown on PyPI.
    long_description=open('README.rst').read() + '\n\n' +
                     open('HISTORY.rst').read(),
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/reclosedev/async_gui',
    keywords=['GUI', 'thread', 'coroutine', 'futures', 'async'],
    install_requires=install_requires,
    include_package_data=True,
    classifiers=[
        'License :: OSI Approved :: BSD License',
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
    ],
)
|
en
| 0.352855
|
#!/usr/bin/env python # -*- coding: utf-8 -*-
| 1.481646
| 1
|
CodeWars/2016/SquareEveryDigit-6k.py
|
JLJTECH/TutorialTesting
| 0
|
6625723
|
<reponame>JLJTECH/TutorialTesting<gh_stars>0
#Square every digit of a passed int
def square_digits(num):
    """Print the digits of ``num`` with each digit squared.

    E.g. ``9119`` prints ``811181`` (9**2 -> 81, 1**2 -> 1, ...).
    The result is printed, not returned.
    """
    # Square each decimal digit and concatenate the squares in order.
    # Fix: the original ran .replace("'", "") on the joined string — a no-op,
    # since a string of digits never contains quote characters; dropped.
    print(''.join(str(int(digit) ** 2) for digit in str(num)))
#Alternate solution
def square_digits(num):
    """Return the int formed by squaring each decimal digit of ``num``."""
    squared_parts = (str(int(digit) ** 2) for digit in str(num))
    return int("".join(squared_parts))
|
#Square every digit of a passed int
def square_digits(num):
    """Print the digits of ``num`` with each digit squared.

    E.g. ``9119`` prints ``811181`` (9**2 -> 81, 1**2 -> 1, ...).
    The result is printed, not returned.
    """
    # Square each decimal digit and concatenate the squares in order.
    # Fix: the original ran .replace("'", "") on the joined string — a no-op,
    # since a string of digits never contains quote characters; dropped.
    print(''.join(str(int(digit) ** 2) for digit in str(num)))
#Alternate solution
def square_digits(num):
    """Return the int formed by squaring each decimal digit of ``num``."""
    squared_parts = (str(int(digit) ** 2) for digit in str(num))
    return int("".join(squared_parts))
|
en
| 0.697886
|
#Square every digit of a passed int #Alternate solution
| 3.835134
| 4
|
aRibeiro/opengl/GLVertexArrayObject.py
|
A-Ribeiro/ARibeiroPythonFramework
| 1
|
6625724
|
#
# For this is how God loved the world:<br/>
# he gave his only Son, so that everyone<br/>
# who believes in him may not perish<br/>
# but may have eternal life.
#
# John 3:16
#
from OpenGL.GL import *
from aRibeiro.window import *
class GLVertexArrayObject:
    """RAII-style wrapper around an OpenGL vertex array object (VAO).

    The VAO is created on construction and deleted by ``dispose()`` (also
    called from ``__del__``), but only while the owning window's GL context
    is still active.
    """

    def __init__(self, window: Window):
        self.window = window  # owning window; consulted before GL cleanup
        self.mVAO = glGenVertexArrays(1)

    def __del__(self):
        self.dispose()

    def dispose(self):
        """Delete the VAO once; safe to call repeatedly."""
        if self.mVAO is None:  # fix: identity check instead of '== None'
            return
        if self.window.active():
            glDeleteVertexArrays(1, [self.mVAO])
        self.mVAO = None

    def enable(self):
        """Bind this VAO as the current vertex array."""
        glBindVertexArray(self.mVAO)

    def disable(self):
        """Unbind any vertex array."""
        glBindVertexArray(0)

    # Fix: these two helpers were declared without ``self`` and therefore
    # could not be called on an instance; they touch no instance state, so
    # they are now static methods (class-level calls keep working).
    @staticmethod
    def drawIndex(primitive, count, type, offset):
        """Issue an indexed draw call using the currently bound buffers."""
        glDrawElements(primitive, count, type, offset)

    @staticmethod
    def drawArrays(primitive, count, offset):
        """Issue a non-indexed draw call starting at ``offset``."""
        glDrawArrays(primitive, offset, count)
|
#
# For this is how God loved the world:<br/>
# he gave his only Son, so that everyone<br/>
# who believes in him may not perish<br/>
# but may have eternal life.
#
# John 3:16
#
from OpenGL.GL import *
from aRibeiro.window import *
class GLVertexArrayObject:
    """RAII-style wrapper around an OpenGL vertex array object (VAO).

    The VAO is created on construction and deleted by ``dispose()`` (also
    called from ``__del__``), but only while the owning window's GL context
    is still active.
    """

    def __init__(self, window: Window):
        self.window = window  # owning window; consulted before GL cleanup
        self.mVAO = glGenVertexArrays(1)

    def __del__(self):
        self.dispose()

    def dispose(self):
        """Delete the VAO once; safe to call repeatedly."""
        if self.mVAO is None:  # fix: identity check instead of '== None'
            return
        if self.window.active():
            glDeleteVertexArrays(1, [self.mVAO])
        self.mVAO = None

    def enable(self):
        """Bind this VAO as the current vertex array."""
        glBindVertexArray(self.mVAO)

    def disable(self):
        """Unbind any vertex array."""
        glBindVertexArray(0)

    # Fix: these two helpers were declared without ``self`` and therefore
    # could not be called on an instance; they touch no instance state, so
    # they are now static methods (class-level calls keep working).
    @staticmethod
    def drawIndex(primitive, count, type, offset):
        """Issue an indexed draw call using the currently bound buffers."""
        glDrawElements(primitive, count, type, offset)

    @staticmethod
    def drawArrays(primitive, count, offset):
        """Issue a non-indexed draw call starting at ``offset``."""
        glDrawArrays(primitive, offset, count)
|
en
| 0.973201
|
# # For this is how God loved the world:<br/> # he gave his only Son, so that everyone<br/> # who believes in him may not perish<br/> # but may have eternal life. # # John 3:16 #
| 2.768767
| 3
|
training/localized_linear_model/TrainingEpoch.py
|
khoehlein/CNNs-for-Wind-Field-Downscaling
| 5
|
6625725
|
<reponame>khoehlein/CNNs-for-Wind-Field-Downscaling
import torch
from training.modular_downscaling_model.TrainingEpoch import TrainingEpoch as BaseEpoch
class TrainingEpoch(BaseEpoch):
    """One training epoch for the localized linear model.

    Specializes the modular downscaling ``TrainingEpoch`` by concatenating
    low- and high-resolution predictors along the channel axis and feeding
    them to the model directly (no interpolation baseline branch).
    """

    def __init__(self, training_process):
        super(TrainingEpoch, self).__init__(training_process=training_process)

    def _prepare_data(self, batch):
        """Unpack a batch and move its tensors to the configured device.

        Returns ``(target, inputs, mask_lr, mask_hr, offset_lr, offset_hr)``.
        Masks are inverted and rescaled so each sample's mask sums to the
        full pixel count, keeping masked losses comparable across samples.
        """
        (
            target,
            input_lr, input_hr,
            mask_lr, mask_hr,
            *remaining
        ) = batch
        inputs = []
        if len(input_lr) > 0:
            inputs.append(input_lr.to(self.training_process.config.device))
        if len(input_hr) > 0:
            inputs.append(input_hr.to(self.training_process.config.device))
        # LR and HR predictors are concatenated along the last (channel) axis.
        inputs = torch.cat(inputs, dim=-1)
        if len(target) > 0:
            target = target.to(self.training_process.config.device)
        shape = mask_lr.shape[-2:]
        if len(mask_lr) > 0:
            mask_lr = mask_lr.to(self.training_process.config.device)
            # Invert the mask (1 - m); presumably incoming masks flag
            # invalid pixels with 1 — confirm against the dataset code.
            mask_lr = (1. - mask_lr)
            # Rescale so the mask sums to H*W per sample.
            mask_lr = mask_lr * ((shape[0] * shape[1]) / torch.sum(mask_lr, dim=[1, 2], keepdim=True))
        shape = mask_hr.shape[-2:]
        if len(mask_hr) > 0:
            mask_hr = mask_hr.to(self.training_process.config.device)
            mask_hr = (1. - mask_hr)
            mask_hr = mask_hr * ((shape[0] * shape[1]) / torch.sum(mask_hr, dim=[1, 2], keepdim=True))
        if len(remaining) > 0:
            # Optional trailing batch items are the LR/HR offsets.
            assert len(remaining) == 2
            offset_lr, offset_hr = remaining
        else:
            offset_lr = None
            offset_hr = None
        return target, inputs, mask_lr, mask_hr, offset_lr, offset_hr

    def _apply_model(self, inputs_device, offset_lr, offset_hr):
        """Run the model on the prepared inputs; no interpolation branch here."""
        predictions_device = self.training_process.model(inputs_device)
        interpolate = None
        return predictions_device, interpolate

    def _update_losses(self, predictions_device, targets_device, interpolate, mask_hr_device, offset_hr):
        """Evaluate the loss bundle and accumulate per-loss running sums."""
        loss, complementary = self.losses(
            predictions_device, targets_device,
            mask=mask_hr_device,
            scalings=self.training_process.grids.target_scalings,
            offset=offset_hr,
            ndim=1
        )
        for loss_name in complementary.keys():
            self.cumulative_losses.update({
                loss_name: self.cumulative_losses[loss_name] + complementary[loss_name].item()
            })
        return loss
|
import torch
from training.modular_downscaling_model.TrainingEpoch import TrainingEpoch as BaseEpoch
class TrainingEpoch(BaseEpoch):
    """One training epoch for the localized linear model.

    Specializes the modular downscaling ``TrainingEpoch`` by concatenating
    low- and high-resolution predictors along the channel axis and feeding
    them to the model directly (no interpolation baseline branch).
    """

    def __init__(self, training_process):
        super(TrainingEpoch, self).__init__(training_process=training_process)

    def _prepare_data(self, batch):
        """Unpack a batch and move its tensors to the configured device.

        Returns ``(target, inputs, mask_lr, mask_hr, offset_lr, offset_hr)``.
        Masks are inverted and rescaled so each sample's mask sums to the
        full pixel count, keeping masked losses comparable across samples.
        """
        (
            target,
            input_lr, input_hr,
            mask_lr, mask_hr,
            *remaining
        ) = batch
        inputs = []
        if len(input_lr) > 0:
            inputs.append(input_lr.to(self.training_process.config.device))
        if len(input_hr) > 0:
            inputs.append(input_hr.to(self.training_process.config.device))
        # LR and HR predictors are concatenated along the last (channel) axis.
        inputs = torch.cat(inputs, dim=-1)
        if len(target) > 0:
            target = target.to(self.training_process.config.device)
        shape = mask_lr.shape[-2:]
        if len(mask_lr) > 0:
            mask_lr = mask_lr.to(self.training_process.config.device)
            # Invert the mask (1 - m); presumably incoming masks flag
            # invalid pixels with 1 — confirm against the dataset code.
            mask_lr = (1. - mask_lr)
            # Rescale so the mask sums to H*W per sample.
            mask_lr = mask_lr * ((shape[0] * shape[1]) / torch.sum(mask_lr, dim=[1, 2], keepdim=True))
        shape = mask_hr.shape[-2:]
        if len(mask_hr) > 0:
            mask_hr = mask_hr.to(self.training_process.config.device)
            mask_hr = (1. - mask_hr)
            mask_hr = mask_hr * ((shape[0] * shape[1]) / torch.sum(mask_hr, dim=[1, 2], keepdim=True))
        if len(remaining) > 0:
            # Optional trailing batch items are the LR/HR offsets.
            assert len(remaining) == 2
            offset_lr, offset_hr = remaining
        else:
            offset_lr = None
            offset_hr = None
        return target, inputs, mask_lr, mask_hr, offset_lr, offset_hr

    def _apply_model(self, inputs_device, offset_lr, offset_hr):
        """Run the model on the prepared inputs; no interpolation branch here."""
        predictions_device = self.training_process.model(inputs_device)
        interpolate = None
        return predictions_device, interpolate

    def _update_losses(self, predictions_device, targets_device, interpolate, mask_hr_device, offset_hr):
        """Evaluate the loss bundle and accumulate per-loss running sums."""
        loss, complementary = self.losses(
            predictions_device, targets_device,
            mask=mask_hr_device,
            scalings=self.training_process.grids.target_scalings,
            offset=offset_hr,
            ndim=1
        )
        for loss_name in complementary.keys():
            self.cumulative_losses.update({
                loss_name: self.cumulative_losses[loss_name] + complementary[loss_name].item()
            })
        return loss
|
none
| 1
| 2.154437
| 2
|
|
sup.py
|
k-webb/supreme
| 10
|
6625726
|
import requests as r
from datetime import datetime
import json, re, socket, time, sys, random
from slackclient import SlackClient
def UTCtoEST():
    """Return the current local time as a string suffixed with " CST".

    NOTE(review): despite the name, no timezone conversion happens — this
    is simply ``str(datetime.now())`` with a hard-coded "CST" label.
    """
    return "{} CST".format(datetime.now())
# Fail fast on slow network calls: 2-second default timeout for all sockets.
socket.setdefaulttimeout(2)
# Slack client used for restock notifications; placeholder must be replaced
# with a real API token before use.
sc = SlackClient("SLACK KEY HERE")
class Supreme:
    """Stock checker and restock monitor for a Supreme product page.

    Works against the product's ``.json`` endpoint: sums stock over every
    colourway and size, and can poll until a restock appears, then posts a
    Slack notification.
    """

    def __init__(self):
        # Browser-like User-Agent so requests are not rejected outright.
        self.headers = {'user-agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36'}
        self.totalstock = 0  # running total of stock across all colours/sizes
        self.l1 = []  # human-readable lines describing in-stock sizes

    def get_stock(self,link,p):
        ###USE PROXIES, RANDOM CHOICE
        '''
        with open('proxies.txt','r+') as goodproxies:
            proxies = goodproxies.read().splitlines()
        x = random.choice(proxies)
        bs = {'http': x}
        print (bs)
        #HARD CODE PROXIES
        #proxies = {'http': '192.168.127.12:53281','https': '192.168.127.12:53281'}
        a = r.get(link,headers=self.headers,proxies=bs)
        '''
        ##NO PROXIES , DEFAULT
        # Fetch the product JSON and accumulate stock levels into
        # self.totalstock; when p is truthy, also print progress and record
        # in-stock lines in self.l1.
        a = r.get(link,headers=self.headers)
        print (a.status_code)
        #print (a.text)
        b = json.loads(a.text)
        info = b[u'styles']
        for value in info:
            pic = value[u'mobile_zoomed_url']
            pic = 'https:'+str(pic)
            color = value[u'name']
            sizes = value[u'sizes']
            if p == True:
                print ('{0} {1}'.format(color,pic))
            for size in sizes:
                sizelabel = size[u'name']
                sizeid = size[u'id']
                stock = size[u'stock_level']
                self.totalstock = self.totalstock + stock
                if p == True:
                    m = ('{0} {1} {2} {3} {4}'.format(color, pic, sizelabel, sizeid, stock))
                    #print (m)
                    if stock != 0:
                        self.l1.append(m)
            if p == True:
                print ('\n')

    def monitor(self,link):
        # Poll the product page until stock appears, then notify via Slack.
        # NOTE(review): each retry recurses into self.monitor(); a long
        # out-of-stock period will eventually hit Python's recursion limit —
        # consider a while-loop instead.
        def send_message(team,channel,username, title, link, site, sizes, thumb):
            # Post a formatted restock attachment to the given Slack channel.
            attachments = [
                {
                    "color": "#36a64f",
                    "title": title,
                    "title_link": link,
                    "text": "Site: %s"%site,
                    "fields": [
                        {
                            "title": "Sizes Available",
                            "value": sizes,
                            "short": False
                        }
                    ],
                    #"image_url": thumb
                    "thumb_url": thumb,
                    "ts": int(time.time())
                }
            ]
            try:
                res = team.api_call("chat.postMessage", channel=channel, attachments=attachments, username=username,icon_emoji=':hocho:')
                if not res.get('ok'):
                    print('error: {}', res.get('error'))
            except Exception as y:
                print (y)
                print('send_message failed')
        if self.totalstock == 0:
            try:
                self.get_stock(link,p=True)
                if self.totalstock == 0:
                    print ('--- CHECK STATUS --- OUT OF STOCK %s'%UTCtoEST())
                    time.sleep(int(self.interval))
                    self.monitor(link)
                else:
                    send_message(sc,'#CHANNEL NAME HERE','BOT USER NAME HERE','titleOfProduct',link[:-5],'<SupremeNY|http://www.supremenewyork.com/shop/all>','strcartlinks','img')
                    ##INSERT SLACK [X]/ TWITTER [ ]/ CHECKOUT [ ] FUNCTION HERE
                    print('--- CHECK STATUS --- RESTOCK\n%s -- %s'%(link,UTCtoEST()))
            except Exception as monitor_error:
                print ('MONITOR ERROR\n%s'%monitor_error)
                pass

    def prompt(self):
        # Interactive entry point: ask for a product URL, report current
        # stock, and optionally start restock monitoring.
        link = input('Please Enter A Link To Monitor..\n')
        link = str(link)+'.json'
        try:
            self.get_stock(link,p=True)
        except Exception as get_stock_error:
            print (get_stock_error)
            pass
        print ('{0} {1}'.format('TOTAL STOCK - ',self.totalstock))
        if self.totalstock == 0:
            restock_answer = input('This product is out of stock, start restock mode? Enter - (y/n)\n')
            if restock_answer.lower() == 'y':
                self.interval = input('Please Enter An Interval..\n')
                self.monitor(link)
            else:
                print (restock_answer)
                sys.exit()
# Entry point: prompt for a product link and start checking immediately.
instance = Supreme()
instance.prompt()
|
import requests as r
from datetime import datetime
import json, re, socket, time, sys, random
from slackclient import SlackClient
def UTCtoEST():
    """Return the current local time as a string suffixed with " CST".

    NOTE(review): despite the name, no timezone conversion happens — this
    is simply ``str(datetime.now())`` with a hard-coded "CST" label.
    """
    return "{} CST".format(datetime.now())
# Fail fast on slow network calls: 2-second default timeout for all sockets.
socket.setdefaulttimeout(2)
# Slack client used for restock notifications; placeholder must be replaced
# with a real API token before use.
sc = SlackClient("SLACK KEY HERE")
class Supreme:
    """Stock checker and restock monitor for a Supreme product page.

    Works against the product's ``.json`` endpoint: sums stock over every
    colourway and size, and can poll until a restock appears, then posts a
    Slack notification.
    """

    def __init__(self):
        # Browser-like User-Agent so requests are not rejected outright.
        self.headers = {'user-agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36'}
        self.totalstock = 0  # running total of stock across all colours/sizes
        self.l1 = []  # human-readable lines describing in-stock sizes

    def get_stock(self,link,p):
        ###USE PROXIES, RANDOM CHOICE
        '''
        with open('proxies.txt','r+') as goodproxies:
            proxies = goodproxies.read().splitlines()
        x = random.choice(proxies)
        bs = {'http': x}
        print (bs)
        #HARD CODE PROXIES
        #proxies = {'http': '192.168.127.12:53281','https': '192.168.127.12:53281'}
        a = r.get(link,headers=self.headers,proxies=bs)
        '''
        ##NO PROXIES , DEFAULT
        # Fetch the product JSON and accumulate stock levels into
        # self.totalstock; when p is truthy, also print progress and record
        # in-stock lines in self.l1.
        a = r.get(link,headers=self.headers)
        print (a.status_code)
        #print (a.text)
        b = json.loads(a.text)
        info = b[u'styles']
        for value in info:
            pic = value[u'mobile_zoomed_url']
            pic = 'https:'+str(pic)
            color = value[u'name']
            sizes = value[u'sizes']
            if p == True:
                print ('{0} {1}'.format(color,pic))
            for size in sizes:
                sizelabel = size[u'name']
                sizeid = size[u'id']
                stock = size[u'stock_level']
                self.totalstock = self.totalstock + stock
                if p == True:
                    m = ('{0} {1} {2} {3} {4}'.format(color, pic, sizelabel, sizeid, stock))
                    #print (m)
                    if stock != 0:
                        self.l1.append(m)
            if p == True:
                print ('\n')

    def monitor(self,link):
        # Poll the product page until stock appears, then notify via Slack.
        # NOTE(review): each retry recurses into self.monitor(); a long
        # out-of-stock period will eventually hit Python's recursion limit —
        # consider a while-loop instead.
        def send_message(team,channel,username, title, link, site, sizes, thumb):
            # Post a formatted restock attachment to the given Slack channel.
            attachments = [
                {
                    "color": "#36a64f",
                    "title": title,
                    "title_link": link,
                    "text": "Site: %s"%site,
                    "fields": [
                        {
                            "title": "Sizes Available",
                            "value": sizes,
                            "short": False
                        }
                    ],
                    #"image_url": thumb
                    "thumb_url": thumb,
                    "ts": int(time.time())
                }
            ]
            try:
                res = team.api_call("chat.postMessage", channel=channel, attachments=attachments, username=username,icon_emoji=':hocho:')
                if not res.get('ok'):
                    print('error: {}', res.get('error'))
            except Exception as y:
                print (y)
                print('send_message failed')
        if self.totalstock == 0:
            try:
                self.get_stock(link,p=True)
                if self.totalstock == 0:
                    print ('--- CHECK STATUS --- OUT OF STOCK %s'%UTCtoEST())
                    time.sleep(int(self.interval))
                    self.monitor(link)
                else:
                    send_message(sc,'#CHANNEL NAME HERE','BOT USER NAME HERE','titleOfProduct',link[:-5],'<SupremeNY|http://www.supremenewyork.com/shop/all>','strcartlinks','img')
                    ##INSERT SLACK [X]/ TWITTER [ ]/ CHECKOUT [ ] FUNCTION HERE
                    print('--- CHECK STATUS --- RESTOCK\n%s -- %s'%(link,UTCtoEST()))
            except Exception as monitor_error:
                print ('MONITOR ERROR\n%s'%monitor_error)
                pass

    def prompt(self):
        # Interactive entry point: ask for a product URL, report current
        # stock, and optionally start restock monitoring.
        link = input('Please Enter A Link To Monitor..\n')
        link = str(link)+'.json'
        try:
            self.get_stock(link,p=True)
        except Exception as get_stock_error:
            print (get_stock_error)
            pass
        print ('{0} {1}'.format('TOTAL STOCK - ',self.totalstock))
        if self.totalstock == 0:
            restock_answer = input('This product is out of stock, start restock mode? Enter - (y/n)\n')
            if restock_answer.lower() == 'y':
                self.interval = input('Please Enter An Interval..\n')
                self.monitor(link)
            else:
                print (restock_answer)
                sys.exit()
# Entry point: prompt for a product link and start checking immediately.
instance = Supreme()
instance.prompt()
|
en
| 0.452319
|
###USE PROXIES, RANDOM CHOICE with open('proxies.txt','r+') as goodproxies: proxies = goodproxies.read().splitlines() x = random.choice(proxies) bs = {'http': x} print (bs) #HARD CODE PROXIES #proxies = {'http': '192.168.127.12:53281','https': '192.168.127.12:53281'} a = r.get(link,headers=self.headers,proxies=bs) ##NO PROXIES , DEFAULT #print (a.text) #print (m) #"image_url": thumb ##INSERT SLACK [X]/ TWITTER [ ]/ CHECKOUT [ ] FUNCTION HERE
| 2.859648
| 3
|
scripts/image_compare.py
|
young-oct/OCT-sparse-estimation-with-CBPDN-framework
| 1
|
6625727
|
# -*- coding: utf-8 -*-
# @Time : 2021-04-26 3:49 p.m.
# @Author : <NAME>
# @FileName: image_compare.py
# @Software: PyCharm
'''From left to right: OCT images of a middle ear,
index finger (palmar view), index finger (side view),
and onion slice. The white arrow indicates the sidelobe
artifacts caused by the PSF convolution'''
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from sporco.admm import cbpdn
from misc import processing
from scipy.ndimage import median_filter
import polarTransform
# Module level constants
eps = 1e-14  # numerical floor constant; currently unused in this script
if __name__ == '__main__':
    plt.close('all')
    # Customize matplotlib params
    matplotlib.rcParams.update(
        {
            'font.size': 15,
            'text.usetex': False,
            'font.family': 'sans-serif',
            'mathtext.fontset': 'stix',
        }
    )
    file_name = ['ear', 'finger', 'nail', 'onion']
    title_name = [r'(a) middle ear', r'(b) index finger (palmar view)', r'(c) index finger (side view)', r'(d) onion slice']
    original = []  # log-magnitude (dB) of the raw OCT images
    sparse = []    # log-magnitude (dB) of the sparse reconstructions
    # Per-dataset sparsity weights; w_lmbda and speckle_weight are shared.
    lmbda = [0.05,0.03,0.02,0.04]
    w_lmbda = 0.05
    speckle_weight = 0.1
    rvmin, vmax = 5, 55 #dB display range
    for i in range(len(file_name)):
        Ear = False
        # Load the example dataset
        s, D = processing.load_data(file_name[i], decimation_factor=20)
        # l2 norm data and save the scaling factor
        l2f, snorm = processing.to_l2_normed(s)
        # CBPDN solver options. NOTE(review): opt_par is built but not passed
        # anywhere below — presumably consumed inside
        # processing.make_sparse_representation; verify, else it is dead code.
        opt_par = cbpdn.ConvBPDN.Options({'FastSolve': True, 'Verbose': False, 'StatusHeader': False,
                                          'MaxMainIter': 20, 'RelStopTol': 5e-5, 'AuxVarObj': True,
                                          'RelaxParam': 1.515, 'AutoRho': {'Enabled': True}})
        # obtain weighting mask (the middle-ear dataset gets special handling)
        if file_name[i] == 'ear':
            Ear = True
        else:
            pass
        x = processing.make_sparse_representation(s, D, lmbda[i],w_lmbda, speckle_weight,Ear= Ear)
        # Convert magnitudes to dB for display.
        x_log = 20 * np.log10(abs(x))
        s_log = 20 * np.log10(abs(s))
        original.append(s_log)
        sparse.append(x_log)
    # Arrow head/tail coordinates for the (currently disabled) annotations below.
    x_head = [300, 200, 240, 250]
    y_head = [110, 125, 170, 120]
    x_end = [350, 150, 190, 190]
    y_end = [90, 105, 150, 100]
    aspect = original[0].shape[1]/original[0].shape[0]
    fig, ax = plt.subplots(nrows=2, ncols=4, sharey=True, sharex=True, figsize=(16, 9),constrained_layout=True )
    # NOTE(review): holds the last loop iteration's x_log; appears to be a
    # leftover from the polar-transform experiment below and is unused.
    cartesianImage=x_log
    for i in range(len(file_name)):
        title = '\n'.join((title_name[i],r'$𝜆$ = %.2f,$W$ = %.1f' % (lmbda[i], speckle_weight)))
        ax[0, i].set_title(title,fontsize=20)
        # Top row: original images; bottom row: sparse reconstructions.
        ax[0, i].imshow(original[i], 'gray',aspect=aspect,vmax=vmax, vmin=rvmin,interpolation='none')
        #ax[0, i].annotate('', xy=(x_head[i], y_head[i]), xycoords='data',
        #                  xytext=(x_end[i], y_end[i]), textcoords='data',
        #                  arrowprops=dict(facecolor='white', shrink=0.05),
        #                  horizontalalignment='right', verticalalignment='top',
        #                  )
        ax[1, i].imshow(sparse[i], 'gray',aspect=aspect,vmax=vmax, vmin=rvmin,interpolation='none')
        ax[0, i].set_axis_off()
        ax[1, i].set_axis_off()
    plt.show()
    fig.savefig('../Images/image_compare.jpeg',
                dpi = 800,
                transparent=True,format = 'jpeg')
    # (Disabled) polar-transform experiment for the middle-ear image.
    # from numpy import pi
    # #plt.close('all')
    # ear_image=sparse[0]
    # ear_image[0,:]=vmax
    # ear_image[-1,:]=vmax
    # ear_image[:,0]=vmax
    # ear_image[:,-1]=vmax
    # ear_image = median_filter(ear_image, size=(2, 2))
    # for i in range(ear_image.shape[0]):
    #     for j in range(ear_image.shape[1]):
    #         if ear_image[i,j]<rvmin:
    #             ear_image[i,j]=rvmin
    #         if ear_image[i,j]>vmax:
    #             ear_image[i,j]=vmax
    #
    #
    #
    # opening_angle=60 #deg
    # polarImage, ptSettings = polarTransform.convertToCartesianImage(ear_image.T, initialRadius=300, finalRadius=812, initialAngle=-opening_angle*pi/360, finalAngle=opening_angle*pi/360)
    # plt.figure()
    # plt.imshow(polarImage.T[::-1,:], 'gray',aspect=aspect,vmax=vmax, interpolation='none', vmin=rvmin, origin='lower')
    # plt.figure()
    # plt.imshow(ear_image, 'gray',aspect=aspect,vmax=vmax, vmin=rvmin, interpolation='none', origin='lower')
|
# -*- coding: utf-8 -*-
# @Time : 2021-04-26 3:49 p.m.
# @Author : <NAME>
# @FileName: image_compare.py
# @Software: PyCharm
'''From left to right: OCT images of a middle ear,
index finger (palmar view), index finger (side view),
and onion slice. The white arrow indicates the sidelobe
artifacts caused by the PSF convolution'''
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from sporco.admm import cbpdn
from misc import processing
from scipy.ndimage import median_filter
import polarTransform
# Module level constants
eps = 1e-14  # numerical floor constant; currently unused in this script
if __name__ == '__main__':
    plt.close('all')
    # Customize matplotlib params
    matplotlib.rcParams.update(
        {
            'font.size': 15,
            'text.usetex': False,
            'font.family': 'sans-serif',
            'mathtext.fontset': 'stix',
        }
    )
    file_name = ['ear', 'finger', 'nail', 'onion']
    title_name = [r'(a) middle ear', r'(b) index finger (palmar view)', r'(c) index finger (side view)', r'(d) onion slice']
    original = []  # log-magnitude (dB) of the raw OCT images
    sparse = []    # log-magnitude (dB) of the sparse reconstructions
    # Per-dataset sparsity weights; w_lmbda and speckle_weight are shared.
    lmbda = [0.05,0.03,0.02,0.04]
    w_lmbda = 0.05
    speckle_weight = 0.1
    rvmin, vmax = 5, 55 #dB display range
    for i in range(len(file_name)):
        Ear = False
        # Load the example dataset
        s, D = processing.load_data(file_name[i], decimation_factor=20)
        # l2 norm data and save the scaling factor
        l2f, snorm = processing.to_l2_normed(s)
        # CBPDN solver options. NOTE(review): opt_par is built but not passed
        # anywhere below — presumably consumed inside
        # processing.make_sparse_representation; verify, else it is dead code.
        opt_par = cbpdn.ConvBPDN.Options({'FastSolve': True, 'Verbose': False, 'StatusHeader': False,
                                          'MaxMainIter': 20, 'RelStopTol': 5e-5, 'AuxVarObj': True,
                                          'RelaxParam': 1.515, 'AutoRho': {'Enabled': True}})
        # obtain weighting mask (the middle-ear dataset gets special handling)
        if file_name[i] == 'ear':
            Ear = True
        else:
            pass
        x = processing.make_sparse_representation(s, D, lmbda[i],w_lmbda, speckle_weight,Ear= Ear)
        # Convert magnitudes to dB for display.
        x_log = 20 * np.log10(abs(x))
        s_log = 20 * np.log10(abs(s))
        original.append(s_log)
        sparse.append(x_log)
    # Arrow head/tail coordinates for the (currently disabled) annotations below.
    x_head = [300, 200, 240, 250]
    y_head = [110, 125, 170, 120]
    x_end = [350, 150, 190, 190]
    y_end = [90, 105, 150, 100]
    aspect = original[0].shape[1]/original[0].shape[0]
    fig, ax = plt.subplots(nrows=2, ncols=4, sharey=True, sharex=True, figsize=(16, 9),constrained_layout=True )
    # NOTE(review): holds the last loop iteration's x_log; appears to be a
    # leftover from the polar-transform experiment below and is unused.
    cartesianImage=x_log
    for i in range(len(file_name)):
        title = '\n'.join((title_name[i],r'$𝜆$ = %.2f,$W$ = %.1f' % (lmbda[i], speckle_weight)))
        ax[0, i].set_title(title,fontsize=20)
        # Top row: original images; bottom row: sparse reconstructions.
        ax[0, i].imshow(original[i], 'gray',aspect=aspect,vmax=vmax, vmin=rvmin,interpolation='none')
        #ax[0, i].annotate('', xy=(x_head[i], y_head[i]), xycoords='data',
        #                  xytext=(x_end[i], y_end[i]), textcoords='data',
        #                  arrowprops=dict(facecolor='white', shrink=0.05),
        #                  horizontalalignment='right', verticalalignment='top',
        #                  )
        ax[1, i].imshow(sparse[i], 'gray',aspect=aspect,vmax=vmax, vmin=rvmin,interpolation='none')
        ax[0, i].set_axis_off()
        ax[1, i].set_axis_off()
    plt.show()
    fig.savefig('../Images/image_compare.jpeg',
                dpi = 800,
                transparent=True,format = 'jpeg')
    # (Disabled) polar-transform experiment for the middle-ear image.
    # from numpy import pi
    # #plt.close('all')
    # ear_image=sparse[0]
    # ear_image[0,:]=vmax
    # ear_image[-1,:]=vmax
    # ear_image[:,0]=vmax
    # ear_image[:,-1]=vmax
    # ear_image = median_filter(ear_image, size=(2, 2))
    # for i in range(ear_image.shape[0]):
    #     for j in range(ear_image.shape[1]):
    #         if ear_image[i,j]<rvmin:
    #             ear_image[i,j]=rvmin
    #         if ear_image[i,j]>vmax:
    #             ear_image[i,j]=vmax
    #
    #
    #
    # opening_angle=60 #deg
    # polarImage, ptSettings = polarTransform.convertToCartesianImage(ear_image.T, initialRadius=300, finalRadius=812, initialAngle=-opening_angle*pi/360, finalAngle=opening_angle*pi/360)
    # plt.figure()
    # plt.imshow(polarImage.T[::-1,:], 'gray',aspect=aspect,vmax=vmax, interpolation='none', vmin=rvmin, origin='lower')
    # plt.figure()
    # plt.imshow(ear_image, 'gray',aspect=aspect,vmax=vmax, vmin=rvmin, interpolation='none', origin='lower')
|
en
| 0.393699
|
# -*- coding: utf-8 -*- # @Time : 2021-04-26 3:49 p.m. # @Author : <NAME> # @FileName: image_compare.py # @Software: PyCharm From left to right: OCT images of a middle ear, index finger (palmar view), index finger (side view), and onion slice. The white arrow indicates the sidelobe artifacts caused by the PSF convolution # Module level constants # Customize matplotlib params #dB # Load the example dataset # l2 norm data and save the scaling factor # obtain weighting mask #ax[0, i].annotate('', xy=(x_head[i], y_head[i]), xycoords='data', # xytext=(x_end[i], y_end[i]), textcoords='data', # arrowprops=dict(facecolor='white', shrink=0.05), # horizontalalignment='right', verticalalignment='top', # ) # from numpy import pi # #plt.close('all') # ear_image=sparse[0] # ear_image[0,:]=vmax # ear_image[-1,:]=vmax # ear_image[:,0]=vmax # ear_image[:,-1]=vmax # ear_image = median_filter(ear_image, size=(2, 2)) # for i in range(ear_image.shape[0]): # for j in range(ear_image.shape[1]): # if ear_image[i,j]<rvmin: # ear_image[i,j]=rvmin # if ear_image[i,j]>vmax: # ear_image[i,j]=vmax # # # # opening_angle=60 #deg # polarImage, ptSettings = polarTransform.convertToCartesianImage(ear_image.T, initialRadius=300, finalRadius=812, initialAngle=-opening_angle*pi/360, finalAngle=opening_angle*pi/360) # plt.figure() # plt.imshow(polarImage.T[::-1,:], 'gray',aspect=aspect,vmax=vmax, interpolation='none', vmin=rvmin, origin='lower') # plt.figure() # plt.imshow(ear_image, 'gray',aspect=aspect,vmax=vmax, vmin=rvmin, interpolation='none', origin='lower')
| 2.338384
| 2
|
src/compas_tna/equilibrium/vertical.py
|
wenqian157/compas_tna
| 0
|
6625728
|
<filename>src/compas_tna/equilibrium/vertical.py<gh_stars>0
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from numpy import array
from numpy import float64
from scipy.sparse import diags
from scipy.sparse.linalg import spsolve
from compas.numerical import connectivity_matrix
from compas.numerical import normrow
from compas_tna.diagrams import FormDiagram
from compas_tna.utilities import LoadUpdater
from compas_tna.utilities import update_z
# Public API of this module; the *_proxy variants wrap the solvers so they
# operate on plain (serializable) diagram data instead of diagram objects.
__all__ = [
    'vertical_from_zmax',
    'vertical_from_bbox',
    'vertical_from_q',
    'vertical_from_zmax_proxy',
    'vertical_from_bbox_proxy',
    'vertical_from_q_proxy'
]
def vertical_from_zmax_proxy(formdata, *args, **kwargs):
    """Data-level wrapper for ``vertical_from_zmax``.

    Rebuilds a ``FormDiagram`` from ``formdata``, runs the solver, and
    returns the updated diagram data together with the computed scale.
    """
    diagram = FormDiagram.from_data(formdata)
    scale = vertical_from_zmax(diagram, *args, **kwargs)
    return diagram.to_data(), scale
def vertical_from_bbox_proxy(formdata, *args, **kwargs):
    """Data-level wrapper for ``vertical_from_bbox``.

    Rebuilds a ``FormDiagram`` from ``formdata``, runs the solver, and
    returns the updated diagram data together with the computed scale.
    """
    diagram = FormDiagram.from_data(formdata)
    scale = vertical_from_bbox(diagram, *args, **kwargs)
    return diagram.to_data(), scale
def vertical_from_q_proxy(formdata, *args, **kwargs):
    """Data-level wrapper for ``vertical_from_q``.

    Rebuilds a ``FormDiagram`` from ``formdata``, runs the solver in place,
    and returns the updated diagram data.
    """
    diagram = FormDiagram.from_data(formdata)
    vertical_from_q(diagram, *args, **kwargs)
    return diagram.to_data()
def vertical_from_zmax(form, zmax, kmax=100, xtol=1e-2, rtol=1e-3, density=1.0, display=True):
    """Compute the scale of the force densities for which the highest point
    of the thrust network equals a specified value.

    Parameters
    ----------
    form : compas_tna.diagrams.formdiagram.FormDiagram
        The form diagram.
    zmax : float
        The target maximum height of the thrust network.
    kmax : int, optional
        Maximum number of iterations, both for the scale search and for the
        final vertical equilibrium solve. Default is ``100``.
    xtol : float, optional
        Stopping tolerance on the height error during the scale search.
        Default is ``1e-2``.
    rtol : float, optional
        Stopping tolerance of the final vertical equilibrium solve.
        Default is ``1e-3``.
    density : float, optional
        The density for computation of the self-weight of the thrust network.
        Set this to 0.0 to ignore self-weight and only consider specified
        point loads. Default is ``1.0``.
    display : bool, optional
        If True, information about the current iteration will be displayed.

    Returns
    -------
    float
        The scale of the force densities.
    """
    xtol2 = xtol ** 2
    # --------------------------------------------------------------------------
    # FormDiagram: extract vertex/edge data into index-based arrays
    # --------------------------------------------------------------------------
    k_i = form.key_index()
    uv_i = form.uv_index()
    vcount = len(form.vertex)
    anchors = list(form.anchors())
    fixed = list(form.fixed())
    fixed = set(anchors + fixed)
    fixed = [k_i[key] for key in fixed]
    free = list(set(range(vcount)) - set(fixed))
    edges = [(k_i[u], k_i[v]) for u, v in form.edges_where({'is_edge': True})]
    xyz = array(form.vertices_attributes('xyz'), dtype=float64)
    thick = array(form.vertices_attribute('t'), dtype=float64).reshape((-1, 1))
    p = array(form.vertices_attributes(('px', 'py', 'pz')), dtype=float64)
    q = [attr.get('q', 1.0) for key, attr in form.edges_where({'is_edge': True}, True)]
    q = array(q, dtype=float64).reshape((-1, 1))
    C = connectivity_matrix(edges, 'csr')
    Ci = C[:, free]
    Cf = C[:, fixed]
    Cit = Ci.transpose()
    Ct = C.transpose()
    # --------------------------------------------------------------------------
    # original data (kept so loads/densities can be rescaled per iteration)
    # --------------------------------------------------------------------------
    p0 = array(p, copy=True)
    q0 = array(q, copy=True)
    # --------------------------------------------------------------------------
    # load updater
    # --------------------------------------------------------------------------
    update_loads = LoadUpdater(form, p0, thickness=thick, density=density)
    # --------------------------------------------------------------------------
    # scale to zmax
    # note that zmax should not exceed scale * diagonal
    # --------------------------------------------------------------------------
    scale = 1.0
    for k in range(kmax):
        if display:
            print(k)
        update_loads(p, xyz)
        q = scale * q0
        Q = diags([q.ravel()], [0])
        # Linear vertical equilibrium solved for the free vertices only.
        A = Cit.dot(Q).dot(Ci)
        b = p[free, 2] - Cit.dot(Q).dot(Cf).dot(xyz[fixed, 2])
        xyz[free, 2] = spsolve(A, b)
        z = max(xyz[free, 2])
        res2 = (z - zmax) ** 2
        if res2 < xtol2:
            break
        # Adjust the scale by the height ratio and iterate.
        scale = scale * (z / zmax)
    # --------------------------------------------------------------------------
    # vertical: full (load-updating) equilibrium at the found scale
    # --------------------------------------------------------------------------
    q = scale * q0
    Q = diags([q.ravel()], [0])
    res = update_z(xyz, Q, C, p, free, fixed, update_loads, tol=rtol, kmax=kmax, display=display)
    # --------------------------------------------------------------------------
    # update: derived quantities
    # --------------------------------------------------------------------------
    l = normrow(C.dot(xyz))             # edge lengths
    f = q * l                           # axial edge forces
    r = Ct.dot(Q).dot(C).dot(xyz) - p   # residual forces at the vertices
    # --------------------------------------------------------------------------
    # form: write results back onto the diagram
    # --------------------------------------------------------------------------
    for key, attr in form.vertices(True):
        index = k_i[key]
        attr['z'] = xyz[index, 2]
        attr['rx'] = r[index, 0]
        attr['ry'] = r[index, 1]
        attr['rz'] = r[index, 2]
    for key, attr in form.edges_where({'is_edge': True}, True):
        index = uv_i[key]
        attr['q'] = q[index, 0]
        attr['f'] = f[index, 0]
    return scale
def vertical_from_bbox(form, factor=5.0, kmax=100, tol=1e-3, density=1.0, display=True):
    """Compute vertical equilibrium with the force-density scale derived
    from the bounding box of the form diagram.

    The scale is the diagonal length of the (xy) bounding box divided by
    ``factor``.

    Parameters
    ----------
    form : compas_tna.diagrams.formdiagram.FormDiagram
        The form diagram.
    factor : float, optional
        Divisor applied to the bounding-box diagonal to obtain the scale.
        Default is ``5.0``.
    kmax : int, optional
        Maximum number of iterations of the vertical equilibrium solve.
        Default is ``100``.
    tol : float, optional
        Stopping criterion of the vertical equilibrium solve.
        Default is ``0.001``.
    density : float, optional
        Density for the self-weight computation; 0.0 ignores self-weight.
        Default is ``1.0``.
    display : bool, optional
        If True, information about the current iteration will be displayed.

    Returns
    -------
    float
        The scale of the force densities.
    """
    # --------------------------------------------------------------------------
    # FormDiagram: extract vertex/edge data into index-based arrays
    # --------------------------------------------------------------------------
    k_i = form.key_index()
    uv_i = form.uv_index()
    vcount = len(form.vertex)
    anchors = list(form.anchors())
    fixed = list(form.fixed())
    fixed = set(anchors + fixed)
    fixed = [k_i[key] for key in fixed]
    free = list(set(range(vcount)) - set(fixed))
    edges = [(k_i[u], k_i[v]) for u, v in form.edges_where({'is_edge': True})]
    xyz = array(form.vertices_attributes('xyz'), dtype=float64)
    thick = array(form.vertices_attribute('t'), dtype=float64).reshape((-1, 1))
    p = array(form.vertices_attributes(('px', 'py', 'pz')), dtype=float64)
    q = [attr.get('q', 1.0) for key, attr in form.edges_where({'is_edge': True}, True)]
    q = array(q, dtype=float64).reshape((-1, 1))
    C = connectivity_matrix(edges, 'csr')
    Ci = C[:, free]
    Cf = C[:, fixed]
    Cit = Ci.transpose()
    Ct = C.transpose()
    # --------------------------------------------------------------------------
    # original data
    # --------------------------------------------------------------------------
    p0 = array(p, copy=True)
    q0 = array(q, copy=True)
    # --------------------------------------------------------------------------
    # load updater
    # --------------------------------------------------------------------------
    update_loads = LoadUpdater(form, p0, thickness=thick, density=density)
    # --------------------------------------------------------------------------
    # scale: bounding-box diagonal divided by the given factor
    # --------------------------------------------------------------------------
    (xmin, ymin, zmin), (xmax, ymax, zmax) = form.bbox()
    d = ((xmax - xmin) ** 2 + (ymax - ymin) ** 2) ** 0.5
    scale = d / factor
    # --------------------------------------------------------------------------
    # vertical equilibrium at the derived scale
    # --------------------------------------------------------------------------
    q = scale * q0
    Q = diags([q.ravel()], [0])
    update_z(xyz, Q, C, p, free, fixed, update_loads, tol=tol, kmax=kmax, display=display)
    # --------------------------------------------------------------------------
    # update: derived quantities
    # --------------------------------------------------------------------------
    l = normrow(C.dot(xyz))             # edge lengths
    f = q * l                           # axial edge forces
    r = Ct.dot(Q).dot(C).dot(xyz) - p   # residual forces at the vertices
    sw = p - p0                         # self-weight contribution to the loads
    # --------------------------------------------------------------------------
    # form: write results back onto the diagram
    # --------------------------------------------------------------------------
    for key, attr in form.vertices(True):
        index = k_i[key]
        attr['z'] = xyz[index, 2]
        attr['rx'] = r[index, 0]
        attr['ry'] = r[index, 1]
        attr['rz'] = r[index, 2]
        attr['sw'] = sw[index, 2]
    for key, attr in form.edges_where({'is_edge': True}, True):
        index = uv_i[key]
        attr['q'] = q[index, 0]
        attr['f'] = f[index, 0]
    return scale
def vertical_from_q(form, scale=1.0, density=1.0, kmax=100, tol=1e-3, display=True):
    """Compute vertical equilibrium from the force densities of the independent edges.

    Parameters
    ----------
    form : FormDiagram
        The form diagram.
    scale : float, optional
        The scale of the horizontal forces.
        Default is ``1.0``.
    density : float, optional
        The density for computation of the self-weight of the thrust network.
        Set this to 0.0 to ignore self-weight and only consider specified point loads.
        Default is ``1.0``.
    kmax : int, optional
        The maximum number of iterations for computing vertical equilibrium.
        Default is ``100``.
    tol : float, optional
        The stopping criterion.
        Default is ``0.001``.
    display : bool, optional
        Display information about the current iteration.
        Default is ``True``.
    """
    # index maps from vertex/edge keys to contiguous array positions
    k_i = form.key_index()
    uv_i = form.uv_index()
    vcount = form.number_of_vertices()
    # anchors and explicitly fixed vertices are both treated as supports
    anchors = list(form.anchors())
    fixed = list(form.fixed())
    fixed = set(anchors + fixed)
    fixed = [k_i[key] for key in fixed]
    edges = [(k_i[u], k_i[v]) for u, v in form.edges_where({'is_edge': True})]
    free = list(set(range(vcount)) - set(fixed))
    # geometry, vertex thickness, and applied point loads as dense arrays
    xyz = array(form.vertices_attributes('xyz'), dtype=float64)
    thick = array(form.vertices_attribute('t'), dtype=float64).reshape((-1, 1))
    p = array(form.vertices_attributes(('px', 'py', 'pz')), dtype=float64)
    # per-edge force densities; edges without a 'q' attribute default to 1.0
    q = [attr.get('q', 1.0) for key, attr in form.edges_where({'is_edge': True}, True)]
    q = array(q, dtype=float64).reshape((-1, 1))
    C = connectivity_matrix(edges, 'csr')
    # --------------------------------------------------------------------------
    # original data
    # --------------------------------------------------------------------------
    p0 = array(p, copy=True)
    q0 = array(q, copy=True)
    # --------------------------------------------------------------------------
    # load updater
    # --------------------------------------------------------------------------
    update_loads = LoadUpdater(form, p0, thickness=thick, density=density)
    # --------------------------------------------------------------------------
    # update forcedensity based on given q[ind]
    # --------------------------------------------------------------------------
    q = scale * q0
    Q = diags([q.ravel()], [0])
    # --------------------------------------------------------------------------
    # compute vertical
    # --------------------------------------------------------------------------
    update_z(xyz, Q, C, p, free, fixed, update_loads, tol=tol, kmax=kmax, display=display)
    # --------------------------------------------------------------------------
    # update
    # --------------------------------------------------------------------------
    l = normrow(C.dot(xyz))  # edge lengths
    f = q * l  # axial edge forces: force density times length
    r = C.transpose().dot(Q).dot(C).dot(xyz) - p  # residual forces at the vertices
    sw = p - p0  # load accumulated by the updater on top of the original loads
    # --------------------------------------------------------------------------
    # form
    # --------------------------------------------------------------------------
    for key, attr in form.vertices(True):
        index = k_i[key]
        attr['z'] = xyz[index, 2]
        attr['rx'] = r[index, 0]
        attr['ry'] = r[index, 1]
        attr['rz'] = r[index, 2]
        attr['sw'] = sw[index, 2]
    for key, attr in form.edges_where({'is_edge': True}, True):
        index = uv_i[key]
        attr['f'] = f[index, 0]
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
    pass  # library module: no standalone behaviour
|
<filename>src/compas_tna/equilibrium/vertical.py<gh_stars>0
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from numpy import array
from numpy import float64
from scipy.sparse import diags
from scipy.sparse.linalg import spsolve
from compas.numerical import connectivity_matrix
from compas.numerical import normrow
from compas_tna.diagrams import FormDiagram
from compas_tna.utilities import LoadUpdater
from compas_tna.utilities import update_z
__all__ = [
'vertical_from_zmax',
'vertical_from_bbox',
'vertical_from_q',
'vertical_from_zmax_proxy',
'vertical_from_bbox_proxy',
'vertical_from_q_proxy'
]
def vertical_from_zmax_proxy(formdata, *args, **kwargs):
    """Serialization-friendly wrapper: rebuild the diagram from its data dict,
    run :func:`vertical_from_zmax`, and return the updated data plus the scale."""
    diagram = FormDiagram.from_data(formdata)
    scale = vertical_from_zmax(diagram, *args, **kwargs)
    return diagram.to_data(), scale
def vertical_from_bbox_proxy(formdata, *args, **kwargs):
    """Serialization-friendly wrapper: rebuild the diagram from its data dict,
    run :func:`vertical_from_bbox`, and return the updated data plus the scale."""
    diagram = FormDiagram.from_data(formdata)
    scale = vertical_from_bbox(diagram, *args, **kwargs)
    return diagram.to_data(), scale
def vertical_from_q_proxy(formdata, *args, **kwargs):
    """Serialization-friendly wrapper: rebuild the diagram from its data dict,
    run :func:`vertical_from_q`, and return the updated data (no scale)."""
    diagram = FormDiagram.from_data(formdata)
    vertical_from_q(diagram, *args, **kwargs)
    return diagram.to_data()
def vertical_from_zmax(form, zmax, kmax=100, xtol=1e-2, rtol=1e-3, density=1.0, display=True):
    """Compute the scale of the force densities for which the highest point of
    the thrust network is equal to a specified value.

    Parameters
    ----------
    form : compas_tna.diagrams.formdiagram.FormDiagram
        The form diagram.
    zmax : float
        The target maximum height of the thrust network.
    kmax : int, optional
        The maximum number of iterations, both for finding the scale and for
        computing vertical equilibrium. Default is ``100``.
    xtol : float, optional
        Stopping tolerance on the difference between the current highest point
        and ``zmax`` during the scaling iterations. Default is ``0.01``.
    rtol : float, optional
        Stopping tolerance for the final vertical equilibrium computation.
        Default is ``0.001``.
    density : float, optional
        The density for computation of the self-weight of the thrust network.
        Set this to 0.0 to ignore self-weight and only consider specified
        point loads. Default is ``1.0``.
    display : bool, optional
        If True, information about the current iteration will be displayed.

    Returns
    -------
    float
        The scale of the force densities.
    """
    # compare squared residuals to avoid a sqrt in the loop
    xtol2 = xtol ** 2
    # --------------------------------------------------------------------------
    # FormDiagram
    # --------------------------------------------------------------------------
    k_i = form.key_index()
    uv_i = form.uv_index()
    vcount = len(form.vertex)
    # anchors and explicitly fixed vertices are both treated as supports
    anchors = list(form.anchors())
    fixed = list(form.fixed())
    fixed = set(anchors + fixed)
    fixed = [k_i[key] for key in fixed]
    free = list(set(range(vcount)) - set(fixed))
    edges = [(k_i[u], k_i[v]) for u, v in form.edges_where({'is_edge': True})]
    xyz = array(form.vertices_attributes('xyz'), dtype=float64)
    thick = array(form.vertices_attribute('t'), dtype=float64).reshape((-1, 1))
    p = array(form.vertices_attributes(('px', 'py', 'pz')), dtype=float64)
    # per-edge force densities; edges without a 'q' attribute default to 1.0
    q = [attr.get('q', 1.0) for key, attr in form.edges_where({'is_edge': True}, True)]
    q = array(q, dtype=float64).reshape((-1, 1))
    # connectivity matrix and its free/fixed column partitions
    C = connectivity_matrix(edges, 'csr')
    Ci = C[:, free]
    Cf = C[:, fixed]
    Cit = Ci.transpose()
    Ct = C.transpose()
    # --------------------------------------------------------------------------
    # original data
    # --------------------------------------------------------------------------
    p0 = array(p, copy=True)
    q0 = array(q, copy=True)
    # --------------------------------------------------------------------------
    # load updater
    # --------------------------------------------------------------------------
    update_loads = LoadUpdater(form, p0, thickness=thick, density=density)
    # --------------------------------------------------------------------------
    # scale to zmax
    # note that zmax should not exceed scale * diagonal
    # --------------------------------------------------------------------------
    scale = 1.0
    for k in range(kmax):
        if display:
            print(k)
        update_loads(p, xyz)
        q = scale * q0
        Q = diags([q.ravel()], [0])
        # solve the linear equilibrium in z for the free vertices only
        A = Cit.dot(Q).dot(Ci)
        b = p[free, 2] - Cit.dot(Q).dot(Cf).dot(xyz[fixed, 2])
        xyz[free, 2] = spsolve(A, b)
        z = max(xyz[free, 2])
        res2 = (z - zmax) ** 2
        if res2 < xtol2:
            break
        # adjust the scale proportionally to the overshoot of the highest point
        scale = scale * (z / zmax)
    # --------------------------------------------------------------------------
    # vertical
    # --------------------------------------------------------------------------
    q = scale * q0
    Q = diags([q.ravel()], [0])
    res = update_z(xyz, Q, C, p, free, fixed, update_loads, tol=rtol, kmax=kmax, display=display)
    # --------------------------------------------------------------------------
    # update
    # --------------------------------------------------------------------------
    l = normrow(C.dot(xyz))  # edge lengths
    f = q * l  # axial edge forces: force density times length
    r = Ct.dot(Q).dot(C).dot(xyz) - p  # residual forces at the vertices
    # --------------------------------------------------------------------------
    # form
    # --------------------------------------------------------------------------
    for key, attr in form.vertices(True):
        index = k_i[key]
        attr['z'] = xyz[index, 2]
        attr['rx'] = r[index, 0]
        attr['ry'] = r[index, 1]
        attr['rz'] = r[index, 2]
    for key, attr in form.edges_where({'is_edge': True}, True):
        index = uv_i[key]
        attr['q'] = q[index, 0]
        attr['f'] = f[index, 0]
    return scale
def vertical_from_bbox(form, factor=5.0, kmax=100, tol=1e-3, density=1.0, display=True):
    """Compute vertical equilibrium with the scale derived from the bounding box
    of the form diagram (scale = bbox diagonal / ``factor``).

    Parameters
    ----------
    form : FormDiagram
        The form diagram.
    factor : float, optional
        Divisor applied to the bounding-box diagonal to obtain the scale.
        Default is ``5.0``.
    kmax : int, optional
        The maximum number of iterations for computing vertical equilibrium.
        Default is ``100``.
    tol : float, optional
        The stopping criterion. Default is ``0.001``.
    density : float, optional
        The density for computation of the self-weight of the thrust network.
        Set this to 0.0 to ignore self-weight. Default is ``1.0``.
    display : bool, optional
        Display information about the current iteration. Default is ``True``.

    Returns
    -------
    float
        The scale of the force densities.
    """
    # --------------------------------------------------------------------------
    # FormDiagram
    # --------------------------------------------------------------------------
    k_i = form.key_index()
    uv_i = form.uv_index()
    vcount = len(form.vertex)
    # anchors and explicitly fixed vertices are both treated as supports
    anchors = list(form.anchors())
    fixed = list(form.fixed())
    fixed = set(anchors + fixed)
    fixed = [k_i[key] for key in fixed]
    free = list(set(range(vcount)) - set(fixed))
    edges = [(k_i[u], k_i[v]) for u, v in form.edges_where({'is_edge': True})]
    xyz = array(form.vertices_attributes('xyz'), dtype=float64)
    thick = array(form.vertices_attribute('t'), dtype=float64).reshape((-1, 1))
    p = array(form.vertices_attributes(('px', 'py', 'pz')), dtype=float64)
    # per-edge force densities; edges without a 'q' attribute default to 1.0
    q = [attr.get('q', 1.0) for key, attr in form.edges_where({'is_edge': True}, True)]
    q = array(q, dtype=float64).reshape((-1, 1))
    C = connectivity_matrix(edges, 'csr')
    Ci = C[:, free]
    Cf = C[:, fixed]
    Cit = Ci.transpose()
    Ct = C.transpose()
    # --------------------------------------------------------------------------
    # original data
    # --------------------------------------------------------------------------
    p0 = array(p, copy=True)
    q0 = array(q, copy=True)
    # --------------------------------------------------------------------------
    # load updater
    # --------------------------------------------------------------------------
    update_loads = LoadUpdater(form, p0, thickness=thick, density=density)
    # --------------------------------------------------------------------------
    # scale
    # --------------------------------------------------------------------------
    # scale is the horizontal (xy) diagonal of the bounding box over the factor
    (xmin, ymin, zmin), (xmax, ymax, zmax) = form.bbox()
    d = ((xmax - xmin) ** 2 + (ymax - ymin) ** 2) ** 0.5
    scale = d / factor
    # --------------------------------------------------------------------------
    # vertical
    # --------------------------------------------------------------------------
    q = scale * q0
    Q = diags([q.ravel()], [0])
    update_z(xyz, Q, C, p, free, fixed, update_loads, tol=tol, kmax=kmax, display=display)
    # --------------------------------------------------------------------------
    # update
    # --------------------------------------------------------------------------
    l = normrow(C.dot(xyz))  # edge lengths
    f = q * l  # axial edge forces: force density times length
    r = Ct.dot(Q).dot(C).dot(xyz) - p  # residual forces at the vertices
    sw = p - p0  # load accumulated by the updater on top of the original loads
    # --------------------------------------------------------------------------
    # form
    # --------------------------------------------------------------------------
    for key, attr in form.vertices(True):
        index = k_i[key]
        attr['z'] = xyz[index, 2]
        attr['rx'] = r[index, 0]
        attr['ry'] = r[index, 1]
        attr['rz'] = r[index, 2]
        attr['sw'] = sw[index, 2]
    for key, attr in form.edges_where({'is_edge': True}, True):
        index = uv_i[key]
        attr['q'] = q[index, 0]
        attr['f'] = f[index, 0]
    return scale
def vertical_from_q(form, scale=1.0, density=1.0, kmax=100, tol=1e-3, display=True):
    """Compute vertical equilibrium from the force densities of the independent edges.

    Parameters
    ----------
    form : FormDiagram
        The form diagram.
    scale : float, optional
        The scale of the horizontal forces.
        Default is ``1.0``.
    density : float, optional
        The density for computation of the self-weight of the thrust network.
        Set this to 0.0 to ignore self-weight and only consider specified point loads.
        Default is ``1.0``.
    kmax : int, optional
        The maximum number of iterations for computing vertical equilibrium.
        Default is ``100``.
    tol : float, optional
        The stopping criterion.
        Default is ``0.001``.
    display : bool, optional
        Display information about the current iteration.
        Default is ``True``.
    """
    # index maps from vertex/edge keys to contiguous array positions
    k_i = form.key_index()
    uv_i = form.uv_index()
    vcount = form.number_of_vertices()
    # anchors and explicitly fixed vertices are both treated as supports
    anchors = list(form.anchors())
    fixed = list(form.fixed())
    fixed = set(anchors + fixed)
    fixed = [k_i[key] for key in fixed]
    edges = [(k_i[u], k_i[v]) for u, v in form.edges_where({'is_edge': True})]
    free = list(set(range(vcount)) - set(fixed))
    # geometry, vertex thickness, and applied point loads as dense arrays
    xyz = array(form.vertices_attributes('xyz'), dtype=float64)
    thick = array(form.vertices_attribute('t'), dtype=float64).reshape((-1, 1))
    p = array(form.vertices_attributes(('px', 'py', 'pz')), dtype=float64)
    # per-edge force densities; edges without a 'q' attribute default to 1.0
    q = [attr.get('q', 1.0) for key, attr in form.edges_where({'is_edge': True}, True)]
    q = array(q, dtype=float64).reshape((-1, 1))
    C = connectivity_matrix(edges, 'csr')
    # --------------------------------------------------------------------------
    # original data
    # --------------------------------------------------------------------------
    p0 = array(p, copy=True)
    q0 = array(q, copy=True)
    # --------------------------------------------------------------------------
    # load updater
    # --------------------------------------------------------------------------
    update_loads = LoadUpdater(form, p0, thickness=thick, density=density)
    # --------------------------------------------------------------------------
    # update forcedensity based on given q[ind]
    # --------------------------------------------------------------------------
    q = scale * q0
    Q = diags([q.ravel()], [0])
    # --------------------------------------------------------------------------
    # compute vertical
    # --------------------------------------------------------------------------
    update_z(xyz, Q, C, p, free, fixed, update_loads, tol=tol, kmax=kmax, display=display)
    # --------------------------------------------------------------------------
    # update
    # --------------------------------------------------------------------------
    l = normrow(C.dot(xyz))  # edge lengths
    f = q * l  # axial edge forces: force density times length
    r = C.transpose().dot(Q).dot(C).dot(xyz) - p  # residual forces at the vertices
    sw = p - p0  # load accumulated by the updater on top of the original loads
    # --------------------------------------------------------------------------
    # form
    # --------------------------------------------------------------------------
    for key, attr in form.vertices(True):
        index = k_i[key]
        attr['z'] = xyz[index, 2]
        attr['rx'] = r[index, 0]
        attr['ry'] = r[index, 1]
        attr['rz'] = r[index, 2]
        attr['sw'] = sw[index, 2]
    for key, attr in form.edges_where({'is_edge': True}, True):
        index = uv_i[key]
        attr['f'] = f[index, 0]
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
    pass  # library module: no standalone behaviour
|
en
| 0.301615
|
For the given form and force diagram, compute the scale of the force diagram for which the highest point of the thrust network is equal to a specified value. Parameters ---------- form : compas_tna.diagrams.formdiagram.FormDiagram The form diagram force : compas_tna.diagrams.forcediagram.ForceDiagram The corresponding force diagram. zmax : float The maximum height of the thrust network (the default is None, which implies that the maximum height will be equal to a quarter of the diagonal of the bounding box of the form diagram). kmax : int The maximum number of iterations for computing vertical equilibrium (the default is 100). tol : float The stopping criterion. density : float The density for computation of the self-weight of the thrust network (the default is 1.0). Set this to 0.0 to ignore self-weight and only consider specified point loads. display : bool If True, information about the current iteration will be displayed. Returns ------- float The scale of the forcedensities. # -------------------------------------------------------------------------- # FormDiagram # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # original data # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # load updater # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # scale to zmax # note that zmax should not exceed scale * diagonal # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # vertical # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # update # 
-------------------------------------------------------------------------- # -------------------------------------------------------------------------- # form # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # FormDiagram # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # original data # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # load updater # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # scale # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # vertical # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # update # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # form # -------------------------------------------------------------------------- Compute vertical equilibrium from the force densities of the independent edges. Parameters ---------- form : FormDiagram The form diagram scale : float The scale of the horizontal forces. Default is ``1.0``. density : float, optional The density for computation of the self-weight of the thrust network. Set this to 0.0 to ignore self-weight and only consider specified point loads. Default is ``1.0``. kmax : int, optional The maximum number of iterations for computing vertical equilibrium. Default is ``100``. tol : float The stopping criterion. Default is ``0.001``. 
display : bool Display information about the current iteration. Default is ``True``. # -------------------------------------------------------------------------- # original data # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # load updater # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # update forcedensity based on given q[ind] # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # compute vertical # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # update # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # form # -------------------------------------------------------------------------- # ============================================================================== # Main # ==============================================================================
| 2.079972
| 2
|
lib/djado/paves.py
|
hdknr/djado
| 0
|
6625729
|
<gh_stars>0
from paver.easy import (
task, cmdopts, consume_args
)
import os
def path(name):
    """Return the path of *name* relative to this module's directory."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, name)
def manage_py(args, do=False, settings_class="app.settings"):
    #: manage.py
    # Run a Django management command programmatically.
    # ``args`` mimics sys.argv, e.g. ['manage.py', 'runserver', '0.0.0.0:8000'].
    # Note: setdefault means an already-exported DJANGO_SETTINGS_MODULE wins
    # over ``settings_class``.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", settings_class)
    from django.core.management import execute_from_command_line
    if do:
        # When ``do`` is True, append extra apps so their management
        # commands become available for this invocation.
        from django.conf import settings
        settings.INSTALLED_APPS = settings.INSTALLED_APPS + [
            'djado',
            'django_extensions', ]
    execute_from_command_line(args)
@task
@cmdopts([
    ('port=', 'p', 'TCP PORT'),
])
def runserver(options):
    ''' Run Django Web Application '''
    # Bind on all interfaces; the port defaults to 8000 when not given.
    listen = "0.0.0.0:{}".format(
        options.get('port', '8000'))
    args = ['manage.py', 'runserver', listen]
    manage_py(args)
@task
@consume_args
def do(args):
    ''' Run manage.py with additional functions '''
    # Prepend the program name expected by execute_from_command_line, then
    # delegate with do=True so the extra apps are enabled.
    args.insert(0, 'manage.py')
    manage_py(args, do=True)
|
from paver.easy import (
task, cmdopts, consume_args
)
import os
def path(name):
    """Return the path of *name* relative to this module's directory."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, name)
def manage_py(args, do=False, settings_class="app.settings"):
    #: manage.py
    # Run a Django management command programmatically.
    # ``args`` mimics sys.argv, e.g. ['manage.py', 'runserver', '0.0.0.0:8000'].
    # Note: setdefault means an already-exported DJANGO_SETTINGS_MODULE wins
    # over ``settings_class``.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", settings_class)
    from django.core.management import execute_from_command_line
    if do:
        # When ``do`` is True, append extra apps so their management
        # commands become available for this invocation.
        from django.conf import settings
        settings.INSTALLED_APPS = settings.INSTALLED_APPS + [
            'djado',
            'django_extensions', ]
    execute_from_command_line(args)
@task
@cmdopts([
    ('port=', 'p', 'TCP PORT'),
])
def runserver(options):
    ''' Run Django Web Application '''
    # Bind on all interfaces; the port defaults to 8000 when not given.
    listen = "0.0.0.0:{}".format(
        options.get('port', '8000'))
    args = ['manage.py', 'runserver', listen]
    manage_py(args)
@task
@consume_args
def do(args):
    ''' Run manage.py with additional functions '''
    # Prepend the program name expected by execute_from_command_line, then
    # delegate with do=True so the extra apps are enabled.
    args.insert(0, 'manage.py')
    manage_py(args, do=True)
|
en
| 0.76023
|
#: manage.py Run Django Web Application Run manage.py with additional functions
| 2.214145
| 2
|
resimpy/simulate/dispatcher/batch/Bulk.py
|
cribbslab/resimpy
| 0
|
6625730
|
<filename>resimpy/simulate/dispatcher/batch/Bulk.py
__version__ = "v1.0"
__copyright__ = "Copyright 2022"
__license__ = "MIT"
__lab__ = "<NAME> lab"
import numpy as np
from resimpy.simulate.dispatcher.single.Bulk import bulk as simubulk
from resimpy.gspl.FromSimulator import fromSimulator
from resimpy.Path import to
class bulk(object):
    """Batch dispatcher for bulk simulations.

    Sweeps one simulation parameter at a time (PCR cycles, PCR error rate,
    sequencing error rate, UMI length, amplification rate) while holding the
    others at fixed values, and repeats each sweep ``permutation_num`` times.
    Each sweep method builds a parameter dict and hands it to the single-run
    dispatcher (``resimpy.simulate.dispatcher.single.Bulk.bulk``).
    """
    def __init__(self, ):
        # number of repeated permutations per sweep
        self.permutation_num = 1
        # fixed values used while the other parameters are being swept
        self.umi_unit_len_fixed = 10
        # self.seq_len_fixed = 100
        # self.umi_num_fixed = 50
        self.pcr_num_fixed = 12
        self.pcr_err_fixed = 1e-4
        self.seq_err_fixed = 1e-2
        self.ampl_rate_fixed = 0.80
        self.sim_thres_fixed = 3
        self.seq_sub_spl_rate = 0.3333
        # ground-truth sample from the SPsimSeqFixSM simulator
        self.gspl = fromSimulator(simulator='SPsimSeqFixSM').run()
        # swept parameter ranges
        self.ampl_rates = np.linspace(0.1, 1, 10)
        self.umi_unit_lens = np.arange(8, 36 + 1, 1)
        self.umi_nums = np.arange(20, 140 + 20, 20)
        self.pcr_nums = np.arange(1, 14 + 1, 1)
        self.pcr_errs, self.seq_errs = self.errors()
        print(self.pcr_errs, self.seq_errs)
        # sweep name -> swept values
        self.metrics = {
            'pcr_nums': self.pcr_nums,
            'pcr_errs': self.pcr_errs,
            'seq_errs': self.seq_errs,
            'ampl_rates': self.ampl_rates,
            'umi_lens': self.umi_unit_lens,
        }
        # sweep name -> FASTQ file-name prefix
        self.fastq_fn_pref = {
            'pcr_nums': 'pcr_',
            'pcr_errs': 'pcr_err_',
            'seq_errs': 'seq_err_',
            'ampl_rates': 'ampl_rate_',
            'umi_lens': 'umi_len_',
        }
    def errors(self, ):
        """Build the swept error-rate grids (used for both PCR and sequencing).

        Returns a pair of lists: roughly log-spaced values from 1e-5 up
        (with 2.5x/5x/7.5x intermediates per decade), plus a tail of
        hand-picked values up to 0.3.
        """
        pcr_errs = []
        seq_errs = []
        e = 1e-5
        while e < 3e-1:
            pcr_errs.append(e)
            seq_errs.append(e)
            if 5 * e < 3e-1:
                pcr_errs.append(2.5 * e)
                pcr_errs.append(5 * e)
                pcr_errs.append(7.5 * e)
                seq_errs.append(2.5 * e)
                seq_errs.append(5 * e)
                seq_errs.append(7.5 * e)
            e = 10 * e
        pcr_errs.append(0.125)
        seq_errs.append(0.125)
        pcr_errs.append(0.15)
        seq_errs.append(0.15)
        pcr_errs.append(0.2)
        seq_errs.append(0.2)
        pcr_errs.append(0.225)
        seq_errs.append(0.225)
        pcr_errs.append(0.25)
        seq_errs.append(0.25)
        pcr_errs.append(0.3)
        seq_errs.append(0.3)
        # print(pcr_errs)
        # print(seq_errs)
        return pcr_errs, seq_errs
    def pcrNums(self, ):
        # Sweep the number of PCR cycles ('pcr_nums'); all other parameters fixed.
        for pn in range(self.permutation_num):
            simu_params = {
                'init_seq_setting': {
                    'gspl': self.gspl,
                    'umi_unit_pattern': 1,
                    'umi_unit_len': self.umi_unit_len_fixed,
                    # 'seq_len': self.seq_len_fixed - self.umi_unit_len_fixed,
                    'is_seed': True,
                    'working_dir': to('data/simu/monomer/bulk/pcr_num/permute_') + str(pn) + '/',
                    'is_sv_umi_lib': True,
                    'umi_lib_fpn': to('data/simu/monomer/bulk/pcr_num/permute_') + str(pn) + '/',
                    # 'is_sv_seq_lib': True,
                    # 'seq_lib_fpn': to('data/simu/monomer/bulk/pcr_num/permute_') + str(pn) + '/seq.txt',
                    'condis': ['umi'],
                    'sim_thres': self.sim_thres_fixed,
                    'permutation': pn,
                },
                'ampl_rate': self.ampl_rate_fixed,
                'pcr_nums': self.pcr_nums,
                'err_num_met': 'nbinomial',
                'pcr_error': self.pcr_err_fixed,
                'seq_error': self.seq_err_fixed,
                'seq_sub_spl_rate': self.seq_sub_spl_rate,
                'use_seed': False,
                'seed': None,
                'write': {
                    'fastq_fp': to('data/simu/monomer/bulk/pcr_num/permute_') + str(pn) + '/',
                    'fastq_fn': '',
                }
            }
            p = simubulk(simu_params)
            print(p.ondemandPCRNums())
        return
    def pcrErrs(self, ):
        # Sweep the PCR error rate ('pcr_errors'); all other parameters fixed.
        for pn in range(self.permutation_num):
            simu_params = {
                'init_seq_setting': {
                    'gspl': self.gspl,
                    'umi_unit_pattern': 1,
                    'umi_unit_len': self.umi_unit_len_fixed,
                    # 'seq_len': self.seq_len_fixed - self.umi_unit_len_fixed,
                    'is_seed': True,
                    'working_dir': to('data/simu/monomer/bulk/pcr_err/permute_') + str(pn) + '/',
                    'is_sv_umi_lib': True,
                    'umi_lib_fpn': to('data/simu/monomer/bulk/pcr_err/permute_') + str(pn) + '/',
                    # 'is_sv_seq_lib': True,
                    # 'seq_lib_fpn': to('data/simu/monomer/bulk/pcr_err/permute_') + str(pn) + '/seq.txt',
                    'condis': ['umi'],
                    'sim_thres': self.sim_thres_fixed,
                    'permutation': pn,
                },
                'ampl_rate': self.ampl_rate_fixed,
                'pcr_num': self.pcr_num_fixed,
                'err_num_met': 'nbinomial',
                'pcr_errors': self.pcr_errs,
                'seq_error': self.seq_err_fixed,
                'seq_sub_spl_rate': self.seq_sub_spl_rate,
                'use_seed': False,
                'seed': None,
                'write': {
                    'fastq_fp': to('data/simu/monomer/bulk/pcr_err/permute_') + str(pn) + '/',
                    'fastq_fn': '',
                }
            }
            p = simubulk(simu_params)
            print(p.ondemandPCRErrs())
        return
    def seqErrs(self, ):
        # Sweep the sequencing error rate ('seq_errors'); all other parameters fixed.
        for pn in range(self.permutation_num):
            simu_params = {
                'init_seq_setting': {
                    'gspl': self.gspl,
                    'umi_unit_pattern': 1,
                    'umi_unit_len': self.umi_unit_len_fixed,
                    # 'seq_len': self.seq_len_fixed - self.umi_unit_len_fixed,
                    'is_seed': True,
                    'working_dir': to('data/simu/monomer/bulk/seq_err/permute_') + str(pn) + '/',
                    'is_sv_umi_lib':True,
                    'umi_lib_fpn':to('data/simu/monomer/bulk/seq_err/permute_') + str(pn) + '/',
                    # 'is_sv_seq_lib':True,
                    # 'seq_lib_fpn':to('data/simu/monomer/bulk/seq_err/permute_') + str(pn) + '/seq.txt',
                    'condis': ['umi'],
                    'sim_thres': self.sim_thres_fixed,
                    'permutation': pn,
                },
                'ampl_rate': self.ampl_rate_fixed,
                'pcr_num': self.pcr_num_fixed,
                'err_num_met': 'nbinomial',
                'pcr_error': self.pcr_err_fixed,
                'seq_errors': self.seq_errs,
                'seq_sub_spl_rate': self.seq_sub_spl_rate,
                'use_seed': False,
                'seed': None,
                'write': {
                    'fastq_fp': to('data/simu/monomer/bulk/seq_err/permute_') + str(pn) + '/',
                    'fastq_fn': '',
                }
            }
            p = simubulk(simu_params)
            print(p.ondemandSeqErrs())
        return
    def umiLens(self, ):
        # Sweep the UMI unit length ('umi_unit_lens'); all other parameters fixed.
        for pn in range(self.permutation_num):
            simu_params = {
                'init_seq_setting': {
                    'gspl': self.gspl,
                    'umi_unit_pattern': 1,
                    'umi_unit_lens': self.umi_unit_lens,
                    # 'seq_len': self.seq_len_fixed,
                    'is_seed': True,
                    'working_dir': to('data/simu/monomer/bulk/umi_len/permute_') + str(pn) + '/',
                    'is_sv_umi_lib': True,
                    # NOTE(review): key is 'umi_lib_fp' here but 'umi_lib_fpn'
                    # in the other sweeps — confirm which the dispatcher expects.
                    'umi_lib_fp': to('data/simu/monomer/bulk/umi_len/permute_') + str(pn) + '/',
                    # 'is_sv_seq_lib': True,
                    # 'seq_lib_fpn': to('data/simu/monomer/bulk/umi_len/permute_') + str(pn) + '/',
                    'condis': ['umi'],
                    'sim_thres': 1,
                    # 'sim_thres': self.sim_thres_fixed,
                    'permutation': pn,
                },
                'ampl_rate': self.ampl_rate_fixed,
                'pcr_num': self.pcr_num_fixed,
                'err_num_met': 'nbinomial',
                'pcr_error': self.pcr_err_fixed,
                'seq_error': self.seq_err_fixed,
                'seq_sub_spl_rate': self.seq_sub_spl_rate,
                'use_seed': False,
                'seed': None,
                'write': {
                    'fastq_fp': to('data/simu/monomer/bulk/umi_len/permute_') + str(pn) + '/',
                    'fastq_fn': '',
                }
            }
            p = simubulk(simu_params)
            print(p.ondemandUMILens())
        return
    def amplRates(self, ):
        # Sweep the amplification rate ('ampl_rates'); all other parameters fixed.
        for pn in range(self.permutation_num):
            simu_params = {
                'init_seq_setting': {
                    'gspl': self.gspl,
                    'umi_unit_pattern': 1,
                    'umi_unit_len': self.umi_unit_len_fixed,
                    # 'seq_len': self.seq_len_fixed - self.umi_unit_len_fixed,
                    'is_seed': True,
                    'working_dir': to('data/simu/monomer/bulk/ampl_rate/permute_') + str(pn) + '/',
                    'is_sv_umi_lib': True,
                    'umi_lib_fpn': to('data/simu/monomer/bulk/ampl_rate/permute_') + str(pn) + '/',
                    # 'is_sv_seq_lib': True,
                    # 'seq_lib_fpn': to('data/simu/monomer/bulk/ampl_rate/permute_') + str(pn) + '/seq.txt',
                    'condis': ['umi'],
                    'sim_thres': self.sim_thres_fixed,
                    'permutation': pn,
                },
                'ampl_rates': self.ampl_rates,
                'pcr_num': self.pcr_num_fixed,
                'err_num_met': 'nbinomial',
                'pcr_error': self.pcr_err_fixed,
                'seq_error': self.seq_err_fixed,
                'seq_sub_spl_rate': self.seq_sub_spl_rate,
                'use_seed': False,
                'seed': None,
                'write': {
                    'fastq_fp': to('data/simu/monomer/bulk/ampl_rate/permute_') + str(pn) + '/',
                    'fastq_fn': '',
                }
            }
            p = simubulk(simu_params)
            print(p.ondemandAmplRates())
        return
if __name__ == "__main__":
    # Ad-hoc driver: run one parameter sweep; the others are toggled by
    # (un)commenting the calls below.
    p = bulk()
    # print(p.pcrNums())
    #
    # print(p.pcrErrs())
    #
    # print(p.seqErrs())
    print(p.umiLens())
    # print(p.amplRates())
|
<filename>resimpy/simulate/dispatcher/batch/Bulk.py
__version__ = "v1.0"
__copyright__ = "Copyright 2022"
__license__ = "MIT"
__lab__ = "<NAME> lab"
import numpy as np
from resimpy.simulate.dispatcher.single.Bulk import bulk as simubulk
from resimpy.gspl.FromSimulator import fromSimulator
from resimpy.Path import to
class bulk(object):
def __init__(self, ):
self.permutation_num = 1
self.umi_unit_len_fixed = 10
# self.seq_len_fixed = 100
# self.umi_num_fixed = 50
self.pcr_num_fixed = 12
self.pcr_err_fixed = 1e-4
self.seq_err_fixed = 1e-2
self.ampl_rate_fixed = 0.80
self.sim_thres_fixed = 3
self.seq_sub_spl_rate = 0.3333
self.gspl = fromSimulator(simulator='SPsimSeqFixSM').run()
self.ampl_rates = np.linspace(0.1, 1, 10)
self.umi_unit_lens = np.arange(8, 36 + 1, 1)
self.umi_nums = np.arange(20, 140 + 20, 20)
self.pcr_nums = np.arange(1, 14 + 1, 1)
self.pcr_errs, self.seq_errs = self.errors()
print(self.pcr_errs, self.seq_errs)
self.metrics = {
'pcr_nums': self.pcr_nums,
'pcr_errs': self.pcr_errs,
'seq_errs': self.seq_errs,
'ampl_rates': self.ampl_rates,
'umi_lens': self.umi_unit_lens,
}
self.fastq_fn_pref = {
'pcr_nums': 'pcr_',
'pcr_errs': 'pcr_err_',
'seq_errs': 'seq_err_',
'ampl_rates': 'ampl_rate_',
'umi_lens': 'umi_len_',
}
def errors(self, ):
pcr_errs = []
seq_errs = []
e = 1e-5
while e < 3e-1:
pcr_errs.append(e)
seq_errs.append(e)
if 5 * e < 3e-1:
pcr_errs.append(2.5 * e)
pcr_errs.append(5 * e)
pcr_errs.append(7.5 * e)
seq_errs.append(2.5 * e)
seq_errs.append(5 * e)
seq_errs.append(7.5 * e)
e = 10 * e
pcr_errs.append(0.125)
seq_errs.append(0.125)
pcr_errs.append(0.15)
seq_errs.append(0.15)
pcr_errs.append(0.2)
seq_errs.append(0.2)
pcr_errs.append(0.225)
seq_errs.append(0.225)
pcr_errs.append(0.25)
seq_errs.append(0.25)
pcr_errs.append(0.3)
seq_errs.append(0.3)
# print(pcr_errs)
# print(seq_errs)
return pcr_errs, seq_errs
def pcrNums(self, ):
for pn in range(self.permutation_num):
simu_params = {
'init_seq_setting': {
'gspl': self.gspl,
'umi_unit_pattern': 1,
'umi_unit_len': self.umi_unit_len_fixed,
# 'seq_len': self.seq_len_fixed - self.umi_unit_len_fixed,
'is_seed': True,
'working_dir': to('data/simu/monomer/bulk/pcr_num/permute_') + str(pn) + '/',
'is_sv_umi_lib': True,
'umi_lib_fpn': to('data/simu/monomer/bulk/pcr_num/permute_') + str(pn) + '/',
# 'is_sv_seq_lib': True,
# 'seq_lib_fpn': to('data/simu/monomer/bulk/pcr_num/permute_') + str(pn) + '/seq.txt',
'condis': ['umi'],
'sim_thres': self.sim_thres_fixed,
'permutation': pn,
},
'ampl_rate': self.ampl_rate_fixed,
'pcr_nums': self.pcr_nums,
'err_num_met': 'nbinomial',
'pcr_error': self.pcr_err_fixed,
'seq_error': self.seq_err_fixed,
'seq_sub_spl_rate': self.seq_sub_spl_rate,
'use_seed': False,
'seed': None,
'write': {
'fastq_fp': to('data/simu/monomer/bulk/pcr_num/permute_') + str(pn) + '/',
'fastq_fn': '',
}
}
p = simubulk(simu_params)
print(p.ondemandPCRNums())
return
def pcrErrs(self, ):
    """Run the PCR-error-rate sweep once per permutation.

    ``self.pcr_errs`` is the swept variable; amplification rate, PCR cycle
    count and sequencing error are held at their fixed values.
    """
    for perm in range(self.permutation_num):
        # Every per-permutation artefact lives under one working directory.
        work_dir = to('data/simu/monomer/bulk/pcr_err/permute_') + str(perm) + '/'
        init_seq_setting = {
            'gspl': self.gspl,
            'umi_unit_pattern': 1,
            'umi_unit_len': self.umi_unit_len_fixed,
            'is_seed': True,
            'working_dir': work_dir,
            'is_sv_umi_lib': True,
            'umi_lib_fpn': work_dir,
            'condis': ['umi'],
            'sim_thres': self.sim_thres_fixed,
            'permutation': perm,
        }
        simu_params = {
            'init_seq_setting': init_seq_setting,
            'ampl_rate': self.ampl_rate_fixed,
            'pcr_num': self.pcr_num_fixed,
            'err_num_met': 'nbinomial',
            'pcr_errors': self.pcr_errs,  # the swept variable
            'seq_error': self.seq_err_fixed,
            'seq_sub_spl_rate': self.seq_sub_spl_rate,
            'use_seed': False,
            'seed': None,
            'write': {
                'fastq_fp': work_dir,
                'fastq_fn': '',
            },
        }
        print(simubulk(simu_params).ondemandPCRErrs())
    return
def seqErrs(self, ):
    """Run the sequencing-error-rate sweep once per permutation.

    ``self.seq_errs`` is the swept variable; amplification rate, PCR cycle
    count and PCR error are held at their fixed values.
    """
    for perm in range(self.permutation_num):
        # Every per-permutation artefact lives under one working directory.
        work_dir = to('data/simu/monomer/bulk/seq_err/permute_') + str(perm) + '/'
        init_seq_setting = {
            'gspl': self.gspl,
            'umi_unit_pattern': 1,
            'umi_unit_len': self.umi_unit_len_fixed,
            'is_seed': True,
            'working_dir': work_dir,
            'is_sv_umi_lib': True,
            'umi_lib_fpn': work_dir,
            'condis': ['umi'],
            'sim_thres': self.sim_thres_fixed,
            'permutation': perm,
        }
        simu_params = {
            'init_seq_setting': init_seq_setting,
            'ampl_rate': self.ampl_rate_fixed,
            'pcr_num': self.pcr_num_fixed,
            'err_num_met': 'nbinomial',
            'pcr_error': self.pcr_err_fixed,
            'seq_errors': self.seq_errs,  # the swept variable
            'seq_sub_spl_rate': self.seq_sub_spl_rate,
            'use_seed': False,
            'seed': None,
            'write': {
                'fastq_fp': work_dir,
                'fastq_fn': '',
            },
        }
        print(simubulk(simu_params).ondemandSeqErrs())
    return
def umiLens(self, ):
    """Run the UMI-length sweep once per permutation.

    ``self.umi_unit_lens`` is the swept variable; all error sources are
    held at their fixed values. Unlike the sibling sweeps, the similarity
    threshold is pinned to 1 here (the original code deliberately bypasses
    ``self.sim_thres_fixed`` — kept as-is).
    """
    for pn in range(self.permutation_num):
        work_dir = to('data/simu/monomer/bulk/umi_len/permute_') + str(pn) + '/'
        simu_params = {
            'init_seq_setting': {
                'gspl': self.gspl,
                'umi_unit_pattern': 1,
                'umi_unit_lens': self.umi_unit_lens,  # the swept variable
                'is_seed': True,
                'working_dir': work_dir,
                'is_sv_umi_lib': True,
                # Fixed: the key was 'umi_lib_fp', inconsistent with the
                # 'umi_lib_fpn' key used by every sibling sweep method, so
                # the UMI library path was likely never picked up.
                'umi_lib_fpn': work_dir,
                'condis': ['umi'],
                'sim_thres': 1,
                'permutation': pn,
            },
            'ampl_rate': self.ampl_rate_fixed,
            'pcr_num': self.pcr_num_fixed,
            'err_num_met': 'nbinomial',
            'pcr_error': self.pcr_err_fixed,
            'seq_error': self.seq_err_fixed,
            'seq_sub_spl_rate': self.seq_sub_spl_rate,
            'use_seed': False,
            'seed': None,
            'write': {
                'fastq_fp': work_dir,
                'fastq_fn': '',
            }
        }
        p = simubulk(simu_params)
        print(p.ondemandUMILens())
    return
def amplRates(self, ):
    """Run the amplification-rate sweep once per permutation.

    ``self.ampl_rates`` is the swept variable; PCR cycle count, PCR error
    and sequencing error are held at their fixed values.
    """
    for perm in range(self.permutation_num):
        # Every per-permutation artefact lives under one working directory.
        work_dir = to('data/simu/monomer/bulk/ampl_rate/permute_') + str(perm) + '/'
        init_seq_setting = {
            'gspl': self.gspl,
            'umi_unit_pattern': 1,
            'umi_unit_len': self.umi_unit_len_fixed,
            'is_seed': True,
            'working_dir': work_dir,
            'is_sv_umi_lib': True,
            'umi_lib_fpn': work_dir,
            'condis': ['umi'],
            'sim_thres': self.sim_thres_fixed,
            'permutation': perm,
        }
        simu_params = {
            'init_seq_setting': init_seq_setting,
            'ampl_rates': self.ampl_rates,  # the swept variable
            'pcr_num': self.pcr_num_fixed,
            'err_num_met': 'nbinomial',
            'pcr_error': self.pcr_err_fixed,
            'seq_error': self.seq_err_fixed,
            'seq_sub_spl_rate': self.seq_sub_spl_rate,
            'use_seed': False,
            'seed': None,
            'write': {
                'fastq_fp': work_dir,
                'fastq_fn': '',
            },
        }
        print(simubulk(simu_params).ondemandAmplRates())
    return
if __name__ == "__main__":
    # Entry point: run one of the parameter sweeps of the bulk simulation.
    # Only the UMI-length sweep is active; the others are kept commented
    # for manual toggling.
    p = bulk()
    # print(p.pcrNums())
    #
    # print(p.pcrErrs())
    #
    # print(p.seqErrs())
    print(p.umiLens())
    # print(p.amplRates())
|
en
| 0.131261
|
# self.seq_len_fixed = 100 # self.umi_num_fixed = 50 # print(pcr_errs) # print(seq_errs) # 'seq_len': self.seq_len_fixed - self.umi_unit_len_fixed, # 'is_sv_seq_lib': True, # 'seq_lib_fpn': to('data/simu/monomer/bulk/pcr_num/permute_') + str(pn) + '/seq.txt', # 'seq_len': self.seq_len_fixed - self.umi_unit_len_fixed, # 'is_sv_seq_lib': True, # 'seq_lib_fpn': to('data/simu/monomer/bulk/pcr_err/permute_') + str(pn) + '/seq.txt', # 'seq_len': self.seq_len_fixed - self.umi_unit_len_fixed, # 'is_sv_seq_lib':True, # 'seq_lib_fpn':to('data/simu/monomer/bulk/seq_err/permute_') + str(pn) + '/seq.txt', # 'seq_len': self.seq_len_fixed, # 'is_sv_seq_lib': True, # 'seq_lib_fpn': to('data/simu/monomer/bulk/umi_len/permute_') + str(pn) + '/', # 'sim_thres': self.sim_thres_fixed, # 'seq_len': self.seq_len_fixed - self.umi_unit_len_fixed, # 'is_sv_seq_lib': True, # 'seq_lib_fpn': to('data/simu/monomer/bulk/ampl_rate/permute_') + str(pn) + '/seq.txt', # print(p.pcrNums()) # # print(p.pcrErrs()) # # print(p.seqErrs()) # print(p.amplRates())
| 2.205883
| 2
|
apps/chat/engine/utils.py
|
SeniorDev34/Django_React_Chat
| 58
|
6625731
|
<gh_stars>10-100
import calendar


def timestamp(dt):
    """Return *dt* as integer seconds since the Unix epoch.

    The datetime is interpreted as UTC; sub-second precision is discarded
    by ``timetuple()``.
    """
    utc_struct = dt.timetuple()
    return int(calendar.timegm(utc_struct))
|
import calendar
def timestamp(dt):
return int(calendar.timegm(dt.timetuple()))
|
none
| 1
| 2.318269
| 2
|
|
atx/record/monkey.py
|
jamjven/ATX
| 1,132
|
6625732
|
<reponame>jamjven/ATX<filename>atx/record/monkey.py
#-*- encoding: utf-8 -*-
# I'm Shakespeare!
import re
import cv2
import time
import warnings
import traceback
from random import randint, choice
from scene_detector import SceneDetector
class Reporter(object):
    """Abstract log collector driven by the monkey loop: prepare -> collect* -> dump."""
    def prepare(self, device, package=None, pids=None):
        '''called before loop. initialize device related stuff.'''
        raise NotImplementedError()
    def collect(self):
        '''called every run. collect logs.'''
        raise NotImplementedError()
    def dump(self):
        '''called after loop. dump logs.'''
        raise NotImplementedError()
class AdbLineReporter(Reporter):
    """Reporter that runs an adb shell command and buffers matching output lines."""
    name = 'shell'    # prefix of the dumped log filename
    filter_by = None  # 'package' or 'pids' in subclasses; None keeps every line
    def __init__(self):
        self.buffer = []
        # cache grep condition
        self.package = None
        self.pids = None
        self.grep = None
        self.prepared = False
    def prepare(self, device, package=None, pids=None):
        # Build the line filter once, from either the package name or the pid list.
        self.device = device
        self.package = package
        self.pids = pids
        if self.filter_by == 'package' and package is not None:
            self.grep = re.compile('%s' % package) # there may be dot in package name but that's ok.
        elif self.filter_by == 'pids' and pids is not None:
            self.grep = re.compile('|'.join([str(p) for p in pids]))
        self.prepared = True
    def collect(self):
        # Run the shell command and keep the non-empty lines that pass the filter.
        if not self.prepared:
            return
        cmd = self.command()
        lines = self.device.adb_shell(cmd).split('\n')
        # print cmd, len(lines), lines[0]
        for line in lines:
            line = line.strip()
            if not line:
                continue
            if self.grep is not None and self.grep.search(line) is None:
                continue
            self.process_line(line)
            self.buffer.append(line)
    def process_line(self, line):
        # Hook for subclasses; called once per accepted line.
        pass
    def dump(self):
        # Write the buffered lines to a timestamped log file in the cwd.
        if not self.buffer:
            print 'nothing to dump'
            return
        filename = '%s_%s.log' % (self.name, time.strftime('%m%d_%H%M%S'))
        if self.package is not None:
            filename = '%s_%s' % (self.package, filename)
        print 'dump report to', filename
        with open(filename, 'w') as f:
            f.write('\n'.join(self.buffer))
class LogcatReporter(AdbLineReporter):
    """Collects logcat output newer than the last seen timestamp, filtered by pid."""
    name = 'logcat'
    timefmt = '%m-%d %H:%M:%S.0'  # logcat '-v time' timestamp shape, millis zeroed
    timepat = re.compile('\d\d-\d\d\s+\d\d:\d\d:\d\d\.\d+')
    filter_by = 'pids'
    def __init__(self):
        super(LogcatReporter, self).__init__()
        # Start collecting from "now"; advanced as newer lines are seen.
        self.timestr = time.strftime(self.timefmt, time.localtime())
    def prepare(self, device, package=None, pids=None):
        super(LogcatReporter, self).prepare(device, package, pids)
        # Clear the logcat ring buffer so stale entries are not re-collected.
        self.device.adb_shell('logcat -c')
    def command(self):
        # '-t <time>' fetches only entries at or after the last seen timestamp.
        return "logcat -t '%s' -v time" % self.timestr
    def process_line(self, line):
        # Remember the newest timestamp so the next collect() resumes there.
        m = self.timepat.search(line)
        if m:
            self.timestr = m.group()
        # TODO
        # the last digits should be increased by 1,
        # or there will be some duplicated lines.

# TODO: clean anr/traces.txt on non-root devices.
class AnrTraceReporter(AdbLineReporter):
    """Dumps ANR (Application Not Responding) stack traces from the device."""
    name = 'anr'
    def command(self):
        return 'cat /data/anr/traces.txt'

# Reporters attached to every Monkey run by default.
_default_reporters = (LogcatReporter,)
class Monkey(object):
    """Random UI exerciser: repeatedly picks a weighted random action and drives
    a device, collecting logs through the attached reporters."""
    actions = ('touch', 'swipe', 'pinchin', 'pinchout', 'home', 'menu', 'back')
    delay = 0.5  # seconds between consecutive actions
    def __init__(self, probs):
        # Build a cumulative weight table scaled to [0, 10000] so that
        # next_action() can sample an action with a single randint.
        total = sum(probs.values())
        self.weights = []
        accum = 0
        for i in range(len(self.actions)):
            a = self.actions[i]
            w = probs.pop(a, 0)
            self.weights.append(int(accum*10000./total))
            accum += w
        self.weights.append(int(accum*10000./total))
        if probs:
            # Keys left over were not consumed above, i.e. unknown action names.
            warnings.warn('Unsupported actions: %s' % probs.keys())
        self.device = None
        self.reporters = [r() for r in _default_reporters]
    def run(self, device, package=None, maxruns=None):
        # Main loop: optionally launch the target app, then alternate
        # action -> log collection until maxruns is reached or Ctrl-C.
        self.device = device
        pids = None
        if package is not None:
            self.device.start_app(package)
            time.sleep(1)
            pids = self.device.get_package_pids(package)
        for reporter in self.reporters:
            reporter.prepare(device, package, pids)
        count = 0
        while maxruns is None or count < maxruns:
            try:
                time.sleep(self.delay)
                self.next_action()
                for reporter in self.reporters:
                    reporter.collect()
            except KeyboardInterrupt:
                break
            except:
                # Keep the monkey alive across arbitrary action failures.
                traceback.print_exc()
            count += 1
        for reporter in self.reporters:
            reporter.dump()
    def next_action(self):
        # Sample an action index from the cumulative weight table and
        # dispatch to the matching do_<action> method.
        r = randint(1, 10000)
        for i in range(len(self.actions)-1):
            if r <= self.weights[i+1]:
                break
        a = self.actions[i]
        func = getattr(self, 'do_%s' % a, None)
        if func is not None:
            func()
        else:
            print 'unknown action', a
    def do_touch(self):
        # Tap a uniformly random point on screen.
        w, h = self.device.display
        x, y = randint(1, w), randint(1, h)
        self.device.touch(x, y)
    def do_swipe(self):
        # Swipe between two uniformly random points.
        w, h = self.device.display
        x1, y1, x2, y2 = randint(1, w), randint(1, h), randint(1, w), randint(1, h)
        self.device.swipe(x1, y1, x2, y2)
    # def do_pinchin(self):
    #     w, h = self.device.display
    #     angle = randint(0, 360)
    # def do_pinchout(self):
    #     w, h = self.device.display
    #     angle = randint(0, 360)
    def do_home(self):
        self.device.home()
    def do_menu(self):
        self.device.menu()
    def do_back(self):
        self.device.back()
class StupidMonkey(Monkey):
    '''find touchables through hard work'''
    movestep = 30 #pixels
    def __init__(self, probs, scene_directory):
        super(StupidMonkey, self).__init__(probs)
        self.scene_detector = SceneDetector(scene_directory)
        self.scene_touches = {}  # scene -> next grid position to try
        self.scene_rects = {}    # scene -> {next scene: rect of taps that triggered it}
    def get_current_scene(self):
        # Detect the current scene on a half-size screenshot and draw the
        # known scene-transition rectangles for visual debugging.
        screen = self.device.screenshot_cv2()
        h, w = screen.shape[:2]
        img = cv2.resize(screen, (w/2, h/2))
        scene = self.scene_detector.detect(img)
        for rect in self.scene_rects.get(str(scene),{}).itervalues():
            l, t, r, b = rect
            cv2.rectangle(img, (l, t), (r, b), 255, 2)
        cv2.imshow('scene', img)
        cv2.waitKey(1)
        return scene
    def do_touch(self):
        # width, height = self.device.display
        width, height = 1080, 1920  # NOTE(review): hard-coded screen size — confirm per device
        scene = self.get_current_scene()
        if scene is None:
            # fall back to random point
            x, y = randint(1, width), randint(1, height)
        else:
            # Scan the screen grid left-to-right, top-to-bottom, one cell per call.
            pos = self.scene_touches.get(str(scene), 0)
            w, h = width/self.movestep, height/self.movestep # grid points
            dy, dx = divmod(pos, w-1)
            if dy >= h-1:
                # TODO: return a random clickable point
                x, y = randint(1, width), randint(1, height)
            else:
                x, y = (dx+1)*self.movestep, (dy+1)*self.movestep
                self.scene_touches[str(scene)] = pos+1
        self.last_touch_point = x, y
        self.device.touch(x, y)
        # watch what happend after touch
        if scene is None:
            return
        newscene = self.get_current_scene()
        if newscene is None or newscene == scene:
            return
        # Grow the bounding box of tap points known to switch scene -> newscene.
        s1 = str(scene)
        s2 = str(newscene)
        if s1 not in self.scene_rects:
            self.scene_rects[s1] = {}
        if s2 not in self.scene_rects[s1]:
            self.scene_rects[s1][s2] = (x, y, x, y)
        else:
            l, t, r, b = self.scene_rects[s1][s2]
            l, r = min(l, x), max(r, x)
            t, b = min(t, y), max(t, y)
            self.scene_rects[s1][s2] = (l, t, r, b)
    def do_swipe(self):
        # Swipes are disabled for this monkey.
        pass
class RandomContourMonkey(Monkey):
    """Monkey that taps inside contour-detected UI regions instead of blindly."""
    def do_touch(self):
        width, height = 1080, 1920  # NOTE(review): hard-coded screen size — confirm per device
        screen = self.device.screenshot_cv2()
        h, w = screen.shape[:2]
        img = cv2.resize(screen, (w/2, h/2))
        # Edge-detect and threshold to find candidate widget outlines.
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 80, 200)
        _, thresh = cv2.threshold(edges, 0, 255, cv2.THRESH_OTSU)
        contours, _ = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
        contours.sort(key=lambda cnt: len(cnt), reverse=True)
        rects = []
        for cnt in contours:
            hull = cv2.convexHull(cnt)
            hull_area = cv2.contourArea(hull)
            x,y,w,h = cv2.boundingRect(cnt)
            rect_area = float(w*h)
            # Drop tiny regions and regions that are far from convex.
            if w<20 or h<20 or rect_area<100:
                continue
            if hull_area/rect_area < 0.50:
                continue
            rects.append((x, y, x+w, y+h))
            cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
        if not rects:
            # No candidates: fall back to a uniformly random point.
            x, y = randint(1, width), randint(1, height)
        else:
            # Tap a random point inside a randomly chosen candidate rect.
            x1, y1, x2, y2 = choice(rects)
            x, y = randint(x1, x2), randint(y1, y2)
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
            # Rect coords are on the half-size image; scale up before mapping.
            x, y = self.device.screen2touch(x*2, y*2)
        self.device.touch(x, y)
        cv2.imshow('img', img)
        cv2.waitKey(1)
    def do_swipe(self):
        pass
if __name__ == '__main__':
    pass  # module is used as a library; no standalone entry point
|
#-*- encoding: utf-8 -*-
# I'm Shakespeare!
import re
import cv2
import time
import warnings
import traceback
from random import randint, choice
from scene_detector import SceneDetector
class Reporter(object):
def prepare(self, device, package=None, pids=None):
'''called before loop. initialize device related stuff.'''
raise NotImplementedError()
def collect(self):
'''called every run. collect logs.'''
raise NotImplementedError()
def dump(self):
'''called after loop. dump logs.'''
raise NotImplementedError()
class AdbLineReporter(Reporter):
name = 'shell'
filter_by = None
def __init__(self):
self.buffer = []
# cache grep condition
self.package = None
self.pids = None
self.grep = None
self.prepared = False
def prepare(self, device, package=None, pids=None):
self.device = device
self.package = package
self.pids = pids
if self.filter_by == 'package' and package is not None:
self.grep = re.compile('%s' % package) # there may be dot in package name but that's ok.
elif self.filter_by == 'pids' and pids is not None:
self.grep = re.compile('|'.join([str(p) for p in pids]))
self.prepared = True
def collect(self):
if not self.prepared:
return
cmd = self.command()
lines = self.device.adb_shell(cmd).split('\n')
# print cmd, len(lines), lines[0]
for line in lines:
line = line.strip()
if not line:
continue
if self.grep is not None and self.grep.search(line) is None:
continue
self.process_line(line)
self.buffer.append(line)
def process_line(self, line):
pass
def dump(self):
if not self.buffer:
print 'nothing to dump'
return
filename = '%s_%s.log' % (self.name, time.strftime('%m%d_%H%M%S'))
if self.package is not None:
filename = '%s_%s' % (self.package, filename)
print 'dump report to', filename
with open(filename, 'w') as f:
f.write('\n'.join(self.buffer))
class LogcatReporter(AdbLineReporter):
name = 'logcat'
timefmt = '%m-%d %H:%M:%S.0'
timepat = re.compile('\d\d-\d\d\s+\d\d:\d\d:\d\d\.\d+')
filter_by = 'pids'
def __init__(self):
super(LogcatReporter, self).__init__()
self.timestr = time.strftime(self.timefmt, time.localtime())
def prepare(self, device, package=None, pids=None):
super(LogcatReporter, self).prepare(device, package, pids)
self.device.adb_shell('logcat -c')
def command(self):
return "logcat -t '%s' -v time" % self.timestr
def process_line(self, line):
m = self.timepat.search(line)
if m:
self.timestr = m.group()
# TODO
# the last digits should be increased by 1,
# or there will be some duplicated lines.
# TODO: clean anr/traces.txt on non-root devices.
class AnrTraceReporter(AdbLineReporter):
name = 'anr'
def command(self):
return 'cat /data/anr/traces.txt'
_default_reporters = (LogcatReporter,)
class Monkey(object):
actions = ('touch', 'swipe', 'pinchin', 'pinchout', 'home', 'menu', 'back')
delay = 0.5
def __init__(self, probs):
total = sum(probs.values())
self.weights = []
accum = 0
for i in range(len(self.actions)):
a = self.actions[i]
w = probs.pop(a, 0)
self.weights.append(int(accum*10000./total))
accum += w
self.weights.append(int(accum*10000./total))
if probs:
warnings.warn('Unsupported actions: %s' % probs.keys())
self.device = None
self.reporters = [r() for r in _default_reporters]
def run(self, device, package=None, maxruns=None):
self.device = device
pids = None
if package is not None:
self.device.start_app(package)
time.sleep(1)
pids = self.device.get_package_pids(package)
for reporter in self.reporters:
reporter.prepare(device, package, pids)
count = 0
while maxruns is None or count < maxruns:
try:
time.sleep(self.delay)
self.next_action()
for reporter in self.reporters:
reporter.collect()
except KeyboardInterrupt:
break
except:
traceback.print_exc()
count += 1
for reporter in self.reporters:
reporter.dump()
def next_action(self):
r = randint(1, 10000)
for i in range(len(self.actions)-1):
if r <= self.weights[i+1]:
break
a = self.actions[i]
func = getattr(self, 'do_%s' % a, None)
if func is not None:
func()
else:
print 'unknown action', a
def do_touch(self):
w, h = self.device.display
x, y = randint(1, w), randint(1, h)
self.device.touch(x, y)
def do_swipe(self):
w, h = self.device.display
x1, y1, x2, y2 = randint(1, w), randint(1, h), randint(1, w), randint(1, h)
self.device.swipe(x1, y1, x2, y2)
# def do_pinchin(self):
# w, h = self.device.display
# angle = randint(0, 360)
# def do_pinchout(self):
# w, h = self.device.display
# angle = randint(0, 360)
def do_home(self):
self.device.home()
def do_menu(self):
self.device.menu()
def do_back(self):
self.device.back()
class StupidMonkey(Monkey):
'''find touchables through hard work'''
movestep = 30 #pixels
def __init__(self, probs, scene_directory):
super(StupidMonkey, self).__init__(probs)
self.scene_detector = SceneDetector(scene_directory)
self.scene_touches = {}
self.scene_rects = {}
def get_current_scene(self):
screen = self.device.screenshot_cv2()
h, w = screen.shape[:2]
img = cv2.resize(screen, (w/2, h/2))
scene = self.scene_detector.detect(img)
for rect in self.scene_rects.get(str(scene),{}).itervalues():
l, t, r, b = rect
cv2.rectangle(img, (l, t), (r, b), 255, 2)
cv2.imshow('scene', img)
cv2.waitKey(1)
return scene
def do_touch(self):
# width, height = self.device.display
width, height = 1080, 1920
scene = self.get_current_scene()
if scene is None:
# fall back to random point
x, y = randint(1, width), randint(1, height)
else:
pos = self.scene_touches.get(str(scene), 0)
w, h = width/self.movestep, height/self.movestep # grid points
dy, dx = divmod(pos, w-1)
if dy >= h-1:
# TODO: return a random clickable point
x, y = randint(1, width), randint(1, height)
else:
x, y = (dx+1)*self.movestep, (dy+1)*self.movestep
self.scene_touches[str(scene)] = pos+1
self.last_touch_point = x, y
self.device.touch(x, y)
# watch what happend after touch
if scene is None:
return
newscene = self.get_current_scene()
if newscene is None or newscene == scene:
return
s1 = str(scene)
s2 = str(newscene)
if s1 not in self.scene_rects:
self.scene_rects[s1] = {}
if s2 not in self.scene_rects[s1]:
self.scene_rects[s1][s2] = (x, y, x, y)
else:
l, t, r, b = self.scene_rects[s1][s2]
l, r = min(l, x), max(r, x)
t, b = min(t, y), max(t, y)
self.scene_rects[s1][s2] = (l, t, r, b)
def do_swipe(self):
pass
class RandomContourMonkey(Monkey):
def do_touch(self):
width, height = 1080, 1920
screen = self.device.screenshot_cv2()
h, w = screen.shape[:2]
img = cv2.resize(screen, (w/2, h/2))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 80, 200)
_, thresh = cv2.threshold(edges, 0, 255, cv2.THRESH_OTSU)
contours, _ = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
contours.sort(key=lambda cnt: len(cnt), reverse=True)
rects = []
for cnt in contours:
hull = cv2.convexHull(cnt)
hull_area = cv2.contourArea(hull)
x,y,w,h = cv2.boundingRect(cnt)
rect_area = float(w*h)
if w<20 or h<20 or rect_area<100:
continue
if hull_area/rect_area < 0.50:
continue
rects.append((x, y, x+w, y+h))
cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
if not rects:
x, y = randint(1, width), randint(1, height)
else:
x1, y1, x2, y2 = choice(rects)
x, y = randint(x1, x2), randint(y1, y2)
cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
x, y = self.device.screen2touch(x*2, y*2)
self.device.touch(x, y)
cv2.imshow('img', img)
cv2.waitKey(1)
def do_swipe(self):
pass
if __name__ == '__main__':
pass
|
en
| 0.730939
|
#-*- encoding: utf-8 -*- # I'm Shakespeare! called before loop. initialize device related stuff. called every run. collect logs. called after loop. dump logs. # cache grep condition # there may be dot in package name but that's ok. # print cmd, len(lines), lines[0] # TODO # the last digits should be increased by 1, # or there will be some duplicated lines. # TODO: clean anr/traces.txt on non-root devices. # def do_pinchin(self): # w, h = self.device.display # angle = randint(0, 360) # def do_pinchout(self): # w, h = self.device.display # angle = randint(0, 360) find touchables through hard work #pixels # width, height = self.device.display # fall back to random point # grid points # TODO: return a random clickable point # watch what happend after touch
| 2.326771
| 2
|
Algorithm.Python/RawDataRegressionAlgorithm.py
|
echoplaza/Lean
| 1
|
6625733
|
<reponame>echoplaza/Lean
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
from QuantConnect.Data.Auxiliary import *
from QuantConnect.Lean.Engine.DataFeeds import DefaultDataProvider
_ticker = "GOOGL";
_expectedRawPrices = [ 1157.93, 1158.72,
1131.97, 1114.28, 1120.15, 1114.51, 1134.89, 567.55, 571.50, 545.25, 540.63 ]
# <summary>
# In this algorithm we demonstrate how to use the raw data for our securities
# and verify that the behavior is correct.
# </summary>
# <meta name="tag" content="using data" />
# <meta name="tag" content="regression test" />
class RawDataRegressionAlgorithm(QCAlgorithm):
    """Regression test: with raw data normalization, each daily close must
    equal the unadjusted (raw) price, including across GOOGL's 2014 split."""
    def Initialize(self):
        self.SetStartDate(2014, 3, 25)
        self.SetEndDate(2014, 4, 7)
        self.SetCash(100000)
        # Set our DataNormalizationMode to raw
        self.UniverseSettings.DataNormalizationMode = DataNormalizationMode.Raw
        self._googl = self.AddEquity(_ticker, Resolution.Daily).Symbol
        # Build the provider chain needed to resolve the symbol's factor file.
        dataProvider = DefaultDataProvider()
        mapFileProvider = LocalDiskMapFileProvider()
        mapFileProvider.Initialize(dataProvider)
        factorFileProvider = LocalDiskFactorFileProvider()
        factorFileProvider.Initialize(mapFileProvider, dataProvider)
        # Get our factor file for this regression
        self._factorFile = factorFileProvider.Get(self._googl)
    def OnData(self, data):
        if not self.Portfolio.Invested:
            self.SetHoldings(self._googl, 1)
        if data.Bars.ContainsKey(self._googl):
            googlData = data.Bars[self._googl]
            # Assert the close price matches the expected raw price.
            # (Note: pops destructively from the module-level list, so the
            # algorithm can only run once per process.)
            expectedRawPrice = _expectedRawPrices.pop(0)
            if expectedRawPrice != googlData.Close:
                # Our values don't match lets try and give a reason why
                dayFactor = self._factorFile.GetPriceScaleFactor(googlData.Time)
                probableRawPrice = googlData.Close / dayFactor # Undo adjustment
                raise Exception("Close price was incorrect; it appears to be the adjusted value"
                                if expectedRawPrice == probableRawPrice else
                                "Close price was incorrect; Data may have changed.")
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
from QuantConnect.Data.Auxiliary import *
from QuantConnect.Lean.Engine.DataFeeds import DefaultDataProvider
_ticker = "GOOGL";
_expectedRawPrices = [ 1157.93, 1158.72,
1131.97, 1114.28, 1120.15, 1114.51, 1134.89, 567.55, 571.50, 545.25, 540.63 ]
# <summary>
# In this algorithm we demonstrate how to use the raw data for our securities
# and verify that the behavior is correct.
# </summary>
# <meta name="tag" content="using data" />
# <meta name="tag" content="regression test" />
class RawDataRegressionAlgorithm(QCAlgorithm):
def Initialize(self):
self.SetStartDate(2014, 3, 25)
self.SetEndDate(2014, 4, 7)
self.SetCash(100000)
# Set our DataNormalizationMode to raw
self.UniverseSettings.DataNormalizationMode = DataNormalizationMode.Raw
self._googl = self.AddEquity(_ticker, Resolution.Daily).Symbol
# Get our factor file for this regression
dataProvider = DefaultDataProvider()
mapFileProvider = LocalDiskMapFileProvider()
mapFileProvider.Initialize(dataProvider)
factorFileProvider = LocalDiskFactorFileProvider()
factorFileProvider.Initialize(mapFileProvider, dataProvider)
# Get our factor file for this regression
self._factorFile = factorFileProvider.Get(self._googl)
def OnData(self, data):
if not self.Portfolio.Invested:
self.SetHoldings(self._googl, 1)
if data.Bars.ContainsKey(self._googl):
googlData = data.Bars[self._googl]
# Assert our volume matches what we expected
expectedRawPrice = _expectedRawPrices.pop(0)
if expectedRawPrice != googlData.Close:
# Our values don't match lets try and give a reason why
dayFactor = self._factorFile.GetPriceScaleFactor(googlData.Time)
probableRawPrice = googlData.Close / dayFactor # Undo adjustment
raise Exception("Close price was incorrect; it appears to be the adjusted value"
if expectedRawPrice == probableRawPrice else
"Close price was incorrect; Data may have changed.")
|
en
| 0.808985
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals. # Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <summary> # In this algorithm we demonstrate how to use the raw data for our securities # and verify that the behavior is correct. # </summary> # <meta name="tag" content="using data" /> # <meta name="tag" content="regression test" /> # Set our DataNormalizationMode to raw # Get our factor file for this regression # Get our factor file for this regression # Assert our volume matches what we expected # Our values don't match lets try and give a reason why # Undo adjustment
| 1.930087
| 2
|
python/1 - Intro to Python for Data Science/2- Python Lists/5- Slicing and dicing.py
|
Gabriela-Santos/datacamp
| 0
|
6625734
|
# Floor areas, alternating room name and size in square metres.
areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75, "bathroom", 9.50]

# Split the list: first three rooms are downstairs, the rest upstairs.
split_at = 6
downstairs = areas[:split_at]
upstairs = areas[split_at:]

# Show both halves.
print(downstairs)
print(upstairs)
|
# Create the areas list
areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75, "bathroom", 9.50]
# Use slicing to create downstairs
downstairs = areas[ :6]
# Use slicing to create upstairs
upstairs = areas[6: ]
# Print out downstairs and upstairs
print(downstairs)
print(upstairs)
|
en
| 0.697555
|
# Create the areas list # Use slicing to create downstairs # Use slicing to create upstairs # Print out downstairs and upstairs
| 4.058986
| 4
|
changelogs/custom/pypi/djangorestframework.py
|
chris48s/changelogs
| 0
|
6625735
|
def get_urls(releases, **kwargs):
    """Return the changelog URL candidates for djangorestframework.

    The release notes are kept in the docs tree on the current (master)
    branch and on the legacy 2.4.x branch. The second element of the
    returned pair — the set of URLs to skip — is always empty here.
    """
    base = 'https://raw.githubusercontent.com/tomchristie/django-rest-framework/'
    branches = ('master', 'version-2.4.x')
    urls = [base + branch + '/docs/topics/release-notes.md' for branch in branches]
    return urls, set()
|
def get_urls(releases, **kwargs):
return [
'https://raw.githubusercontent.com/tomchristie/django-rest-framework/master/docs/topics/release-notes.md',
'https://raw.githubusercontent.com/tomchristie/django-rest-framework/version-2.4.x/docs/topics/release-notes.md'
], set()
|
none
| 1
| 1.601292
| 2
|
|
k3log/test/stdlog.py
|
drmingdrmer/gift
| 0
|
6625736
|
<filename>k3log/test/stdlog.py
# Demonstrates k3log: a file logger at INFO plus a stdout handler at ERROR.
import logging
import k3log

# File logger writing under /tmp with filename 'stdlog'; message-only format.
logger = k3log.make_logger('/tmp', log_fn='stdlog', level='INFO',
                           fmt='message')
# Mirror ERROR-and-above records to stdout as well.
k3log.add_std_handler(logger, 'stdout', fmt='message', level=logging.ERROR)

logger.debug('debug')  # below INFO: dropped
logger.info('stdlog')  # file only
logger.error('error')  # file and stdout
|
<filename>k3log/test/stdlog.py
import logging
import k3log
logger = k3log.make_logger('/tmp', log_fn='stdlog', level='INFO',
fmt='message')
k3log.add_std_handler(logger, 'stdout', fmt='message', level=logging.ERROR)
logger.debug('debug')
logger.info('stdlog')
logger.error('error')
|
none
| 1
| 2.067764
| 2
|
|
tests/core/views.py
|
MatheusCE/import-export-customized
| 1
|
6625737
|
from django.views.generic.list import ListView
from import_export import mixins
from . import models


class CategoryExportView(mixins.ExportViewFormMixin, ListView):
    """List view exposing Category records through the import-export export form."""
    model = models.Category
|
from django.views.generic.list import ListView
from import_export import mixins
from . import models
class CategoryExportView(mixins.ExportViewFormMixin, ListView):
model = models.Category
|
none
| 1
| 1.637816
| 2
|
|
sepsisSimDiabetes/DataGenerator.py
|
GuyLor/gumbel_max_causal_gadgets_part2
| 0
|
6625738
|
<reponame>GuyLor/gumbel_max_causal_gadgets_part2<gh_stars>0
import numpy as np, random
from .MDP import MDP
from .State import State
from .Action import Action
from tqdm import tqdm_notebook as tqdm
'''
Simulates data generation from an MDP
'''
class DataGenerator(object):
def select_actions(self, state, policy):
    '''
    Return the policy's action for *state*.

    States missing from the policy fall back to a uniformly random
    action index in [0, 8).
    '''
    if state in policy:
        return policy[state]
    return Action(action_idx=np.random.randint(8))
def simulate(self, num_iters, max_num_steps,
policy=None, policy_idx_type='full', p_diabetes=0.2,
output_state_idx_type='obs', use_tqdm=False, tqdm_desc=''):
'''
policy is an array of probabilities
'''
assert policy is not None, "Please specify a policy"
# Set the default value of states / actions to negative -1,
# corresponding to None
iter_states = np.ones((num_iters, max_num_steps+1, 1), dtype=int)*(-1)
iter_actions = np.ones((num_iters, max_num_steps, 1), dtype=int)*(-1)
iter_rewards = np.zeros((num_iters, max_num_steps, 1))
iter_lengths = np.zeros((num_iters, 1), dtype=int)
# Record diabetes, the hidden mixture component
iter_component = np.zeros((num_iters, max_num_steps, 1), dtype=int)
mdp = MDP(init_state_idx=None, # Random initial state
policy_array=policy, policy_idx_type=policy_idx_type,
p_diabetes=p_diabetes)
# Empirical transition / reward matrix
if output_state_idx_type == 'obs':
emp_tx_mat = np.zeros((Action.NUM_ACTIONS_TOTAL,
State.NUM_OBS_STATES, State.NUM_OBS_STATES))
emp_r_mat = np.zeros((Action.NUM_ACTIONS_TOTAL,
State.NUM_OBS_STATES, State.NUM_OBS_STATES))
elif output_state_idx_type == 'full':
emp_tx_mat = np.zeros((Action.NUM_ACTIONS_TOTAL,
State.NUM_FULL_STATES, State.NUM_FULL_STATES))
emp_r_mat = np.zeros((Action.NUM_ACTIONS_TOTAL,
State.NUM_FULL_STATES, State.NUM_FULL_STATES))
elif output_state_idx_type == 'proj_obs':
emp_tx_mat = np.zeros((Action.NUM_ACTIONS_TOTAL,
State.NUM_PROJ_OBS_STATES, State.NUM_PROJ_OBS_STATES))
emp_r_mat = np.zeros((Action.NUM_ACTIONS_TOTAL,
State.NUM_PROJ_OBS_STATES, State.NUM_PROJ_OBS_STATES))
else:
raise NotImplementedError()
for itr in tqdm(range(num_iters), disable=not(use_tqdm), desc=tqdm_desc):
# MDP will generate the diabetes index as well
mdp.state = mdp.get_new_state()
this_diabetic_idx = mdp.state.diabetic_idx
iter_component[itr, :] = this_diabetic_idx # Never changes
iter_states[itr, 0, 0] = mdp.state.get_state_idx(
idx_type=output_state_idx_type)
for step in range(max_num_steps):
step_action = mdp.select_actions()
this_action_idx = step_action.get_action_idx().astype(int)
this_from_state_idx = mdp.state.get_state_idx(
idx_type=output_state_idx_type).astype(int)
# Take the action, new state is property of the MDP
step_reward = mdp.transition(step_action)
this_to_state_idx = mdp.state.get_state_idx(
idx_type=output_state_idx_type).astype(int)
iter_actions[itr, step, 0] = this_action_idx
iter_states[itr, step+1, 0] = this_to_state_idx
# Record in transition matrix
emp_tx_mat[this_action_idx,
this_from_state_idx, this_to_state_idx] += 1
emp_r_mat[this_action_idx,
this_from_state_idx, this_to_state_idx] += step_reward
if step_reward != 0:
iter_rewards[itr, step, 0] = step_reward
iter_lengths[itr, 0] = step+1
break
if step == max_num_steps-1:
iter_lengths[itr, 0] = max_num_steps
return iter_states, iter_actions, iter_lengths, iter_rewards, iter_component, emp_tx_mat, emp_r_mat
|
import numpy as np, random
from .MDP import MDP
from .State import State
from .Action import Action
from tqdm import tqdm_notebook as tqdm
'''
Simulates data generation from an MDP
'''
class DataGenerator(object):
def select_actions(self, state, policy):
'''
select action for state from policy
if unspecified, a random action is returned
'''
if state not in policy:
return Action(action_idx = np.random.randint(8))
return policy[state]
def simulate(self, num_iters, max_num_steps,
policy=None, policy_idx_type='full', p_diabetes=0.2,
output_state_idx_type='obs', use_tqdm=False, tqdm_desc=''):
'''
policy is an array of probabilities
'''
assert policy is not None, "Please specify a policy"
# Set the default value of states / actions to negative -1,
# corresponding to None
iter_states = np.ones((num_iters, max_num_steps+1, 1), dtype=int)*(-1)
iter_actions = np.ones((num_iters, max_num_steps, 1), dtype=int)*(-1)
iter_rewards = np.zeros((num_iters, max_num_steps, 1))
iter_lengths = np.zeros((num_iters, 1), dtype=int)
# Record diabetes, the hidden mixture component
iter_component = np.zeros((num_iters, max_num_steps, 1), dtype=int)
mdp = MDP(init_state_idx=None, # Random initial state
policy_array=policy, policy_idx_type=policy_idx_type,
p_diabetes=p_diabetes)
# Empirical transition / reward matrix
if output_state_idx_type == 'obs':
emp_tx_mat = np.zeros((Action.NUM_ACTIONS_TOTAL,
State.NUM_OBS_STATES, State.NUM_OBS_STATES))
emp_r_mat = np.zeros((Action.NUM_ACTIONS_TOTAL,
State.NUM_OBS_STATES, State.NUM_OBS_STATES))
elif output_state_idx_type == 'full':
emp_tx_mat = np.zeros((Action.NUM_ACTIONS_TOTAL,
State.NUM_FULL_STATES, State.NUM_FULL_STATES))
emp_r_mat = np.zeros((Action.NUM_ACTIONS_TOTAL,
State.NUM_FULL_STATES, State.NUM_FULL_STATES))
elif output_state_idx_type == 'proj_obs':
emp_tx_mat = np.zeros((Action.NUM_ACTIONS_TOTAL,
State.NUM_PROJ_OBS_STATES, State.NUM_PROJ_OBS_STATES))
emp_r_mat = np.zeros((Action.NUM_ACTIONS_TOTAL,
State.NUM_PROJ_OBS_STATES, State.NUM_PROJ_OBS_STATES))
else:
raise NotImplementedError()
for itr in tqdm(range(num_iters), disable=not(use_tqdm), desc=tqdm_desc):
# MDP will generate the diabetes index as well
mdp.state = mdp.get_new_state()
this_diabetic_idx = mdp.state.diabetic_idx
iter_component[itr, :] = this_diabetic_idx # Never changes
iter_states[itr, 0, 0] = mdp.state.get_state_idx(
idx_type=output_state_idx_type)
for step in range(max_num_steps):
step_action = mdp.select_actions()
this_action_idx = step_action.get_action_idx().astype(int)
this_from_state_idx = mdp.state.get_state_idx(
idx_type=output_state_idx_type).astype(int)
# Take the action, new state is property of the MDP
step_reward = mdp.transition(step_action)
this_to_state_idx = mdp.state.get_state_idx(
idx_type=output_state_idx_type).astype(int)
iter_actions[itr, step, 0] = this_action_idx
iter_states[itr, step+1, 0] = this_to_state_idx
# Record in transition matrix
emp_tx_mat[this_action_idx,
this_from_state_idx, this_to_state_idx] += 1
emp_r_mat[this_action_idx,
this_from_state_idx, this_to_state_idx] += step_reward
if step_reward != 0:
iter_rewards[itr, step, 0] = step_reward
iter_lengths[itr, 0] = step+1
break
if step == max_num_steps-1:
iter_lengths[itr, 0] = max_num_steps
return iter_states, iter_actions, iter_lengths, iter_rewards, iter_component, emp_tx_mat, emp_r_mat
|
en
| 0.786317
|
Simulates data generation from an MDP select action for state from policy if unspecified, a random action is returned policy is an array of probabilities # Set the default value of states / actions to negative -1, # corresponding to None # Record diabetes, the hidden mixture component # Random initial state # Empirical transition / reward matrix # MDP will generate the diabetes index as well # Never changes # Take the action, new state is property of the MDP # Record in transition matrix
| 2.741795
| 3
|
test/test_web_bags.py
|
tiddlyweb/tiddlyweb
| 57
|
6625739
|
<filename>test/test_web_bags.py<gh_stars>10-100
from .fixtures import reset_textstore, _teststore, initialize_app, get_http
from tiddlyweb.model.bag import Bag
http = get_http()
def setup_module(module):
    """Start the test app over a fresh text store seeded with bag0..bag4."""
    initialize_app()
    reset_textstore()
    module.store = _teststore()
    for i in range(5):
        bag = Bag('bag%s' % i)
        module.store.put(bag)
def test_get_bags_txt():
    """GET /bags as text/plain lists every seeded bag and honours ETags."""
    response, content = http.requestU(
        'http://our_test_domain:8001/bags',
        headers={'Accept': 'text/plain'},
        method='GET')
    assert response['status'] == '200'
    # All five seeded bags appear, one per line.
    for i in range(5):
        assert 'bag%s\n' % i in content
    assert 'etag' in response
    etag = response['etag']
    # A matching If-None-Match must short-circuit to 304 Not Modified...
    response, content = http.requestU(
        'http://our_test_domain:8001/bags',
        headers={'Accept': 'text/plain',
            'if-none-match': etag},
        method='GET')
    assert response['status'] == '304', content
    # ...while a stale etag yields a full 200 response again.
    response, content = http.requestU('http://our_test_domain:8001/bags',
        headers={'Accept': 'text/plain',
            'if-none-match': etag + 'foo'},
        method='GET')
    assert response['status'] == '200', content
def test_get_bags_filters():
response, content = http.requestU(
'http://our_test_domain:8001/bags?select=name:bag1',
headers={'Accept': 'text/plain'},
method='GET')
assert response['status'] == '200', content
assert 'bag1\n' in content
assert 'bag2\n' not in content
def test_get_bags_filters_bad():
response, content = http.requestU(
'http://our_test_domain:8001/bags?select=rbag:figgy',
headers={'Accept': 'text/plain'},
method='GET')
assert response['status'] == '400', content
assert 'malformed filter' in content
def test_get_bags_selected_sorted_filters():
response, content = http.requestU(
'http://our_test_domain:8001/bags?select=name:>bag2',
headers={'Accept': 'text/plain'},
method='GET')
assert response['status'] == '200', content
assert 'bag1\n' not in content
assert 'bag2\n' not in content
assert 'bag3\n' in content
def test_get_bags_sorted_filters():
response, content = http.requestU(
'http://our_test_domain:8001/bags?sort=-name',
headers={'Accept': 'text/plain'},
method='GET')
assert response['status'] == '200', content
assert 'bag4\nbag3\nbag2\nbag1\nbag0' in content
def test_get_bags_sorted_limitedfilters():
response, content = http.requestU(
'http://our_test_domain:8001/bags?sort=-name;limit=1,1',
headers={'Accept': 'text/plain'},
method='GET')
assert response['status'] == '200', content
assert content == 'bag3\n'
def test_get_bags_bad_filter():
response, content = http.requestU(
'http://our_test_domain:8001/bags?sort=title',
headers={'Accept': 'text/plain'},
method='GET')
assert response['status'] == '400', content
assert 'malformed filter' in content
|
<filename>test/test_web_bags.py<gh_stars>10-100
from .fixtures import reset_textstore, _teststore, initialize_app, get_http
from tiddlyweb.model.bag import Bag
http = get_http()
def setup_module(module):
initialize_app()
reset_textstore()
module.store = _teststore()
for i in range(5):
bag = Bag('bag%s' % i)
module.store.put(bag)
def test_get_bags_txt():
response, content = http.requestU(
'http://our_test_domain:8001/bags',
headers={'Accept': 'text/plain'},
method='GET')
assert response['status'] == '200'
for i in range(5):
assert 'bag%s\n' % i in content
assert 'etag' in response
etag = response['etag']
response, content = http.requestU(
'http://our_test_domain:8001/bags',
headers={'Accept': 'text/plain',
'if-none-match': etag},
method='GET')
assert response['status'] == '304', content
response, content = http.requestU('http://our_test_domain:8001/bags',
headers={'Accept': 'text/plain',
'if-none-match': etag + 'foo'},
method='GET')
assert response['status'] == '200', content
def test_get_bags_filters():
response, content = http.requestU(
'http://our_test_domain:8001/bags?select=name:bag1',
headers={'Accept': 'text/plain'},
method='GET')
assert response['status'] == '200', content
assert 'bag1\n' in content
assert 'bag2\n' not in content
def test_get_bags_filters_bad():
response, content = http.requestU(
'http://our_test_domain:8001/bags?select=rbag:figgy',
headers={'Accept': 'text/plain'},
method='GET')
assert response['status'] == '400', content
assert 'malformed filter' in content
def test_get_bags_selected_sorted_filters():
response, content = http.requestU(
'http://our_test_domain:8001/bags?select=name:>bag2',
headers={'Accept': 'text/plain'},
method='GET')
assert response['status'] == '200', content
assert 'bag1\n' not in content
assert 'bag2\n' not in content
assert 'bag3\n' in content
def test_get_bags_sorted_filters():
response, content = http.requestU(
'http://our_test_domain:8001/bags?sort=-name',
headers={'Accept': 'text/plain'},
method='GET')
assert response['status'] == '200', content
assert 'bag4\nbag3\nbag2\nbag1\nbag0' in content
def test_get_bags_sorted_limitedfilters():
response, content = http.requestU(
'http://our_test_domain:8001/bags?sort=-name;limit=1,1',
headers={'Accept': 'text/plain'},
method='GET')
assert response['status'] == '200', content
assert content == 'bag3\n'
def test_get_bags_bad_filter():
response, content = http.requestU(
'http://our_test_domain:8001/bags?sort=title',
headers={'Accept': 'text/plain'},
method='GET')
assert response['status'] == '400', content
assert 'malformed filter' in content
|
none
| 1
| 2.109043
| 2
|
|
hoya_handler.py
|
glmck13/AlexaHomePi
| 0
|
6625740
|
<filename>hoya_handler.py
from lxml import html
import requests
import os
def hoya_handler(event, context):
    """Alexa skill handler for Hoya game information.

    Routes a LaunchRequest to a greeting, AskPi intents to the configured
    backend page (ALEXA_URL, scraped with lxml), and audio-player intents
    to play/stop directives.  Returns an Alexa-format response dict.

    Fixes vs. original: the LaunchRequest greeting was always clobbered by
    the goodbye message (the ``else`` branch ran whenever no backend query
    was made), and three bare ``except:`` clauses hid real errors.
    """
    query = {}
    speech = ''
    audio = ''
    stopplay = False
    requesttype = event['request']['type']
    shouldEndSession = True
    if requesttype == "LaunchRequest":
        speech = "<NAME>! Ask me for a recap of our last game, or details about our upcoming game. How can I help you?"
        shouldEndSession = False
    elif requesttype == "IntentRequest":
        intent = event['request']['intent']
        intentname = intent['name']
        # Only a missing 'slots' key is an expected condition here.
        try:
            slots = intent['slots']
        except KeyError:
            slots = {}
        if intentname == "AskPi":
            query['Intent'] = intentname
            query['Trigger'] = "Hoya"
            try:
                query['Enum'] = slots['enum']['value']
            except KeyError:
                # Slot not supplied; the backend handles an unqualified query.
                pass
        elif intentname in ("AMAZON.PauseIntent", "AMAZON.CancelIntent",
                            "AMAZON.StopIntent"):
            stopplay = True
        elif intentname in ("AMAZON.ResumeIntent", "AMAZON.HelpIntent"):
            pass
    if query:
        # Ask the backend page and scrape the spoken text plus optional audio.
        page = requests.get(os.environ.get('ALEXA_URL'), auth=(os.environ.get('ALEXA_USER'), os.environ.get('ALEXA_PASS')), params=query)
        tree = html.fromstring(page.content)
        speech = tree.xpath('//body/p/text()')[0]
        try:
            audio = tree.xpath('//body//audio/source/@src')[0]
        except IndexError:
            audio = ''
    elif not speech:
        # No backend query and no canned speech (stop/cancel/etc.): say goodbye.
        speech = "Come back soon! Goodbye!"
    response = {
        "version": "1.0",
        "sessionAttributes": {},
        "response": {
            "outputSpeech": {
                "type": "PlainText",
                "text": speech
            },
            "reprompt": {
                "outputSpeech": {
                    "type": "PlainText",
                    "text": ""
                }
            },
            "card": {
                "type": "Simple",
                "title": "<NAME>",
                "content": ""
            },
            "shouldEndSession": shouldEndSession
        }
    }
    if audio:
        # Attach the audio source to the card and queue it for playback.
        response['response']['card']['content'] = speech + " [" + audio + "] "
        response['response']['directives'] = [
            {
                "type": "AudioPlayer.Play",
                "playBehavior": "REPLACE_ALL",
                "audioItem": {
                    "stream": {
                        "token": audio,
                        "url": audio,
                        "offsetInMilliseconds": 0
                    }
                }
            }
        ]
    if stopplay:
        response['response']['directives'] = [
            {
                "type": "AudioPlayer.ClearQueue",
                "clearBehavior": "CLEAR_ALL"
            }
        ]
    return response
|
<filename>hoya_handler.py
from lxml import html
import requests
import os
def hoya_handler(event, context):
query = {}
speech = ''; audio = ''; stopplay = False
requesttype = event['request']['type']
shouldEndSession = True
if requesttype == "LaunchRequest":
speech = "<NAME>! Ask me for a recap of our last game, or details about our upcoming game. How can I help you?"
shouldEndSession = False
elif requesttype == "IntentRequest":
intent = event['request']['intent']
intentname = intent['name']
try:
slots = intent['slots']
except:
slots = {}
if intentname == "AskPi":
query['Intent'] = intentname
query['Trigger'] = "Hoya"
try:
query['Enum'] = slots['enum']['value']
except:
pass
elif intentname == "AMAZON.PauseIntent" or intentname == "AMAZON.CancelIntent" or intentname == "AMAZON.StopIntent":
stopplay = True
elif intentname == "AMAZON.ResumeIntent" or intentname == "AMAZON.HelpIntent":
pass
if query:
page = requests.get(os.environ.get('ALEXA_URL'), auth=(os.environ.get('ALEXA_USER'), os.environ.get('ALEXA_PASS')), params=query)
tree = html.fromstring(page.content)
speech = tree.xpath('//body/p/text()')[0]
try:
audio = tree.xpath('//body//audio/source/@src')[0]
except:
audio = ''
else:
speech = "Come back soon! Goodbye!"
response = {
"version": "1.0",
"sessionAttributes": {},
"response": {
"outputSpeech": {
"type": "PlainText",
"text": speech
},
"reprompt": {
"outputSpeech": {
"type": "PlainText",
"text": ""
}
},
"card": {
"type": "Simple",
"title": "<NAME>",
"content": ""
},
"shouldEndSession": shouldEndSession
}
}
if audio:
response['response']['card']['content'] = speech + " [" + audio + "] "
response['response']['directives'] = [
{
"type": "AudioPlayer.Play",
"playBehavior": "REPLACE_ALL",
"audioItem": {
"stream": {
"token": audio,
"url": audio,
"offsetInMilliseconds": 0
}
}
}
]
if stopplay:
response['response']['directives'] = [
{
"type": "AudioPlayer.ClearQueue",
"clearBehavior": "CLEAR_ALL"
}
]
return response
|
none
| 1
| 2.912781
| 3
|
|
src/python/grpcio_tests/tests_aio/health_check/health_servicer_test.py
|
warlock135/grpc
| 36,552
|
6625741
|
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests AsyncIO version of grpcio-health-checking."""
import asyncio
import logging
import random
import time
import unittest
import grpc
from grpc.experimental import aio
from grpc_health.v1 import health
from grpc_health.v1 import health_pb2
from grpc_health.v1 import health_pb2_grpc
from tests.unit.framework.common import test_constants
from tests_aio.unit._test_base import AioTestBase
_SERVING_SERVICE = 'grpc.test.TestServiceServing'
_UNKNOWN_SERVICE = 'grpc.test.TestServiceUnknown'
_NOT_SERVING_SERVICE = 'grpc.test.TestServiceNotServing'
_WATCH_SERVICE = 'grpc.test.WatchService'
_LARGE_NUMBER_OF_STATUS_CHANGES = 1000
async def _pipe_to_queue(call, queue):
async for response in call:
await queue.put(response)
class HealthServicerTest(AioTestBase):
async def setUp(self):
self._servicer = health.aio.HealthServicer()
await self._servicer.set(_SERVING_SERVICE,
health_pb2.HealthCheckResponse.SERVING)
await self._servicer.set(_UNKNOWN_SERVICE,
health_pb2.HealthCheckResponse.UNKNOWN)
await self._servicer.set(_NOT_SERVING_SERVICE,
health_pb2.HealthCheckResponse.NOT_SERVING)
self._server = aio.server()
port = self._server.add_insecure_port('[::]:0')
health_pb2_grpc.add_HealthServicer_to_server(self._servicer,
self._server)
await self._server.start()
self._channel = aio.insecure_channel('localhost:%d' % port)
self._stub = health_pb2_grpc.HealthStub(self._channel)
async def tearDown(self):
await self._channel.close()
await self._server.stop(None)
async def test_check_empty_service(self):
request = health_pb2.HealthCheckRequest()
resp = await self._stub.Check(request)
self.assertEqual(health_pb2.HealthCheckResponse.SERVING, resp.status)
async def test_check_serving_service(self):
request = health_pb2.HealthCheckRequest(service=_SERVING_SERVICE)
resp = await self._stub.Check(request)
self.assertEqual(health_pb2.HealthCheckResponse.SERVING, resp.status)
async def test_check_unknown_service(self):
request = health_pb2.HealthCheckRequest(service=_UNKNOWN_SERVICE)
resp = await self._stub.Check(request)
self.assertEqual(health_pb2.HealthCheckResponse.UNKNOWN, resp.status)
async def test_check_not_serving_service(self):
request = health_pb2.HealthCheckRequest(service=_NOT_SERVING_SERVICE)
resp = await self._stub.Check(request)
self.assertEqual(health_pb2.HealthCheckResponse.NOT_SERVING,
resp.status)
async def test_check_not_found_service(self):
request = health_pb2.HealthCheckRequest(service='not-found')
with self.assertRaises(aio.AioRpcError) as context:
await self._stub.Check(request)
self.assertEqual(grpc.StatusCode.NOT_FOUND, context.exception.code())
async def test_health_service_name(self):
self.assertEqual(health.SERVICE_NAME, 'grpc.health.v1.Health')
async def test_watch_empty_service(self):
request = health_pb2.HealthCheckRequest(service=health.OVERALL_HEALTH)
call = self._stub.Watch(request)
queue = asyncio.Queue()
task = self.loop.create_task(_pipe_to_queue(call, queue))
self.assertEqual(health_pb2.HealthCheckResponse.SERVING,
(await queue.get()).status)
call.cancel()
with self.assertRaises(asyncio.CancelledError):
await task
self.assertTrue(queue.empty())
async def test_watch_new_service(self):
request = health_pb2.HealthCheckRequest(service=_WATCH_SERVICE)
call = self._stub.Watch(request)
queue = asyncio.Queue()
task = self.loop.create_task(_pipe_to_queue(call, queue))
self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN,
(await queue.get()).status)
await self._servicer.set(_WATCH_SERVICE,
health_pb2.HealthCheckResponse.SERVING)
self.assertEqual(health_pb2.HealthCheckResponse.SERVING,
(await queue.get()).status)
await self._servicer.set(_WATCH_SERVICE,
health_pb2.HealthCheckResponse.NOT_SERVING)
self.assertEqual(health_pb2.HealthCheckResponse.NOT_SERVING,
(await queue.get()).status)
call.cancel()
with self.assertRaises(asyncio.CancelledError):
await task
self.assertTrue(queue.empty())
async def test_watch_service_isolation(self):
request = health_pb2.HealthCheckRequest(service=_WATCH_SERVICE)
call = self._stub.Watch(request)
queue = asyncio.Queue()
task = self.loop.create_task(_pipe_to_queue(call, queue))
self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN,
(await queue.get()).status)
await self._servicer.set('some-other-service',
health_pb2.HealthCheckResponse.SERVING)
# The change of health status in other service should be isolated.
# Hence, no additional notification should be observed.
with self.assertRaises(asyncio.TimeoutError):
await asyncio.wait_for(queue.get(), test_constants.SHORT_TIMEOUT)
call.cancel()
with self.assertRaises(asyncio.CancelledError):
await task
self.assertTrue(queue.empty())
async def test_two_watchers(self):
request = health_pb2.HealthCheckRequest(service=_WATCH_SERVICE)
queue1 = asyncio.Queue()
queue2 = asyncio.Queue()
call1 = self._stub.Watch(request)
call2 = self._stub.Watch(request)
task1 = self.loop.create_task(_pipe_to_queue(call1, queue1))
task2 = self.loop.create_task(_pipe_to_queue(call2, queue2))
self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN,
(await queue1.get()).status)
self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN,
(await queue2.get()).status)
await self._servicer.set(_WATCH_SERVICE,
health_pb2.HealthCheckResponse.SERVING)
self.assertEqual(health_pb2.HealthCheckResponse.SERVING,
(await queue1.get()).status)
self.assertEqual(health_pb2.HealthCheckResponse.SERVING,
(await queue2.get()).status)
call1.cancel()
call2.cancel()
with self.assertRaises(asyncio.CancelledError):
await task1
with self.assertRaises(asyncio.CancelledError):
await task2
self.assertTrue(queue1.empty())
self.assertTrue(queue2.empty())
async def test_cancelled_watch_removed_from_watch_list(self):
request = health_pb2.HealthCheckRequest(service=_WATCH_SERVICE)
call = self._stub.Watch(request)
queue = asyncio.Queue()
task = self.loop.create_task(_pipe_to_queue(call, queue))
self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN,
(await queue.get()).status)
call.cancel()
await self._servicer.set(_WATCH_SERVICE,
health_pb2.HealthCheckResponse.SERVING)
with self.assertRaises(asyncio.CancelledError):
await task
# Wait for the serving coroutine to process client cancellation.
timeout = time.monotonic() + test_constants.TIME_ALLOWANCE
while (time.monotonic() < timeout and self._servicer._server_watchers):
await asyncio.sleep(1)
self.assertFalse(self._servicer._server_watchers,
'There should not be any watcher left')
self.assertTrue(queue.empty())
async def test_graceful_shutdown(self):
request = health_pb2.HealthCheckRequest(service=health.OVERALL_HEALTH)
call = self._stub.Watch(request)
queue = asyncio.Queue()
task = self.loop.create_task(_pipe_to_queue(call, queue))
self.assertEqual(health_pb2.HealthCheckResponse.SERVING,
(await queue.get()).status)
await self._servicer.enter_graceful_shutdown()
self.assertEqual(health_pb2.HealthCheckResponse.NOT_SERVING,
(await queue.get()).status)
# This should be a no-op.
await self._servicer.set(health.OVERALL_HEALTH,
health_pb2.HealthCheckResponse.SERVING)
resp = await self._stub.Check(request)
self.assertEqual(health_pb2.HealthCheckResponse.NOT_SERVING,
resp.status)
call.cancel()
with self.assertRaises(asyncio.CancelledError):
await task
self.assertTrue(queue.empty())
async def test_no_duplicate_status(self):
request = health_pb2.HealthCheckRequest(service=_WATCH_SERVICE)
call = self._stub.Watch(request)
queue = asyncio.Queue()
task = self.loop.create_task(_pipe_to_queue(call, queue))
self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN,
(await queue.get()).status)
last_status = health_pb2.HealthCheckResponse.SERVICE_UNKNOWN
for _ in range(_LARGE_NUMBER_OF_STATUS_CHANGES):
if random.randint(0, 1) == 0:
status = health_pb2.HealthCheckResponse.SERVING
else:
status = health_pb2.HealthCheckResponse.NOT_SERVING
await self._servicer.set(_WATCH_SERVICE, status)
if status != last_status:
self.assertEqual(status, (await queue.get()).status)
last_status = status
call.cancel()
with self.assertRaises(asyncio.CancelledError):
await task
self.assertTrue(queue.empty())
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)
|
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests AsyncIO version of grpcio-health-checking."""
import asyncio
import logging
import random
import time
import unittest
import grpc
from grpc.experimental import aio
from grpc_health.v1 import health
from grpc_health.v1 import health_pb2
from grpc_health.v1 import health_pb2_grpc
from tests.unit.framework.common import test_constants
from tests_aio.unit._test_base import AioTestBase
_SERVING_SERVICE = 'grpc.test.TestServiceServing'
_UNKNOWN_SERVICE = 'grpc.test.TestServiceUnknown'
_NOT_SERVING_SERVICE = 'grpc.test.TestServiceNotServing'
_WATCH_SERVICE = 'grpc.test.WatchService'
_LARGE_NUMBER_OF_STATUS_CHANGES = 1000
async def _pipe_to_queue(call, queue):
async for response in call:
await queue.put(response)
class HealthServicerTest(AioTestBase):
async def setUp(self):
self._servicer = health.aio.HealthServicer()
await self._servicer.set(_SERVING_SERVICE,
health_pb2.HealthCheckResponse.SERVING)
await self._servicer.set(_UNKNOWN_SERVICE,
health_pb2.HealthCheckResponse.UNKNOWN)
await self._servicer.set(_NOT_SERVING_SERVICE,
health_pb2.HealthCheckResponse.NOT_SERVING)
self._server = aio.server()
port = self._server.add_insecure_port('[::]:0')
health_pb2_grpc.add_HealthServicer_to_server(self._servicer,
self._server)
await self._server.start()
self._channel = aio.insecure_channel('localhost:%d' % port)
self._stub = health_pb2_grpc.HealthStub(self._channel)
async def tearDown(self):
await self._channel.close()
await self._server.stop(None)
async def test_check_empty_service(self):
request = health_pb2.HealthCheckRequest()
resp = await self._stub.Check(request)
self.assertEqual(health_pb2.HealthCheckResponse.SERVING, resp.status)
async def test_check_serving_service(self):
request = health_pb2.HealthCheckRequest(service=_SERVING_SERVICE)
resp = await self._stub.Check(request)
self.assertEqual(health_pb2.HealthCheckResponse.SERVING, resp.status)
async def test_check_unknown_service(self):
request = health_pb2.HealthCheckRequest(service=_UNKNOWN_SERVICE)
resp = await self._stub.Check(request)
self.assertEqual(health_pb2.HealthCheckResponse.UNKNOWN, resp.status)
async def test_check_not_serving_service(self):
request = health_pb2.HealthCheckRequest(service=_NOT_SERVING_SERVICE)
resp = await self._stub.Check(request)
self.assertEqual(health_pb2.HealthCheckResponse.NOT_SERVING,
resp.status)
async def test_check_not_found_service(self):
request = health_pb2.HealthCheckRequest(service='not-found')
with self.assertRaises(aio.AioRpcError) as context:
await self._stub.Check(request)
self.assertEqual(grpc.StatusCode.NOT_FOUND, context.exception.code())
async def test_health_service_name(self):
self.assertEqual(health.SERVICE_NAME, 'grpc.health.v1.Health')
async def test_watch_empty_service(self):
request = health_pb2.HealthCheckRequest(service=health.OVERALL_HEALTH)
call = self._stub.Watch(request)
queue = asyncio.Queue()
task = self.loop.create_task(_pipe_to_queue(call, queue))
self.assertEqual(health_pb2.HealthCheckResponse.SERVING,
(await queue.get()).status)
call.cancel()
with self.assertRaises(asyncio.CancelledError):
await task
self.assertTrue(queue.empty())
async def test_watch_new_service(self):
request = health_pb2.HealthCheckRequest(service=_WATCH_SERVICE)
call = self._stub.Watch(request)
queue = asyncio.Queue()
task = self.loop.create_task(_pipe_to_queue(call, queue))
self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN,
(await queue.get()).status)
await self._servicer.set(_WATCH_SERVICE,
health_pb2.HealthCheckResponse.SERVING)
self.assertEqual(health_pb2.HealthCheckResponse.SERVING,
(await queue.get()).status)
await self._servicer.set(_WATCH_SERVICE,
health_pb2.HealthCheckResponse.NOT_SERVING)
self.assertEqual(health_pb2.HealthCheckResponse.NOT_SERVING,
(await queue.get()).status)
call.cancel()
with self.assertRaises(asyncio.CancelledError):
await task
self.assertTrue(queue.empty())
async def test_watch_service_isolation(self):
request = health_pb2.HealthCheckRequest(service=_WATCH_SERVICE)
call = self._stub.Watch(request)
queue = asyncio.Queue()
task = self.loop.create_task(_pipe_to_queue(call, queue))
self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN,
(await queue.get()).status)
await self._servicer.set('some-other-service',
health_pb2.HealthCheckResponse.SERVING)
# The change of health status in other service should be isolated.
# Hence, no additional notification should be observed.
with self.assertRaises(asyncio.TimeoutError):
await asyncio.wait_for(queue.get(), test_constants.SHORT_TIMEOUT)
call.cancel()
with self.assertRaises(asyncio.CancelledError):
await task
self.assertTrue(queue.empty())
async def test_two_watchers(self):
request = health_pb2.HealthCheckRequest(service=_WATCH_SERVICE)
queue1 = asyncio.Queue()
queue2 = asyncio.Queue()
call1 = self._stub.Watch(request)
call2 = self._stub.Watch(request)
task1 = self.loop.create_task(_pipe_to_queue(call1, queue1))
task2 = self.loop.create_task(_pipe_to_queue(call2, queue2))
self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN,
(await queue1.get()).status)
self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN,
(await queue2.get()).status)
await self._servicer.set(_WATCH_SERVICE,
health_pb2.HealthCheckResponse.SERVING)
self.assertEqual(health_pb2.HealthCheckResponse.SERVING,
(await queue1.get()).status)
self.assertEqual(health_pb2.HealthCheckResponse.SERVING,
(await queue2.get()).status)
call1.cancel()
call2.cancel()
with self.assertRaises(asyncio.CancelledError):
await task1
with self.assertRaises(asyncio.CancelledError):
await task2
self.assertTrue(queue1.empty())
self.assertTrue(queue2.empty())
async def test_cancelled_watch_removed_from_watch_list(self):
request = health_pb2.HealthCheckRequest(service=_WATCH_SERVICE)
call = self._stub.Watch(request)
queue = asyncio.Queue()
task = self.loop.create_task(_pipe_to_queue(call, queue))
self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN,
(await queue.get()).status)
call.cancel()
await self._servicer.set(_WATCH_SERVICE,
health_pb2.HealthCheckResponse.SERVING)
with self.assertRaises(asyncio.CancelledError):
await task
# Wait for the serving coroutine to process client cancellation.
timeout = time.monotonic() + test_constants.TIME_ALLOWANCE
while (time.monotonic() < timeout and self._servicer._server_watchers):
await asyncio.sleep(1)
self.assertFalse(self._servicer._server_watchers,
'There should not be any watcher left')
self.assertTrue(queue.empty())
    async def test_graceful_shutdown(self):
        """Entering graceful shutdown flips overall health to NOT_SERVING
        and makes subsequent set(SERVING) calls ineffective."""
        request = health_pb2.HealthCheckRequest(service=health.OVERALL_HEALTH)
        call = self._stub.Watch(request)
        queue = asyncio.Queue()
        task = self.loop.create_task(_pipe_to_queue(call, queue))
        self.assertEqual(health_pb2.HealthCheckResponse.SERVING,
                         (await queue.get()).status)
        await self._servicer.enter_graceful_shutdown()
        # The watcher is notified of the transition to NOT_SERVING.
        self.assertEqual(health_pb2.HealthCheckResponse.NOT_SERVING,
                         (await queue.get()).status)
        # This should be a no-op.
        await self._servicer.set(health.OVERALL_HEALTH,
                                 health_pb2.HealthCheckResponse.SERVING)
        # Unary Check must also keep reporting NOT_SERVING.
        resp = await self._stub.Check(request)
        self.assertEqual(health_pb2.HealthCheckResponse.NOT_SERVING,
                         resp.status)
        call.cancel()
        with self.assertRaises(asyncio.CancelledError):
            await task
        self.assertTrue(queue.empty())
    async def test_no_duplicate_status(self):
        """A watcher only sees transitions: setting the same status twice
        in a row must not emit a duplicate notification."""
        request = health_pb2.HealthCheckRequest(service=_WATCH_SERVICE)
        call = self._stub.Watch(request)
        queue = asyncio.Queue()
        task = self.loop.create_task(_pipe_to_queue(call, queue))
        self.assertEqual(health_pb2.HealthCheckResponse.SERVICE_UNKNOWN,
                         (await queue.get()).status)
        last_status = health_pb2.HealthCheckResponse.SERVICE_UNKNOWN
        # Flip the status randomly many times; only actual changes should
        # produce a message on the stream.
        for _ in range(_LARGE_NUMBER_OF_STATUS_CHANGES):
            if random.randint(0, 1) == 0:
                status = health_pb2.HealthCheckResponse.SERVING
            else:
                status = health_pb2.HealthCheckResponse.NOT_SERVING
            await self._servicer.set(_WATCH_SERVICE, status)
            if status != last_status:
                self.assertEqual(status, (await queue.get()).status)
                last_status = status
        call.cancel()
        with self.assertRaises(asyncio.CancelledError):
            await task
        self.assertTrue(queue.empty())
if __name__ == '__main__':
    # Verbose logging + verbose test output to ease debugging of the
    # timing-sensitive asyncio tests above.
    logging.basicConfig(level=logging.DEBUG)
    unittest.main(verbosity=2)
|
en
| 0.872704
|
# Copyright 2020 The gRPC Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests AsyncIO version of grpcio-health-checking. # The change of health status in other service should be isolated. # Hence, no additional notification should be observed. # Wait for the serving coroutine to process client cancellation. # This should be a no-op.
| 1.837798
| 2
|
app/main.py
|
damcio/-Bio-Projekt-semestralny
| 0
|
6625742
|
<gh_stars>0
import argparse
import app.helpers.inputParser as inputParser
from app import nussinov

# Command-line interface: requires a PDB structure name and the prediction
# mode to run ('nuss' or 'mcan'); parsed in the __main__ block below.
parser = argparse.ArgumentParser(description='Provide secondary-structure name')
parser.add_argument('structure_name', metavar='N', type=str, help='structure name from PDB')
parser.add_argument('prediction_mode', metavar='M', type=str,
                    help='mode for prediction: nuss - nussinov algorithm, mcan - mcannotate prediction')
def pretty_print_model(i, model, dot_notation):
    """Pretty-print one RNA model: a header, the sequence and its
    dot-bracket notation, each wrapped to 60-character lines.

    :rtype: void
    :param i: number of the model to print
    :param model: text representation of the RNA model to print
    :param dot_notation: dot-bracket representation of the RNA model to print
    """
    width = 60  # FASTA-style line width
    print("Model %d" % i)
    print(">strand_joined")
    # Iterate offsets directly instead of materialising intermediate chunk
    # lists; this also avoids shadowing the parameter `i` inside the
    # original list comprehensions.
    for start in range(0, len(model), width):
        print(model[start:start + width])
    for start in range(0, len(dot_notation), width):
        print(dot_notation[start:start + width])
def nussinov_part(file_path):
    """Run the Nussinov-algorithm branch of the program.

    :rtype: void
    :param file_path: path to pdb file
    """
    models = inputParser.read_complete_models(file_path)
    for index, pdb_model in enumerate(models):
        strand = inputParser.build_txt_strand_from_chains(pdb_model)
        pairs = nussinov.nussinov_algorithm(strand)
        notation = inputParser.make_dot_notation(strand, pairs)
        pretty_print_model(index, strand, notation)
def mcannotate_part(file_path):
    """Run the MC-Annotate branch of the program.

    :rtype: void
    :param file_path: path to pdb file
    """
    annotated_pairs = inputParser.annotate_basepairs(file_path)
    pdb_models = inputParser.read_complete_models(file_path)
    for index, (pdb_model, pairs) in enumerate(zip(pdb_models, annotated_pairs)):
        corrected_pairs = inputParser.fix_base_pairs(pdb_model, pairs)
        strand = inputParser.build_txt_strand_from_chains(pdb_model)
        notation = inputParser.make_dot_notation(strand, corrected_pairs)
        pretty_print_model(index, strand, notation)
if __name__ == '__main__':
    args = parser.parse_args()
    prediction_mode = args.prediction_mode
    # Resolve the structure name to a local 'pdb' file path via
    # inputParser.online_input.
    filepath = inputParser.online_input(args.structure_name, 'pdb')
    if prediction_mode == 'nuss':
        nussinov_part(filepath)
    elif prediction_mode == 'mcan':
        mcannotate_part(filepath)
    else:
        print("Unrecognized prediction mode!")
|
import argparse
import app.helpers.inputParser as inputParser
from app import nussinov
parser = argparse.ArgumentParser(description='Provide secondary-structure name')
parser.add_argument('structure_name', metavar='N', type=str, help='structure name from PDB')
parser.add_argument('prediction_mode', metavar='M', type=str,
help='mode for prediction: nuss - nussinov algorithm, mcan - mcannotate prediction')
def pretty_print_model(i, model, dot_notation):
"""
Pretty prints secondary structure
:rtype: void
:param i: number of model to print
:param model: text representation of RNA model to print
:param dot_notation: dot_notation representation of RNA model to print
"""
print("Model %d" % i)
print(">strand_joined")
for chunk in [model[i:i + 60] for i in range(0, len(model), 60)]:
print(chunk)
for chunk in [dot_notation[i:i + 60] for i in range(0, len(dot_notation), 60)]:
print(chunk)
def nussinov_part(file_path):
"""
Nussinov-algorithm branch of program
:rtype: void
:param file_path: path to pdb file
"""
models = inputParser.read_complete_models(file_path)
for i, model in enumerate(models):
joined_chain_model = inputParser.build_txt_strand_from_chains(model)
base_pair = nussinov.nussinov_algorithm(joined_chain_model)
dot_notation = inputParser.make_dot_notation(joined_chain_model, base_pair)
pretty_print_model(i, joined_chain_model, dot_notation)
def mcannotate_part(file_path):
"""
:rtype: void
:param file_path: path to pdb file
"""
base_pairs = inputParser.annotate_basepairs(file_path)
models = inputParser.read_complete_models(file_path)
for i, (model, base_pair) in enumerate(zip(models, base_pairs)):
fixed_base_pairs = inputParser.fix_base_pairs(model, base_pair)
joined_chain_model = inputParser.build_txt_strand_from_chains(model)
dot_notation = inputParser.make_dot_notation(joined_chain_model, fixed_base_pairs)
pretty_print_model(i, joined_chain_model, dot_notation)
if __name__ == '__main__':
args = parser.parse_args()
prediction_mode = args.prediction_mode
filepath = inputParser.online_input(args.structure_name, 'pdb')
if prediction_mode == 'nuss':
nussinov_part(filepath)
elif prediction_mode == 'mcan':
mcannotate_part(filepath)
else:
print("Unrecognized prediction mode!")
|
en
| 0.743308
|
Pretty prints secondary structure :rtype: void :param i: number of model to print :param model: text representation of RNA model to print :param dot_notation: dot_notation representation of RNA model to print Nussinov-algorithm branch of program :rtype: void :param file_path: path to pdb file :rtype: void :param file_path: path to pdb file
| 2.644173
| 3
|
Dataset/Leetcode/train/55/193.py
|
kkcookies99/UAST
| 0
|
6625743
|
class Solution:
    def XXX(self, nums: List[int]) -> bool:
        """Jump Game: return True iff the last index is reachable.

        Greedy single pass tracking the furthest reachable index; if the
        scan ever passes that frontier the end is unreachable. Removes the
        dead `end` bookkeeping of the original (it never affected the
        result) and exits early in both directions.

        :param nums: nums[i] is the maximum jump length from index i
        :return: True iff index len(nums) - 1 is reachable from index 0
        """
        last = len(nums) - 1
        furthest = 0
        for i, step in enumerate(nums):
            if i > furthest:
                # Index i is unreachable, so everything beyond it is too.
                return False
            furthest = max(furthest, i + step)
            if furthest >= last:
                return True
        # Empty input falls through (0 >= -1), matching the original.
        return furthest >= last
|
class Solution:
def XXX(self, nums: List[int]) -> bool:
l,maxp,end=len(nums),0,0
for i in range(l-1):
if maxp>=i:
maxp=max(maxp,i+nums[i])
if i==end:
end=maxp
if maxp>=l-1:
return True
else:
return False
|
none
| 1
| 2.821811
| 3
|
|
TransCoda/ui/Actions.py
|
ag-sd/py
| 1
|
6625744
|
<gh_stars>1-10
from enum import Enum
from PyQt5.QtCore import pyqtSignal, QSize
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QToolBar
import CommonUtils
from TransCoda.core.Encoda import EncoderStatus
class Action(Enum):
    """Toolbar/menu actions; values are user-facing display labels."""
    ADD_FILE = "Add File"
    ADD_DIR = "Add Directory"
    # Bug fix: label typo was "ADd YouTube".
    ADD_YTD = "Add YouTube"
    DEL_FILE = "Remove"
    DEL_ALL = "Clear All"
    ENCODE = "Encode"
    SETTINGS = "Settings"
    HELP = "Help"
    ABOUT = "About"
    # Mirror encoder statuses so they can be dispatched as actions too.
    CHANGE_STATUS_SUCCESS = EncoderStatus.SUCCESS.name
    CHANGE_STATUS_READY = EncoderStatus.READY.name
class MainToolBar(QToolBar):
    """Main toolbar of the TransCoda window.

    Emits `button_pressed(Action)` whenever a toolbar action fires, and
    manages the enabled state and tooltip of the Encode action depending
    on whether files, an output directory and an encoder are selected.
    """
    button_pressed = pyqtSignal(Action)
    _ACTION_ENCODE_DISABLED_NO_ENCODER_MESSAGE = "Choose an encoder"
    _ACTION_ENCODE_DISABLED_NO_FILES_MESSAGE = "Select files to encode"
    _ACTION_ENCODE_DISABLED_NO_OUTPUT_DIR_MESSAGE = "Choose output directory"
    _ACTION_ENCODE_READY_MESSAGE = "Start encoding the files"
    _ACTION_ENCODE_RUNNING_MESSAGE = "Wait for files in progress to complete and stop"

    def __init__(self):
        super().__init__()
        self.setIconSize(QSize(48, 48))
        self.action_add_file = CommonUtils.create_action(name=Action.ADD_FILE.name, shortcut="Ctrl+O",
                                                         tooltip="Add a single file ",
                                                         func=self.raise_event, parent=self,
                                                         icon=QIcon.fromTheme("list-add"))
        self.action_add_dir = CommonUtils.create_action(name=Action.ADD_DIR.name, shortcut="Ctrl+D",
                                                        tooltip="Add an entire directory",
                                                        func=self.raise_event, parent=self,
                                                        icon=QIcon.fromTheme("folder-new"))
        self.action_clear_all = CommonUtils.create_action(name=Action.DEL_ALL.name, shortcut="Delete",
                                                          tooltip="Clear all files",
                                                          func=self.raise_event, parent=self,
                                                          icon=QIcon.fromTheme("edit-clear"))
        self.action_settings = CommonUtils.create_action(name=Action.SETTINGS.name, shortcut="Ctrl+R",
                                                         tooltip="Open the settings editor",
                                                         func=self.raise_event, parent=self,
                                                         icon=QIcon.fromTheme("preferences-system"))
        # NOTE(review): ENCODE reuses shortcut "Ctrl+R", which collides with
        # the Settings action above -- confirm the intended binding.
        self.action_encode = CommonUtils.create_action(name=Action.ENCODE.name, shortcut="Ctrl+R",
                                                       tooltip="Start encoding the files",
                                                       func=self.raise_event, parent=self,
                                                       icon=QIcon.fromTheme("media-playback-start"))
        self.action_help = CommonUtils.create_action(name=Action.HELP.name, shortcut="F1",
                                                     tooltip="View online help",
                                                     func=self.raise_event, parent=self,
                                                     icon=QIcon.fromTheme("help-contents"))
        self.action_about = CommonUtils.create_action(name=Action.ABOUT.name, shortcut="Ctrl+I",
                                                      tooltip="About this application",
                                                      func=self.raise_event, parent=self,
                                                      icon=QIcon.fromTheme("help-about"))
        self.addAction(self.action_add_file)
        self.addAction(self.action_add_dir)
        self.addAction(self.action_clear_all)
        self.addSeparator()
        self.addAction(self.action_settings)
        self.addAction(self.action_encode)
        self.addSeparator()
        self.addAction(self.action_help)
        self.addAction(self.action_about)
        # Start with Encode disabled until files/output/encoder are set.
        self.set_encode_state(0, None, None)

    def set_encode_state(self, file_count, output_dir, encoder_name):
        """Enable Encode only when files, an output directory and an
        encoder are all selected; otherwise disable it and list the
        missing prerequisites in the tooltip."""
        # Bug fix: the original used `output_dir is not ""`, an identity
        # comparison with a str literal (SyntaxWarning on Python 3.8+,
        # correct only by CPython interning accident). Compare by value.
        enabled = (encoder_name is not None and output_dir is not None
                   and output_dir != "" and file_count != 0)
        self.action_encode.setEnabled(enabled)
        if enabled:
            self.action_encode.setToolTip(self._ACTION_ENCODE_READY_MESSAGE)
        else:
            message = []
            if file_count == 0:
                message.append(self._ACTION_ENCODE_DISABLED_NO_FILES_MESSAGE)
            if output_dir is None or output_dir == "":
                message.append(self._ACTION_ENCODE_DISABLED_NO_OUTPUT_DIR_MESSAGE)
            if encoder_name is None:
                message.append(self._ACTION_ENCODE_DISABLED_NO_ENCODER_MESSAGE)
            self.action_encode.setToolTip(", ".join(message))

    def raise_event(self, event):
        """Translate a triggered action name into a `button_pressed` signal."""
        self.button_pressed.emit(Action[event])

    def encoding_finished(self, file_count, output_dir, encoder_name):
        """Re-enable the toolbar after an encoding run and restore the
        Encode action's icon/state from the current selection."""
        self.action_add_file.setEnabled(True)
        self.action_add_dir.setEnabled(True)
        self.action_clear_all.setEnabled(True)
        self.action_settings.setEnabled(True)
        self.action_help.setEnabled(True)
        self.action_about.setEnabled(True)
        self.action_encode.setIcon(QIcon.fromTheme("media-playback-start"))
        self.set_encode_state(file_count, output_dir, encoder_name)

    def encoding_started(self):
        """Lock the toolbar while encoding; Encode becomes a stop button."""
        self.action_add_file.setEnabled(False)
        self.action_add_dir.setEnabled(False)
        self.action_clear_all.setEnabled(False)
        self.action_settings.setEnabled(False)
        self.action_help.setEnabled(False)
        self.action_about.setEnabled(False)
        self.action_encode.setIcon(QIcon.fromTheme("media-playback-stop"))
        self.action_encode.setToolTip(self._ACTION_ENCODE_RUNNING_MESSAGE)
|
from enum import Enum
from PyQt5.QtCore import pyqtSignal, QSize
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QToolBar
import CommonUtils
from TransCoda.core.Encoda import EncoderStatus
class Action(Enum):
ADD_FILE = "Add File"
ADD_DIR = "Add Directory"
ADD_YTD = "ADd YouTube"
DEL_FILE = "Remove"
DEL_ALL = "Clear All"
ENCODE = "Encode"
SETTINGS = "Settings"
HELP = "Help"
ABOUT = "About"
CHANGE_STATUS_SUCCESS = EncoderStatus.SUCCESS.name
CHANGE_STATUS_READY = EncoderStatus.READY.name
class MainToolBar(QToolBar):
button_pressed = pyqtSignal(Action)
_ACTION_ENCODE_DISABLED_NO_ENCODER_MESSAGE = "Choose an encoder"
_ACTION_ENCODE_DISABLED_NO_FILES_MESSAGE = "Select files to encode"
_ACTION_ENCODE_DISABLED_NO_OUTPUT_DIR_MESSAGE = "Choose output directory"
_ACTION_ENCODE_READY_MESSAGE = "Start encoding the files"
_ACTION_ENCODE_RUNNING_MESSAGE = "Wait for files in progress to complete and stop"
def __init__(self):
super().__init__()
self.setIconSize(QSize(48, 48))
self.action_add_file = CommonUtils.create_action(name=Action.ADD_FILE.name, shortcut="Ctrl+O",
tooltip="Add a single file ",
func=self.raise_event, parent=self,
icon=QIcon.fromTheme("list-add"))
self.action_add_dir = CommonUtils.create_action(name=Action.ADD_DIR.name, shortcut="Ctrl+D",
tooltip="Add an entire directory",
func=self.raise_event, parent=self,
icon=QIcon.fromTheme("folder-new"))
self.action_clear_all = CommonUtils.create_action(name=Action.DEL_ALL.name, shortcut="Delete",
tooltip="Clear all files",
func=self.raise_event, parent=self,
icon=QIcon.fromTheme("edit-clear"))
self.action_settings = CommonUtils.create_action(name=Action.SETTINGS.name, shortcut="Ctrl+R",
tooltip="Open the settings editor",
func=self.raise_event, parent=self,
icon=QIcon.fromTheme("preferences-system"))
self.action_encode = CommonUtils.create_action(name=Action.ENCODE.name, shortcut="Ctrl+R",
tooltip="Start encoding the files",
func=self.raise_event, parent=self,
icon=QIcon.fromTheme("media-playback-start"))
self.action_help = CommonUtils.create_action(name=Action.HELP.name, shortcut="F1",
tooltip="View online help",
func=self.raise_event, parent=self,
icon=QIcon.fromTheme("help-contents"))
self.action_about = CommonUtils.create_action(name=Action.ABOUT.name, shortcut="Ctrl+I",
tooltip="About this application",
func=self.raise_event, parent=self,
icon=QIcon.fromTheme("help-about"))
self.addAction(self.action_add_file)
self.addAction(self.action_add_dir)
self.addAction(self.action_clear_all)
self.addSeparator()
self.addAction(self.action_settings)
self.addAction(self.action_encode)
self.addSeparator()
self.addAction(self.action_help)
self.addAction(self.action_about)
self.set_encode_state(0, None, None)
def set_encode_state(self, file_count, output_dir, encoder_name):
enabled = encoder_name is not None and output_dir is not None and output_dir is not "" and file_count != 0
self.action_encode.setEnabled(enabled)
if enabled:
self.action_encode.setToolTip(self._ACTION_ENCODE_READY_MESSAGE)
else:
message = []
if file_count == 0:
message.append(self._ACTION_ENCODE_DISABLED_NO_FILES_MESSAGE)
if output_dir is None or output_dir == "":
message.append(self._ACTION_ENCODE_DISABLED_NO_OUTPUT_DIR_MESSAGE)
if encoder_name is None:
message.append(self._ACTION_ENCODE_DISABLED_NO_ENCODER_MESSAGE)
self.action_encode.setToolTip(", ".join(message))
def raise_event(self, event):
self.button_pressed.emit(Action[event])
def encoding_finished(self, file_count, output_dir, encoder_name):
self.action_add_file.setEnabled(True)
self.action_add_dir.setEnabled(True)
self.action_clear_all.setEnabled(True)
self.action_settings.setEnabled(True)
self.action_help.setEnabled(True)
self.action_about.setEnabled(True)
self.action_encode.setIcon(QIcon.fromTheme("media-playback-start"))
self.set_encode_state(file_count, output_dir, encoder_name)
def encoding_started(self):
self.action_add_file.setEnabled(False)
self.action_add_dir.setEnabled(False)
self.action_clear_all.setEnabled(False)
self.action_settings.setEnabled(False)
self.action_help.setEnabled(False)
self.action_about.setEnabled(False)
self.action_encode.setIcon(QIcon.fromTheme("media-playback-stop"))
self.action_encode.setToolTip(self._ACTION_ENCODE_RUNNING_MESSAGE)
|
none
| 1
| 2.43878
| 2
|
|
doc/examples/generate-examples.py
|
johnnovak/twyg
| 9
|
6625745
|
<reponame>johnnovak/twyg
#!/usr/bin/env python
import os
from twyg import get_scale_factor, generate_output

# Location of the example data files and root of the generated output.
DATA_PATH = '../../example-data'
OUT_PATH = '.'

# Short aliases for the example data sets (basenames of the JSON files).
DA = 'google-analytics'
DC = 'cocoa'
DG = 'goals'
DM = 'metrics'
DN = 'animals'
DS = 'six-thinking-hats'
DU = 'guitars'
DW = 'wind-instruments'
DY = 'synthesis'
# Each entry maps one layout/config name to three example renderings:
# the data set to use and the color scheme ('' means the config default).
configs = [
    {'boxes': [{ 'data': DC, 'colors': 'kelp' },
               { 'data': DG, 'colors': '' },
               { 'data': DM, 'colors': 'moon' }]
    },
    {'bubbles': [{ 'data': DA, 'colors': 'inca' },
                 { 'data': DM, 'colors': '' },
                 { 'data': DS, 'colors': 'neo' }]
    },
    {'edge' : [{ 'data': DC, 'colors': 'aqua' },
               { 'data': DG, 'colors': 'azure' },
               { 'data': DU, 'colors': '' }]
    },
    {'flowchart': [{ 'data': DA, 'colors': 'inca' },
                   { 'data': DM, 'colors': '' },
                   { 'data': DW, 'colors': 'jelly' }]
    },
    {'hive': [{ 'data': DG, 'colors': 'jelly' },
              { 'data': DS, 'colors': '' },
              { 'data': DY, 'colors': 'mango' }]
    },
    {'ios': [{ 'data': DM, 'colors': 'milkshake' },
             { 'data': DS, 'colors': 'honey' },
             { 'data': DY, 'colors': '' }]
    },
    {'jellyfish': [{ 'data': DU, 'colors': '' },
                   { 'data': DY, 'colors': 'quartz' },
                   { 'data': DN, 'colors': 'colors21' }]
    },
    {'junction1': [{ 'data': DN, 'colors': 'forest' },
                   { 'data': DM, 'colors': 'clay' },
                   { 'data': DW, 'colors': '' }]
    },
    {'junction2': [{ 'data': DN, 'colors': 'mango' },
                   { 'data': DU, 'colors': '' },
                   { 'data': DW, 'colors': 'salmon' }]
    },
    {'lines': [{ 'data': DN, 'colors': '' },
               { 'data': DA, 'colors': 'merlot' },
               { 'data': DM, 'colors': 'azure' }]
    },
    {'modern': [{ 'data': DN, 'colors': '' },
                { 'data': DM, 'colors': 'mustard' },
                { 'data': DY, 'colors': 'cobalt' }]
    },
    {'nazca': [{ 'data': DC, 'colors': 'earth' },
               { 'data': DM, 'colors': 'aqua' },
               { 'data': DY, 'colors': '' }]
    },
    {'rounded': [{ 'data': DG, 'colors': '' },
                 { 'data': DA, 'colors': 'orbit' },
                 { 'data': DM, 'colors': 'grape' }]
    },
    {'square': [{ 'data': DN, 'colors': 'quartz' },
                { 'data': DC, 'colors': 'crayons' },
                { 'data': DU, 'colors': '' }]
    },
    {'synapse': [{ 'data': DC, 'colors': 'kelp' },
                 { 'data': DA, 'colors': 'mint' },
                 { 'data': DM, 'colors': '' }]
    },
    {'tron': [{ 'data': DC, 'colors': '' },
              { 'data': DM, 'colors': 'mellow' },
              { 'data': DY, 'colors': 'colors21' }]
    }
]
def generate_examples(outformat, dpi):
    """Render every example in `configs` in the given output format.

    NOTE: Python 2 code (print statements, `dict.keys()[0]` indexing).

    :param outformat: output format/extension ('pdf', 'svg', 'ps', 'png')
    :param dpi: resolution used to derive the render scale factor
    """
    for c in configs:
        # Each config dict holds exactly one layout-name -> params entry.
        config_fname = c.keys()[0]
        params = c.values()[0]
        for p in params:
            data_fname = os.path.join(DATA_PATH, p['data'] + '.json')
            colorscheme = p['colors']
            # Output name: <config>[-<colorscheme>]-<dataset>.<format>
            out_fname = [config_fname]
            if colorscheme:
                out_fname.append(colorscheme)
            out_fname.append(os.path.basename(os.path.splitext(data_fname)[0]))
            out_fname = os.path.join(OUT_PATH, outformat,
                                     '-'.join(out_fname) + '.' + outformat)
            print "Generating '%s'..." % out_fname,
            scale = get_scale_factor(dpi, 1.0);
            generate_output(data_fname, config_fname, out_fname, outformat,
                            colorscheme=colorscheme, scale=scale)
            print 'OK'
# Generate every format; raster output (png) at higher resolution.
generate_examples('pdf', 72)
generate_examples('svg', 72)
generate_examples('ps', 72)
generate_examples('png', 150)
|
#!/usr/bin/env python
import os
from twyg import get_scale_factor, generate_output
DATA_PATH = '../../example-data'
OUT_PATH = '.'
DA = 'google-analytics'
DC = 'cocoa'
DG = 'goals'
DM = 'metrics'
DN = 'animals'
DS = 'six-thinking-hats'
DU = 'guitars'
DW = 'wind-instruments'
DY = 'synthesis'
configs = [
{'boxes': [{ 'data': DC, 'colors': 'kelp' },
{ 'data': DG, 'colors': '' },
{ 'data': DM, 'colors': 'moon' }]
},
{'bubbles': [{ 'data': DA, 'colors': 'inca' },
{ 'data': DM, 'colors': '' },
{ 'data': DS, 'colors': 'neo' }]
},
{'edge' : [{ 'data': DC, 'colors': 'aqua' },
{ 'data': DG, 'colors': 'azure' },
{ 'data': DU, 'colors': '' }]
},
{'flowchart': [{ 'data': DA, 'colors': 'inca' },
{ 'data': DM, 'colors': '' },
{ 'data': DW, 'colors': 'jelly' }]
},
{'hive': [{ 'data': DG, 'colors': 'jelly' },
{ 'data': DS, 'colors': '' },
{ 'data': DY, 'colors': 'mango' }]
},
{'ios': [{ 'data': DM, 'colors': 'milkshake' },
{ 'data': DS, 'colors': 'honey' },
{ 'data': DY, 'colors': '' }]
},
{'jellyfish': [{ 'data': DU, 'colors': '' },
{ 'data': DY, 'colors': 'quartz' },
{ 'data': DN, 'colors': 'colors21' }]
},
{'junction1': [{ 'data': DN, 'colors': 'forest' },
{ 'data': DM, 'colors': 'clay' },
{ 'data': DW, 'colors': '' }]
},
{'junction2': [{ 'data': DN, 'colors': 'mango' },
{ 'data': DU, 'colors': '' },
{ 'data': DW, 'colors': 'salmon' }]
},
{'lines': [{ 'data': DN, 'colors': '' },
{ 'data': DA, 'colors': 'merlot' },
{ 'data': DM, 'colors': 'azure' }]
},
{'modern': [{ 'data': DN, 'colors': '' },
{ 'data': DM, 'colors': 'mustard' },
{ 'data': DY, 'colors': 'cobalt' }]
},
{'nazca': [{ 'data': DC, 'colors': 'earth' },
{ 'data': DM, 'colors': 'aqua' },
{ 'data': DY, 'colors': '' }]
},
{'rounded': [{ 'data': DG, 'colors': '' },
{ 'data': DA, 'colors': 'orbit' },
{ 'data': DM, 'colors': 'grape' }]
},
{'square': [{ 'data': DN, 'colors': 'quartz' },
{ 'data': DC, 'colors': 'crayons' },
{ 'data': DU, 'colors': '' }]
},
{'synapse': [{ 'data': DC, 'colors': 'kelp' },
{ 'data': DA, 'colors': 'mint' },
{ 'data': DM, 'colors': '' }]
},
{'tron': [{ 'data': DC, 'colors': '' },
{ 'data': DM, 'colors': 'mellow' },
{ 'data': DY, 'colors': 'colors21' }]
}
]
def generate_examples(outformat, dpi):
for c in configs:
config_fname = c.keys()[0]
params = c.values()[0]
for p in params:
data_fname = os.path.join(DATA_PATH, p['data'] + '.json')
colorscheme = p['colors']
out_fname = [config_fname]
if colorscheme:
out_fname.append(colorscheme)
out_fname.append(os.path.basename(os.path.splitext(data_fname)[0]))
out_fname = os.path.join(OUT_PATH, outformat,
'-'.join(out_fname) + '.' + outformat)
print "Generating '%s'..." % out_fname,
scale = get_scale_factor(dpi, 1.0);
generate_output(data_fname, config_fname, out_fname, outformat,
colorscheme=colorscheme, scale=scale)
print 'OK'
generate_examples('pdf', 72)
generate_examples('svg', 72)
generate_examples('ps', 72)
generate_examples('png', 150)
|
ru
| 0.26433
|
#!/usr/bin/env python
| 2.028402
| 2
|
apps/schools/tests.py
|
cloudartisan/dojomaster
| 1
|
6625746
|
<reponame>cloudartisan/dojomaster<filename>apps/schools/tests.py<gh_stars>1-10
from django.test import TestCase, Client
from django.core.urlresolvers import reverse
def redirect_url(url_name, next_url_name=None, *args, **kwargs):
    """Build `<url>?next=<next_url>` for login-redirect assertions.

    `*args` is accepted but unused, preserving the original signature.
    """
    target = reverse(url_name)
    next_target = reverse(next_url_name, kwargs=kwargs)
    return "%s?next=%s" % (target, next_target)
class SchoolsLoginRequiredTests(TestCase):
    """Verify that school views require an authenticated user."""

    def setUp(self):
        self.client = Client()

    def test_schools_list(self):
        # An anonymous request must be redirected to the login page with
        # ?next= pointing back at the schools list.
        response = self.client.get(reverse('schools-list'))
        expected_redirect = redirect_url('account_login', 'schools-list')
        self.assertRedirects(response, expected_redirect)
|
from django.test import TestCase, Client
from django.core.urlresolvers import reverse
def redirect_url(url_name, next_url_name=None, *args, **kwargs):
url = reverse(url_name) + "?next=" + reverse(next_url_name, kwargs=kwargs)
return url
class SchoolsLoginRequiredTests(TestCase):
def setUp(self):
self.client = Client()
def test_schools_list(self):
response = self.client.get(reverse('schools-list'))
expected_redirect = redirect_url('account_login', 'schools-list')
self.assertRedirects(response, expected_redirect)
|
none
| 1
| 2.538615
| 3
|
|
tests/components/honeywell/__init__.py
|
domwillcode/home-assistant
| 30,023
|
6625747
|
"""Tests for honeywell component."""
|
"""Tests for honeywell component."""
|
en
| 0.757158
|
Tests for honeywell component.
| 1.052034
| 1
|
python/turbodbc/api_constants.py
|
arikfr/turbodbc
| 537
|
6625748
|
"""
Global constants as required by PEP-249:
https://www.python.org/dev/peps/pep-0249/#globals
"""
apilevel = "2.0"
threadsafety = 1
paramstyle = 'qmark'
|
"""
Global constants as required by PEP-249:
https://www.python.org/dev/peps/pep-0249/#globals
"""
apilevel = "2.0"
threadsafety = 1
paramstyle = 'qmark'
|
en
| 0.877899
|
Global constants as required by PEP-249: https://www.python.org/dev/peps/pep-0249/#globals
| 1.145437
| 1
|
py/tools/factory_bug.py
|
arccode/factory
| 3
|
6625749
|
<reponame>arccode/factory
#!/usr/bin/env python3
#
# Copyright 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
from collections import namedtuple
import contextlib
import fnmatch
from glob import glob
from itertools import chain
import logging
import os
import stat
import sys
from cros.factory.utils import file_utils
from cros.factory.utils.process_utils import CalledProcessError, Spawn
from cros.factory.utils import sys_utils
# The candidate of device names which the ChromeOS may mount on rootfs.
# Assume that ChromeOS has only one device, and always has a smaller index.
# That is, it should always be `sda`, `nvme0n1`... not `sdb`, `nvme0n2`...
ROOT_DEV_NAME_CANDIDATE = ['sda', 'nvme0n1', 'mmcblk0']
DRAM_CALIBRATION_LOG_FILES = [
# Plain text logs for devices with huge output in memory training, for
# example Kukui.
'DRAMK_LOG',
# On ARM devices that training data is unlikely to change and used by both
# recovery and normal boot, for example Trogdor.
'RO_DDR_TRAINING',
# On ARM devices that may retrain due to aging, for example Kukui.
'RW_DDR_TRAINING',
# On most X86 devices, for recovery boot.
'RECOVERY_MRC_CACHE',
# On most x86 devices, for normal boot.
'RW_MRC_CACHE',
]
# Root directory to use when root partition is USB
USB_ROOT_OUTPUT_DIR = '/mnt/stateful_partition/factory_bug'
DESCRIPTION = """Generate and save zip archive of log files.
This tool always tries to collect log from the on board storage device.
That is, if the current rootfs is a removable devices like usb drive,
this tool will try to find the on board device and collect log from it.
"""
EXAMPLES = """Examples:
When booting from the internal hard disk:
# Save logs to /tmp
factory_bug
# Save logs to a USB drive (using the first one already mounted, or the
# first mountable on any USB device if none is mounted yet)
factory_bug --save-to-removable
When booting from an USB drive:
# Mount on board rootfs, collect logs from there, and save logs to the USB
# drive's stateful partition
factory_bug
# Specify the input device for logs collecting.
factory_bug --input-device /dev/sda
"""
# Info about a mounted partition.
#
# Properties:
# dev: The device that was mounted or re-used.
# mount_point: The mount point of the device.
# temporary: Whether the device is being temporarily mounted.
MountUSBInfo = namedtuple('MountUSBInfo', ['dev', 'mount_point', 'temporary'])
def GetRootDevice():
  """Get the device name on which current rootfs is mounted.

  Passes '-d' so that the physical block device is returned rather than a
  virtual device such as `/dev/dm-0`.

  Returns:
    The rootfs device name, i.e. sda.
  """
  result = Spawn(['rootdev', '-s', '-d'], read_stdout=True, check_call=True)
  return os.path.basename(result.stdout_data.strip())
def IsDeviceRemovable(dev):
  """Check if device is a removable device.

  Args:
    dev: i.e. sda.

  Returns:
    True if device is removable.
  """
  removable_flag = file_utils.ReadOneLine(f'/sys/block/{dev}/removable')
  return removable_flag == '1'
def GetOnboardRootDevice():
  """Get the device name of the on board rootfs.

  Returns:
    The on board rootfs device name, i.e. sda.

  Raises:
    RuntimeError if cannot determine the on board rootfs device.
  """
  candidates = []
  for name in ROOT_DEV_NAME_CANDIDATE:
    if os.path.isdir(f'/sys/block/{name}') and not IsDeviceRemovable(name):
      candidates.append(name)
  if not candidates:
    raise RuntimeError(
        'Cannot find on board rootfs device. None of these devices exists and '
        f'is not removable: {ROOT_DEV_NAME_CANDIDATE}')
  if len(candidates) > 1:
    raise RuntimeError(
        'Multiple devices are found, cannot determine the on board rootfs '
        f'device: {candidates}')
  return candidates[0]
def GetDeviceMountPointMapping(dev):
  """Get a mapping of partition -> mount_point.

  Args:
    dev: i.e. sda.

  Returns:
    Partition to mount_point mapping. If the partition is not mounted,
    mount_point is None.
  """
  output = Spawn(['lsblk', '-nro', 'NAME,MOUNTPOINT', f'/dev/{dev}'],
                 read_stdout=True, check_call=True).stdout_data
  mapping = {}
  for raw_line in output.splitlines():
    fields = raw_line.split()
    if fields:
      mapping[fields[0]] = fields[1] if len(fields) > 1 else None
  return mapping
def GetPartitionName(dev, index):
  """Get partition name from device name and index.

  Returns:
    Partition name; a `p` separates the device name and index unless the
    device is a sata (`sd*`) device.
  """
  separator = '' if dev.startswith('sd') else 'p'
  return f'{dev}{separator}{index}'
@contextlib.contextmanager
def MountInputDevice(dev):
  """Mount the rootfs on the device and yield the paths.

  Try to mount each partition we need if it is not already mounted.

  According to `src/platform2/init/upstart/test-init/factory_utils.sh`, when
  device is booted with a test image (usually it should be, if the devices have
  run factory toolkit), there are two partitions 1 and 3 which contain the info
  we need.

  `/etc` is under root partition which is partition 3.
  `/usr/local` is bind-mounted to '/mnt/stateful_partition/dev_image' which is
  `dev_image` under partition 1.
  Normally, '/var' is mounted by `mount_encrypted`. However, if factory toolkit
  is enabled, `/var` is bind-mounted to '/mnt/stateful_partition/var' which is
  `var` under partition 1.

  Note that partition 3 (rootfs) is mounted with ext2. Don't use ext4 which may
  modify fs superblock information and making rootfs verification fail.

  Yields:
    A dict of paths indicating where should the log directories be redirected
    to.
  """
  mount_point_map = GetDeviceMountPointMapping(dev)
  with contextlib.ExitStack() as stack:
    mount = {}
    # Partition 1: stateful (ext4); partition 3: rootfs, mounted ext2 on
    # purpose (see docstring).
    for part_id, fstype in [(1, 'ext4'), (3, 'ext2')]:
      part = GetPartitionName(dev, part_id)
      if mount_point_map[part]:
        # Already mounted by the system -- reuse the existing mount point.
        mount[part_id] = mount_point_map[part]
      else:
        # If the partition is mounted by us, unmount it at the end.
        unmounter = sys_utils.MountPartition(f'/dev/{part}', fstype=fstype)
        mount[part_id] = stack.enter_context(unmounter)
    yield {
        'var': os.path.join(mount[1], 'var'),
        'usr_local': os.path.join(mount[1], 'dev_image'),
        'etc': os.path.join(mount[3], 'etc'),
    }
@contextlib.contextmanager
def MountRemovable(read_only=False):
  """Mounts (or re-uses) a removable device.

  Scan all the devices under /sys/block/, use the first removable device. Check
  if there is any mounted partition. Yield that partition or try to mount a
  partition of the device.

  Args:
    read_only: If we mount device we mount it read only. Used by goofy_rpc.py

  Yields:
    MountUSBInfo: This is used by goofy_rpc.py

  Raises:
    RuntimeError if no removable device is available or cannot mount any
    partition of a removable device.
  """
  devices = [os.path.basename(x) for x in glob('/sys/block/*')]
  removables = [x for x in devices if IsDeviceRemovable(x)]
  if not removables:
    raise RuntimeError('No removable device is available.')
  if len(removables) > 1:
    logging.warning('More than one removable devices are found: %s', removables)
  dev = removables[0]
  mount_point_map = GetDeviceMountPointMapping(dev)
  # Prefer a partition that the system has already mounted.
  for part, mount_point in mount_point_map.items():
    if mount_point:
      logging.info('Using mounted device %s on %s', part, mount_point)
      yield MountUSBInfo(dev=dev, mount_point=mount_point, temporary=False)
      # The device is synced once to make sure the data is written to the device
      # since we cannot guarantee that this device will be unmount correctly.
      Spawn(['sync'], call=True)
      return
  # Try to mount the whole device first, then try to mount each partition.
  partitions = sorted(x for x in mount_point_map)
  for part in partitions:
    try:
      mounter = sys_utils.MountPartition(f'/dev/{part}', rw=not read_only)
    except Exception:
      logging.debug('Mount %s failed.', part)
      continue
    with mounter as mount_point:
      logging.warning('Mount success. Mounted `%s` at `%s`', part, mount_point)
      yield MountUSBInfo(dev=dev, mount_point=mount_point, temporary=True)
      return
  raise RuntimeError(f'Unable to mount any of {partitions}')
def GlobAll(*args):
  """Globs every pattern and returns the combined list of matches.

  Args:
    *args: Glob pattern strings.

  Returns:
    A flat list containing the matches of every pattern, in order.
  """
  matches = []
  for pattern in args:
    matches.extend(glob(pattern))
  return matches
def RunCommandAndSaveOutputToFile(command, filename, check_call=True,
                                  include_stderr=False):
  """Runs a command and saves its output to a file.

  Args:
    command: Command passed to `Spawn()`.
    filename: Filename to write the output to.
    check_call: If True, raise when the command returns non-zero.
    include_stderr: Whether to include stderr in the output file.

  Returns:
    filename
  """
  with open(filename, 'w') as f:
    options = {
        'stdout': f,
    }
    if check_call:
      options['check_call'] = True
    else:
      options['call'] = True
    if include_stderr:
      options['stderr'] = f
    else:
      options['ignore_stderr'] = True
    logging.info('Generating %s', filename)
    # Fixed typo in the log format string: "Inlcude" -> "Include".
    logging.debug('Output: %s, Check Call: %s, Include Stderr: %s, Command: %s',
                  filename, check_call, include_stderr, command)
    Spawn(command, **options)
  return filename
def HasEC():
  """Checks whether the platform has an EC chip.

  SuperIO-based platforms ship without an EC chip, so probe via `ectool` first.

  Returns:
    True if `ectool version` runs successfully.
  """
  try:
    proc = Spawn(['ectool', 'version'], read_stdout=True, ignore_stderr=True)
    return proc.returncode == 0
  except OSError:
    # The system might not have 'ectool' command if the platform has no EC chip.
    return False
def AppendLogToABT(abt_file, log_file):
  """Appends the content of log_file to abt_file in ABT multi-line format.

  Args:
    abt_file: Path of the abt.txt file to append to.
    log_file: Path of the log file whose bytes are embedded.
  """
  for path in (abt_file, log_file):
    if not os.path.isfile(path):
      logging.warning('%s is not a valid file.', path)
      return
  logging.debug('ABT: adding %s.', log_file)
  with open(abt_file, 'ab') as out:
    out.write(b'%s=<multi-line>\n' % log_file.encode('utf-8'))
    out.write(b'---------- START ----------\n')
    out.write(file_utils.ReadFile(log_file, encoding=None))
    out.write(b'---------- END ----------\n')
def CreateABTFile(files, exclude_patterns):
  """Create abt.txt to provide easier log access.

  We utilize the ABT browser extension to provide easier log access on browser,
  which needs an embedded `abt.txt` in the factory bug archive.
  We don't want to put all the contents into abt.txt since this will generate
  an archive which size is twice than the original one.
  By default, all directories are ignored from abt.txt. Extra include files and
  exclude patterns are listed below.

  Args:
    files: The current file list of `SaveLogs`.
    exclude_patterns: The current exclude patterns of `SaveLogs`.

  Returns:
    abt_name: Name of abt file.
  """
  abt_files = (
      files + GlobAll(
          # These files are listed because we only included their parent
          # directories which will be ignored in abt.txt.
          'sys/fs/pstore/console-ramoops-0',
          'var/factory/log/*.log',
          'var/log/messages',
          'var/log/power_manager/powerd.LATEST',
      ))
  # DRAM calibration sections are binary blobs except DRAMK_LOG, so they are
  # excluded from the text-oriented abt.txt.
  abt_exclude_patterns = (
      exclude_patterns +
      [f for f in DRAM_CALIBRATION_LOG_FILES if f != 'DRAMK_LOG'])
  abt_name = 'abt.txt'
  file_utils.TouchFile(abt_name)
  for file in abt_files:
    # Directories are skipped: only regular files are embedded.
    if not os.path.isfile(file):
      continue
    if any(fnmatch.fnmatch(file, pattern) for pattern in abt_exclude_patterns):
      continue
    AppendLogToABT(abt_name, file)
  return abt_name
def GenerateDRAMCalibrationLog():
  """Extracts DRAM calibration logs from the firmware image.

  Reads the flash with `flashrom`, then extracts the calibration FMAP sections
  into the current working directory.

  Returns:
    The list of extracted DRAM calibration log files that actually exist.
  """
  with file_utils.UnopenedTemporaryFile() as bios_bin:
    Spawn(['flashrom', '-p', 'host', '-r', bios_bin], check_call=True,
          ignore_stdout=True, ignore_stderr=True)
    # This command generates files under current directory.
    Spawn(['dump_fmap', '-x', bios_bin] + DRAM_CALIBRATION_LOG_FILES,
          check_call=True, ignore_stdout=True, ignore_stderr=True)
  # Special case of trimming DRAMK_LOG. DRAMK_LOG is a readable file with some
  # noise appended, like this: TEXT + 0x00 + (0xff)*N
  if os.path.isfile('DRAMK_LOG'):
    with open('DRAMK_LOG', 'rb+') as f:
      data = f.read()
      f.seek(0)
      f.write(data.strip(b'\xff').strip(b'\x00'))
      f.truncate()
  return [log for log in DRAM_CALIBRATION_LOG_FILES if os.path.isfile(log)]
def SaveLogs(output_dir, archive_id=None, net=False, probe=False, dram=False,
             abt=False, var='/var', usr_local='/usr/local', etc='/etc'):
  """Saves dmesg and relevant log files to a new archive in output_dir.

  The archive will be named factory_bug.<description>.zip,
  where description is the 'archive_id' argument (if provided).

  Args:
    output_dir: The directory in which to create the file.
    archive_id: An optional short ID to put in the filename (so
      archives may be more easily differentiated).
    net: Whether to include network related logs or not.
    probe: True to include probe result in the logs.
    dram: True to include DRAM calibration logs.
    abt: True to include abt.txt for Android Bug Tool.
    var, usr_local, etc: Paths to the relevant directories.

  Returns:
    The name of the zip archive joined with `output_dir`.

  Raises:
    RuntimeError: if the target archive file already exists.
  """
  output_dir = os.path.realpath(output_dir)
  filename = 'factory_bug.'
  if archive_id:
    # '/' is stripped so the ID cannot contain path separators.
    filename += archive_id.replace('/', '') + '.'
  filename += 'zip'
  output_file = os.path.join(output_dir, filename)
  if os.path.exists(output_file):
    raise RuntimeError('Same filename [%s] exists. Use `factory_bug --id` or '
                       'add description in goofy UI dialog.' % filename)
  if sys_utils.InChroot():
    # Just save a dummy zip.
    with file_utils.TempDirectory() as d:
      open(os.path.join(os.path.join(d, 'dummy-factory-bug')), 'w').close()
      Spawn(['zip', os.path.join(d, output_file),
             os.path.join(d, 'dummy-factory-bug')], check_call=True)
    return output_file
  with file_utils.TempDirectory(prefix='factory_bug.') as tmp:
    # Link these paths so their path will remain the same in the zip archive.
    os.symlink(var, os.path.join(tmp, 'var'))
    file_utils.TryMakeDirs(os.path.join(tmp, 'usr'))
    os.symlink(usr_local, os.path.join(tmp, 'usr', 'local'))
    os.symlink(etc, os.path.join(tmp, 'etc'))
    # These are hardcoded paths because they are virtual filesystems. The data
    # we want is always in /dev and /sys, never on the real devices.
    os.symlink('/sys', os.path.join(tmp, 'sys'))
    os.chdir(tmp)
    Run = RunCommandAndSaveOutputToFile
    files = [
        Run('crossystem', filename='crossystem', include_stderr=True),
        Run('dmesg', filename='dmesg'),
        Run(['mosys', 'eventlog', 'list'], filename='mosys_eventlog',
            check_call=False, include_stderr=True),
        Run('audio_diagnostics', filename='audio_diagnostics', check_call=False,
            include_stderr=True),
        # Cannot zip an unseekable file, need to manually copy it instead.
        Run(['cat', 'sys/firmware/log'], filename='bios_log', check_call=False),
    ]
    if HasEC():
      files += [
          Run(['ectool', 'version'], filename='ec_version'),
          Run(['ectool', 'console'], filename='ec_console', check_call=False,
              include_stderr=True),
      ]
    if probe:
      files += [
          Run(['hwid', 'probe'], filename='probe_result.json',
              check_call=False),
      ]
    if dram:
      files += GenerateDRAMCalibrationLog()
    files += GlobAll(
        'etc/lsb-release',
        'sys/fs/pstore',
        'usr/local/etc/lsb-*',
        'usr/local/factory/TOOLKIT_VERSION',
        'usr/local/factory/hwid',
        'var/factory',
        'var/log',
        'var/spool/crash',
    )
    # Patterns for those files which are excluded from factory_bug.
    exclude_patterns = [
        'var/log/journal/*',
    ]
    if not net:
      exclude_patterns += ['var/log/net.log']
    if abt:
      files += [CreateABTFile(files, exclude_patterns)]
    file_utils.TryMakeDirs(os.path.dirname(output_file))
    logging.info('Saving %s to %s...', files, output_file)
    try:
      zip_command = (['zip', '-r', output_file] + files + ['--exclude'] +
                     exclude_patterns)
      Spawn(zip_command, cwd=tmp, check_call=True, read_stdout=True,
            read_stderr=True)
    except CalledProcessError as e:
      # 1 = non-fatal errors like "some files differ"
      if e.returncode != 1:
        logging.error(
            'Command "zip" exited with return code: %d\n'
            'Stdout: %s\n Stderr: %s\n', e.returncode, e.stdout, e.stderr)
        raise
    logging.warning('Wrote %s (%d bytes)', output_file,
                    os.path.getsize(output_file))
    return output_file
def ParseArgument():
  """Builds the argparse configuration and parses the command line.

  Returns:
    (parser, args)
    parser: the argparse.ArgumentParser object, exported for `parser.error()`.
    args: parsed command line arguments.
  """
  parser = argparse.ArgumentParser(
      description=DESCRIPTION, epilog=EXAMPLES,
      formatter_class=argparse.RawDescriptionHelpFormatter)
  parser.add_argument(
      '--output-dir', '-o', metavar='DIR',
      help=('Output directory in which to save file. Normally default to '
            f'`/tmp`, but defaults to `{USB_ROOT_OUTPUT_DIR}` when booted '
            'from USB.'))
  parser.add_argument(
      '--save-to-removable', '-s', action='store_true',
      help=('Save logs to a USB stick. (Using any mounted USB drive partition '
            'if available, otherwise attempting to temporarily mount one)'))
  parser.add_argument(
      '--input-device', '-d', metavar='DEV',
      help=('Collect logs from the specific device. Input device is detected '
            'automatically if omitted.'))
  parser.add_argument(
      '--net', action='store_true',
      help=('Whether to include network related logs or not. Network logs are '
            'excluded by default.'))
  parser.add_argument(
      '--id', '-i', metavar='ID',
      help=('Short ID to include in file name to help differentiate archives.'))
  parser.add_argument('--probe', action='store_true',
                      help=('Include probe result in the logs.'))
  parser.add_argument('--dram', action='store_true',
                      help=('Include DRAM calibration info in the logs.'))
  # Fixed help text: this flag *disables* abt.txt creation (store_false sets
  # args.abt to False); the old text described the default behavior instead.
  parser.add_argument('--no-abt', action='store_false', dest='abt',
                      help=('Do not create abt.txt for "Android Bug Tool". '
                            'abt.txt is created by default.'))
  parser.add_argument(
      '--full', action='store_true',
      help=('Produce a complete factory_bug. When --full is set --net, --probe'
            ' and --dram are implied. For details see the description of each '
            'option.'))
  parser.add_argument('--verbosity', '-v', action='count', default=0,
                      help=('Change the logging verbosity.'))
  return parser, parser.parse_args()
def IsBlockDevice(dev_path):
  """Returns True if dev_path exists and is a block device."""
  if not os.path.exists(dev_path):
    return False
  return stat.S_ISBLK(os.stat(dev_path).st_mode)
@contextlib.contextmanager
def InputDevice(root_is_removable, input_device):
  """Get input paths. See `MountInputDevice`.

  Args:
    root_is_removable: True if the current rootfs device is removable.
    input_device: Block device path to collect logs from, or None to decide
      automatically.

  Yields:
    A dict of path overrides for `SaveLogs`; empty when logs are collected
    from the currently-running system.
  """
  if input_device:
    if not IsBlockDevice(input_device):
      logging.error('"%s" is not a block device.', input_device)
      sys.exit(1)
  elif root_is_removable:
    # Booted from a removable device: collect logs from the on board rootfs.
    input_device = GetOnboardRootDevice()
    logging.info('Root is removable. Try to collect logs from "%s"',
                 input_device)
  else:
    # Collect logs from the running system; no extra mounts are needed.
    yield {}
    return
  with MountInputDevice(input_device) as paths:
    yield paths
@contextlib.contextmanager
def OutputDevice(root_is_removable, save_to_removable, output_dir, parser):
  """Get output path.

  Args:
    root_is_removable: True if the current rootfs device is removable.
    save_to_removable: True to save logs onto a removable device.
    output_dir: Explicit output directory, or a falsy value to use defaults.
    parser: The ArgumentParser, used to report argument errors.

  Yields:
    The directory path where the archive should be written.
  """
  if save_to_removable:
    if root_is_removable:
      # Already booted from the removable device; the option is contradictory.
      parser.error(
          '--save-to-removable only applies when root device is not removable.')
    with MountRemovable() as mount:
      yield mount.mount_point
    return
  if not output_dir:
    # Default: the USB stateful partition when booted from USB, else /tmp.
    output_dir = USB_ROOT_OUTPUT_DIR if root_is_removable else '/tmp'
  yield output_dir
def main():
  """Entry point: parses arguments, mounts devices, and saves the logs."""
  parser, args = ParseArgument()
  # Each `-v` lowers the logging threshold by one level (10).
  logging.basicConfig(level=logging.WARNING - 10 * args.verbosity)
  # --full implies --net, --probe and --dram.
  options = dict((key, getattr(args, key) or args.full)
                 for key in ['net', 'probe', 'dram'])
  root_is_removable = IsDeviceRemovable(GetRootDevice())
  input_device = InputDevice(root_is_removable, args.input_device)
  output_device = OutputDevice(root_is_removable, args.save_to_removable,
                               args.output_dir, parser)
  with input_device as input_paths, output_device as output_path:
    SaveLogs(output_path, args.id, **options, **input_paths)


if __name__ == '__main__':
  main()
|
#!/usr/bin/env python3
#
# Copyright 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
from collections import namedtuple
import contextlib
import fnmatch
from glob import glob
from itertools import chain
import logging
import os
import stat
import sys
from cros.factory.utils import file_utils
from cros.factory.utils.process_utils import CalledProcessError, Spawn
from cros.factory.utils import sys_utils
# The candidate of device names which the ChromeOS may mount on rootfs.
# Assume that ChromeOS has only one device, and always has a smaller index.
# That is, it should always be `sda`, `nvme0n1`... not `sdb`, `nvme0n2`...
ROOT_DEV_NAME_CANDIDATE = ['sda', 'nvme0n1', 'mmcblk0']
DRAM_CALIBRATION_LOG_FILES = [
# Plain text logs for devices with huge output in memory training, for
# example Kukui.
'DRAMK_LOG',
# On ARM devices that training data is unlikely to change and used by both
# recovery and normal boot, for example Trogdor.
'RO_DDR_TRAINING',
# On ARM devices that may retrain due to aging, for example Kukui.
'RW_DDR_TRAINING',
# On most X86 devices, for recovery boot.
'RECOVERY_MRC_CACHE',
# On most x86 devices, for normal boot.
'RW_MRC_CACHE',
]
# Root directory to use when root partition is USB
USB_ROOT_OUTPUT_DIR = '/mnt/stateful_partition/factory_bug'
DESCRIPTION = """Generate and save zip archive of log files.
This tool always tries to collect log from the on board storage device.
That is, if the current rootfs is a removable devices like usb drive,
this tool will try to find the on board device and collect log from it.
"""
EXAMPLES = """Examples:
When booting from the internal hard disk:
# Save logs to /tmp
factory_bug
# Save logs to a USB drive (using the first one already mounted, or the
# first mountable on any USB device if none is mounted yet)
factory_bug --save-to-removable
When booting from an USB drive:
# Mount on board rootfs, collect logs from there, and save logs to the USB
# drive's stateful partition
factory_bug
# Specify the input device for logs collecting.
factory_bug --input-device /dev/sda
"""
# Info about a mounted partition.
#
# Properties:
# dev: The device that was mounted or re-used.
# mount_point: The mount point of the device.
# temporary: Whether the device is being temporarily mounted.
MountUSBInfo = namedtuple('MountUSBInfo', ['dev', 'mount_point', 'temporary'])
def GetRootDevice():
  """Returns the name of the block device holding the current rootfs.

  Passes '-d' so `rootdev` reports the physical block device instead of a
  virtual one such as `/dev/dm-0`.

  Returns:
    The rootfs device name, i.e. sda.
  """
  stdout = Spawn(['rootdev', '-s', '-d'], read_stdout=True,
                 check_call=True).stdout_data
  return os.path.basename(stdout.strip())
def IsDeviceRemovable(dev):
  """Checks whether a block device is removable.

  Args:
    dev: Device name, i.e. sda.

  Returns:
    True if the kernel marks the device as removable.
  """
  removable_flag = file_utils.ReadOneLine(f'/sys/block/{dev}/removable')
  return removable_flag == '1'
def GetOnboardRootDevice():
  """Finds the device name of the on board rootfs.

  Returns:
    The on board rootfs device name, i.e. sda.

  Raises:
    RuntimeError: if the on board rootfs device cannot be determined.
  """
  devs = []
  for dev in ROOT_DEV_NAME_CANDIDATE:
    if os.path.isdir(f'/sys/block/{dev}') and not IsDeviceRemovable(dev):
      devs.append(dev)
  if not devs:
    raise RuntimeError(
        'Cannot find on board rootfs device. None of these devices exists and '
        f'is not removable: {ROOT_DEV_NAME_CANDIDATE}')
  if len(devs) > 1:
    raise RuntimeError(
        'Multiple devices are found, cannot determine the on board rootfs '
        f'device: {devs}')
  return devs[0]
def GetDeviceMountPointMapping(dev):
  """Gets a mapping of partition -> mount_point.

  Args:
    dev: Device name, i.e. sda.

  Returns:
    A dict mapping each partition name to its mount point, or to None when
    the partition is not mounted.
  """
  output = Spawn(['lsblk', '-nro', 'NAME,MOUNTPOINT', f'/dev/{dev}'],
                 read_stdout=True, check_call=True).stdout_data
  mapping = {}
  for line in output.splitlines():
    fields = line.split()
    if fields:
      mapping[fields[0]] = fields[1] if len(fields) > 1 else None
  return mapping
def GetPartitionName(dev, index):
  """Builds a partition name from a device name and a partition index.

  SATA devices (sdX) concatenate the index directly; other devices (nvme,
  mmcblk, ...) need a `p` separator.

  Returns:
    The partition name, e.g. 'sda1' or 'nvme0n1p3'.
  """
  if dev.startswith('sd'):
    return f'{dev}{index}'
  return f'{dev}p{index}'
@contextlib.contextmanager
def MountInputDevice(dev):
  """Mount the rootfs on the device and yield the paths.

  Try to mount each partition we need if it is not already mounted.

  According to `src/platform2/init/upstart/test-init/factory_utils.sh`, when
  device is booted with a test image (usually it should be, if the devices have
  run factory toolkit), there are two partitions 1 and 3 which contain the info
  we need.

  `/etc` is under root partition which is partition 3.
  `/usr/local` is bind-mounted to '/mnt/stateful_partition/dev_image' which is
  `dev_image` under partition 1.
  Normally, '/var' is mounted by `mount_encrypted`. However, if factory toolkit
  is enabled, `/var` is bind-mounted to '/mnt/stateful_partition/var' which is
  `var` under partition 1.

  Note that partition 3 (rootfs) is mounted with ext2. Don't use ext4 which may
  modify fs superblock information and making rootfs verification fail.

  Args:
    dev: Device name to collect logs from, i.e. sda.

  Yields:
    A dict of paths indicating where should the log directories be redirected
    to.
  """
  mount_point_map = GetDeviceMountPointMapping(dev)
  with contextlib.ExitStack() as stack:
    mount = {}
    # Partition 1 is the stateful partition (ext4); partition 3 is rootfs,
    # mounted as ext2 on purpose (see docstring).
    for part_id, fstype in [(1, 'ext4'), (3, 'ext2')]:
      part = GetPartitionName(dev, part_id)
      if mount_point_map[part]:
        # Already mounted: reuse the existing mount point.
        mount[part_id] = mount_point_map[part]
      else:
        # If the partition is mounted by us, unmount it at the end.
        unmounter = sys_utils.MountPartition(f'/dev/{part}', fstype=fstype)
        mount[part_id] = stack.enter_context(unmounter)
    yield {
        'var': os.path.join(mount[1], 'var'),
        'usr_local': os.path.join(mount[1], 'dev_image'),
        'etc': os.path.join(mount[3], 'etc'),
    }
@contextlib.contextmanager
def MountRemovable(read_only=False):
  """Mounts (or re-uses) a removable device.

  Scan all the devices under /sys/block/, use the first removable device. Check
  if there is any mounted partition. Yield that partition or try to mount a
  partition of the device.

  Args:
    read_only: If we mount device we mount it read only. Used by goofy_rpc.py

  Yields:
    MountUSBInfo: This is used by goofy_rpc.py

  Raises:
    RuntimeError: if no removable device is available or cannot mount any
      partition of a removable device.
  """
  devices = [os.path.basename(x) for x in glob('/sys/block/*')]
  removables = [x for x in devices if IsDeviceRemovable(x)]
  if not removables:
    raise RuntimeError('No removable device is available.')
  if len(removables) > 1:
    logging.warning('More than one removable devices are found: %s', removables)
  # Only the first removable device found is used.
  dev = removables[0]
  mount_point_map = GetDeviceMountPointMapping(dev)
  # Prefer a partition that is already mounted; it is not unmounted afterwards
  # (temporary=False).
  for part, mount_point in mount_point_map.items():
    if mount_point:
      logging.info('Using mounted device %s on %s', part, mount_point)
      yield MountUSBInfo(dev=dev, mount_point=mount_point, temporary=False)
      # The device is synced once to make sure the data is written to the device
      # since we cannot guarantee that this device will be unmount correctly.
      Spawn(['sync'], call=True)
      return
  # Try to mount the whole device first, then try to mount each partition.
  # (sorted() puts the bare device name before its partitions.)
  partitions = sorted(x for x in mount_point_map)
  for part in partitions:
    try:
      mounter = sys_utils.MountPartition(f'/dev/{part}', rw=not read_only)
    except Exception:
      logging.debug('Mount %s failed.', part)
      continue
    # We mounted this partition ourselves, so it is unmounted when the `with`
    # block exits (temporary=True).
    with mounter as mount_point:
      logging.warning('Mount success. Mounted `%s` at `%s`', part, mount_point)
      yield MountUSBInfo(dev=dev, mount_point=mount_point, temporary=True)
      return
  raise RuntimeError(f'Unable to mount any of {partitions}')
def GlobAll(*args):
  """Globs every pattern and returns the combined list of matches.

  Args:
    *args: Glob pattern strings.

  Returns:
    A flat list containing the matches of every pattern, in order.
  """
  matches = []
  for pattern in args:
    matches.extend(glob(pattern))
  return matches
def RunCommandAndSaveOutputToFile(command, filename, check_call=True,
                                  include_stderr=False):
  """Runs a command and saves its output to a file.

  Args:
    command: Command passed to `Spawn()`.
    filename: Filename to write the output to.
    check_call: If True, raise when the command returns non-zero.
    include_stderr: Whether to include stderr in the output file.

  Returns:
    filename
  """
  with open(filename, 'w') as f:
    options = {
        'stdout': f,
    }
    if check_call:
      options['check_call'] = True
    else:
      options['call'] = True
    if include_stderr:
      options['stderr'] = f
    else:
      options['ignore_stderr'] = True
    logging.info('Generating %s', filename)
    # Fixed typo in the log format string: "Inlcude" -> "Include".
    logging.debug('Output: %s, Check Call: %s, Include Stderr: %s, Command: %s',
                  filename, check_call, include_stderr, command)
    Spawn(command, **options)
  return filename
def HasEC():
  """Checks whether the platform has an EC chip.

  SuperIO-based platforms ship without an EC chip, so probe via `ectool` first.

  Returns:
    True if `ectool version` runs successfully.
  """
  try:
    proc = Spawn(['ectool', 'version'], read_stdout=True, ignore_stderr=True)
    return proc.returncode == 0
  except OSError:
    # The system might not have 'ectool' command if the platform has no EC chip.
    return False
def AppendLogToABT(abt_file, log_file):
  """Appends the content of log_file to abt_file in ABT multi-line format.

  Args:
    abt_file: Path of the abt.txt file to append to.
    log_file: Path of the log file whose bytes are embedded.
  """
  for path in (abt_file, log_file):
    if not os.path.isfile(path):
      logging.warning('%s is not a valid file.', path)
      return
  logging.debug('ABT: adding %s.', log_file)
  with open(abt_file, 'ab') as out:
    out.write(b'%s=<multi-line>\n' % log_file.encode('utf-8'))
    out.write(b'---------- START ----------\n')
    out.write(file_utils.ReadFile(log_file, encoding=None))
    out.write(b'---------- END ----------\n')
def CreateABTFile(files, exclude_patterns):
  """Create abt.txt to provide easier log access.

  We utilize the ABT browser extension to provide easier log access on browser,
  which needs an embedded `abt.txt` in the factory bug archive.
  We don't want to put all the contents into abt.txt since this will generate
  an archive which size is twice than the original one.
  By default, all directories are ignored from abt.txt. Extra include files and
  exclude patterns are listed below.

  Args:
    files: The current file list of `SaveLogs`.
    exclude_patterns: The current exclude patterns of `SaveLogs`.

  Returns:
    abt_name: Name of abt file.
  """
  abt_files = (
      files + GlobAll(
          # These files are listed because we only included their parent
          # directories which will be ignored in abt.txt.
          'sys/fs/pstore/console-ramoops-0',
          'var/factory/log/*.log',
          'var/log/messages',
          'var/log/power_manager/powerd.LATEST',
      ))
  # DRAM calibration sections are binary blobs except DRAMK_LOG, so they are
  # excluded from the text-oriented abt.txt.
  abt_exclude_patterns = (
      exclude_patterns +
      [f for f in DRAM_CALIBRATION_LOG_FILES if f != 'DRAMK_LOG'])
  abt_name = 'abt.txt'
  file_utils.TouchFile(abt_name)
  for file in abt_files:
    # Directories are skipped: only regular files are embedded.
    if not os.path.isfile(file):
      continue
    if any(fnmatch.fnmatch(file, pattern) for pattern in abt_exclude_patterns):
      continue
    AppendLogToABT(abt_name, file)
  return abt_name
def GenerateDRAMCalibrationLog():
  """Extracts DRAM calibration logs from the firmware image.

  Reads the flash with `flashrom`, then extracts the calibration FMAP sections
  into the current working directory.

  Returns:
    The list of extracted DRAM calibration log files that actually exist.
  """
  with file_utils.UnopenedTemporaryFile() as bios_bin:
    Spawn(['flashrom', '-p', 'host', '-r', bios_bin], check_call=True,
          ignore_stdout=True, ignore_stderr=True)
    # This command generates files under current directory.
    Spawn(['dump_fmap', '-x', bios_bin] + DRAM_CALIBRATION_LOG_FILES,
          check_call=True, ignore_stdout=True, ignore_stderr=True)
  # Special case of trimming DRAMK_LOG. DRAMK_LOG is a readable file with some
  # noise appended, like this: TEXT + 0x00 + (0xff)*N
  if os.path.isfile('DRAMK_LOG'):
    with open('DRAMK_LOG', 'rb+') as f:
      data = f.read()
      f.seek(0)
      f.write(data.strip(b'\xff').strip(b'\x00'))
      f.truncate()
  return [log for log in DRAM_CALIBRATION_LOG_FILES if os.path.isfile(log)]
def SaveLogs(output_dir, archive_id=None, net=False, probe=False, dram=False,
             abt=False, var='/var', usr_local='/usr/local', etc='/etc'):
  """Saves dmesg and relevant log files to a new archive in output_dir.

  The archive will be named factory_bug.<description>.zip,
  where description is the 'archive_id' argument (if provided).

  Args:
    output_dir: The directory in which to create the file.
    archive_id: An optional short ID to put in the filename (so
      archives may be more easily differentiated).
    net: Whether to include network related logs or not.
    probe: True to include probe result in the logs.
    dram: True to include DRAM calibration logs.
    abt: True to include abt.txt for Android Bug Tool.
    var, usr_local, etc: Paths to the relevant directories.

  Returns:
    The name of the zip archive joined with `output_dir`.

  Raises:
    RuntimeError: if the target archive file already exists.
  """
  output_dir = os.path.realpath(output_dir)
  filename = 'factory_bug.'
  if archive_id:
    # '/' is stripped so the ID cannot contain path separators.
    filename += archive_id.replace('/', '') + '.'
  filename += 'zip'
  output_file = os.path.join(output_dir, filename)
  if os.path.exists(output_file):
    raise RuntimeError('Same filename [%s] exists. Use `factory_bug --id` or '
                       'add description in goofy UI dialog.' % filename)
  if sys_utils.InChroot():
    # Just save a dummy zip.
    with file_utils.TempDirectory() as d:
      open(os.path.join(os.path.join(d, 'dummy-factory-bug')), 'w').close()
      Spawn(['zip', os.path.join(d, output_file),
             os.path.join(d, 'dummy-factory-bug')], check_call=True)
    return output_file
  with file_utils.TempDirectory(prefix='factory_bug.') as tmp:
    # Link these paths so their path will remain the same in the zip archive.
    os.symlink(var, os.path.join(tmp, 'var'))
    file_utils.TryMakeDirs(os.path.join(tmp, 'usr'))
    os.symlink(usr_local, os.path.join(tmp, 'usr', 'local'))
    os.symlink(etc, os.path.join(tmp, 'etc'))
    # These are hardcoded paths because they are virtual filesystems. The data
    # we want is always in /dev and /sys, never on the real devices.
    os.symlink('/sys', os.path.join(tmp, 'sys'))
    os.chdir(tmp)
    Run = RunCommandAndSaveOutputToFile
    files = [
        Run('crossystem', filename='crossystem', include_stderr=True),
        Run('dmesg', filename='dmesg'),
        Run(['mosys', 'eventlog', 'list'], filename='mosys_eventlog',
            check_call=False, include_stderr=True),
        Run('audio_diagnostics', filename='audio_diagnostics', check_call=False,
            include_stderr=True),
        # Cannot zip an unseekable file, need to manually copy it instead.
        Run(['cat', 'sys/firmware/log'], filename='bios_log', check_call=False),
    ]
    if HasEC():
      files += [
          Run(['ectool', 'version'], filename='ec_version'),
          Run(['ectool', 'console'], filename='ec_console', check_call=False,
              include_stderr=True),
      ]
    if probe:
      files += [
          Run(['hwid', 'probe'], filename='probe_result.json',
              check_call=False),
      ]
    if dram:
      files += GenerateDRAMCalibrationLog()
    files += GlobAll(
        'etc/lsb-release',
        'sys/fs/pstore',
        'usr/local/etc/lsb-*',
        'usr/local/factory/TOOLKIT_VERSION',
        'usr/local/factory/hwid',
        'var/factory',
        'var/log',
        'var/spool/crash',
    )
    # Patterns for those files which are excluded from factory_bug.
    exclude_patterns = [
        'var/log/journal/*',
    ]
    if not net:
      exclude_patterns += ['var/log/net.log']
    if abt:
      files += [CreateABTFile(files, exclude_patterns)]
    file_utils.TryMakeDirs(os.path.dirname(output_file))
    logging.info('Saving %s to %s...', files, output_file)
    try:
      zip_command = (['zip', '-r', output_file] + files + ['--exclude'] +
                     exclude_patterns)
      Spawn(zip_command, cwd=tmp, check_call=True, read_stdout=True,
            read_stderr=True)
    except CalledProcessError as e:
      # 1 = non-fatal errors like "some files differ"
      if e.returncode != 1:
        logging.error(
            'Command "zip" exited with return code: %d\n'
            'Stdout: %s\n Stderr: %s\n', e.returncode, e.stdout, e.stderr)
        raise
    logging.warning('Wrote %s (%d bytes)', output_file,
                    os.path.getsize(output_file))
    return output_file
def ParseArgument():
  """Builds the argparse configuration and parses the command line.

  Returns:
    (parser, args)
    parser: the argparse.ArgumentParser object, exported for `parser.error()`.
    args: parsed command line arguments.
  """
  parser = argparse.ArgumentParser(
      description=DESCRIPTION, epilog=EXAMPLES,
      formatter_class=argparse.RawDescriptionHelpFormatter)
  parser.add_argument(
      '--output-dir', '-o', metavar='DIR',
      help=('Output directory in which to save file. Normally default to '
            f'`/tmp`, but defaults to `{USB_ROOT_OUTPUT_DIR}` when booted '
            'from USB.'))
  parser.add_argument(
      '--save-to-removable', '-s', action='store_true',
      help=('Save logs to a USB stick. (Using any mounted USB drive partition '
            'if available, otherwise attempting to temporarily mount one)'))
  parser.add_argument(
      '--input-device', '-d', metavar='DEV',
      help=('Collect logs from the specific device. Input device is detected '
            'automatically if omitted.'))
  parser.add_argument(
      '--net', action='store_true',
      help=('Whether to include network related logs or not. Network logs are '
            'excluded by default.'))
  parser.add_argument(
      '--id', '-i', metavar='ID',
      help=('Short ID to include in file name to help differentiate archives.'))
  parser.add_argument('--probe', action='store_true',
                      help=('Include probe result in the logs.'))
  parser.add_argument('--dram', action='store_true',
                      help=('Include DRAM calibration info in the logs.'))
  # Fixed help text: this flag *disables* abt.txt creation (store_false sets
  # args.abt to False); the old text described the default behavior instead.
  parser.add_argument('--no-abt', action='store_false', dest='abt',
                      help=('Do not create abt.txt for "Android Bug Tool". '
                            'abt.txt is created by default.'))
  parser.add_argument(
      '--full', action='store_true',
      help=('Produce a complete factory_bug. When --full is set --net, --probe'
            ' and --dram are implied. For details see the description of each '
            'option.'))
  parser.add_argument('--verbosity', '-v', action='count', default=0,
                      help=('Change the logging verbosity.'))
  return parser, parser.parse_args()
def IsBlockDevice(dev_path):
  """Returns True if dev_path exists and is a block device."""
  if not os.path.exists(dev_path):
    return False
  return stat.S_ISBLK(os.stat(dev_path).st_mode)
@contextlib.contextmanager
def InputDevice(root_is_removable, input_device):
  """Get input paths. See `MountInputDevice`.

  Args:
    root_is_removable: True if the current rootfs device is removable.
    input_device: Block device path to collect logs from, or None to decide
      automatically.

  Yields:
    A dict of path overrides for `SaveLogs`; empty when logs are collected
    from the currently-running system.
  """
  if input_device:
    if not IsBlockDevice(input_device):
      logging.error('"%s" is not a block device.', input_device)
      sys.exit(1)
  elif root_is_removable:
    # Booted from a removable device: collect logs from the on board rootfs.
    input_device = GetOnboardRootDevice()
    logging.info('Root is removable. Try to collect logs from "%s"',
                 input_device)
  else:
    # Collect logs from the running system; no extra mounts are needed.
    yield {}
    return
  with MountInputDevice(input_device) as paths:
    yield paths
@contextlib.contextmanager
def OutputDevice(root_is_removable, save_to_removable, output_dir, parser):
  """Get output path.

  Args:
    root_is_removable: True if the current rootfs device is removable.
    save_to_removable: True to save logs onto a removable device.
    output_dir: Explicit output directory, or a falsy value to use defaults.
    parser: The ArgumentParser, used to report argument errors.

  Yields:
    The directory path where the archive should be written.
  """
  if save_to_removable:
    if root_is_removable:
      # Already booted from the removable device; the option is contradictory.
      parser.error(
          '--save-to-removable only applies when root device is not removable.')
    with MountRemovable() as mount:
      yield mount.mount_point
    return
  if not output_dir:
    # Default: the USB stateful partition when booted from USB, else /tmp.
    output_dir = USB_ROOT_OUTPUT_DIR if root_is_removable else '/tmp'
  yield output_dir
def main():
  """Entry point: parse args, mount devices as needed, and save logs."""
  parser, args = ParseArgument()
  # Each -v raises verbosity one level (WARNING -> INFO -> DEBUG ...).
  logging.basicConfig(level=logging.WARNING - 10 * args.verbosity)
  # --full implies --net, --probe and --dram.
  options = {key: getattr(args, key) or args.full
             for key in ('net', 'probe', 'dram')}
  root_is_removable = IsDeviceRemovable(GetRootDevice())
  with InputDevice(root_is_removable, args.input_device) as input_paths, \
      OutputDevice(root_is_removable, args.save_to_removable, args.output_dir,
                   parser) as output_path:
    SaveLogs(output_path, args.id, **options, **input_paths)
# Allow running this module directly as a command-line tool.
if __name__ == '__main__':
  main()
|
en
| 0.874809
|
#!/usr/bin/env python3 # # Copyright 2012 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # The candidate of device names which the ChromeOS may mount on rootfs. # Assume that ChromeOS has only one device, and always has a smaller index. # That is, it should always be `sda`, `nvme0n1`... not `sdb`, `nvme0n2`... # Plain text logs for devices with huge output in memory training, for # example Kukui. # On ARM devices that training data is unlikely to change and used by both # recovery and normal boot, for example Trogdor. # On ARM devices that may retrain due to aging, for example Kukui. # On most X86 devices, for recovery boot. # On most x86 devices, for normal boot. # Root directory to use when root partition is USB Generate and save zip archive of log files. This tool always tries to collect log from the on board storage device. That is, if the current rootfs is a removable devices like usb drive, this tool will try to find the on board device and collect log from it. Examples: When booting from the internal hard disk: # Save logs to /tmp factory_bug # Save logs to a USB drive (using the first one already mounted, or the # first mountable on any USB device if none is mounted yet) factory_bug --save-to-removable When booting from an USB drive: # Mount on board rootfs, collect logs from there, and save logs to the USB # drive's stateful partition factory_bug # Specify the input device for logs collecting. factory_bug --input-device /dev/sda # Info about a mounted partition. # # Properties: # dev: The device that was mounted or re-used. # mount_point: The mount point of the device. # temporary: Whether the device is being temporarily mounted. Get the device name on which current rootfs is mounted. Add '-d' option to return the block device. Otherwise `rootdev` may return virtual device like `/dev/dm-0`. Returns: The rootfs device name, i.e. sda. 
Check if device is a removable device. Args: dev: i.e. sda. Returns: True if device is removable. Get the device name of the on board rootfs. Returns: The on board rootfs device name, i.e. sda. Raises: RuntimeError if cannot determine the on board rootfs device. Get a mapping of partition -> mount_point. Args: dev: i.e. sda. Returns: Partition to mount_point mapping. If the partition is not mounted, mount_point is None. Get partition name from device name and index. Returns: Partition name, add `p` between device name and index if the device is not a sata device. Mount the rootfs on the device and yield the paths. Try to mount each partition we need if it is not already mounted. According to `src/platform2/init/upstart/test-init/factory_utils.sh`, when device is booted with a test image (usually it should be, if the devices have run factory toolkit), there are two partitions 1 and 3 which contain the info we need. `/etc` is under root partition which is partition 3. `/usr/local` is bind-mounted to '/mnt/stateful_partition/dev_image' which is `dev_image` under partition 1. Normally, '/var' is mounted by `mount_encrypted`. However, if factory toolkit is enabled, `/var` is bind-mounted to '/mnt/stateful_partition/var' which is `var` under partition 1. Note that partition 3 (rootfs) is mounted with ext2. Don't use ext4 which may modify fs superblock information and making rootfs verification fail. Yields: A dict of paths indicating where should the log directories be redirected to. # If the partition is mounted by us, unmount it at the end. Mounts (or re-uses) a removable device. Scan all the devices under /sys/block/, use the first removable device. Check if there is any mounted partition. Yield that partition or try to mount a partition of the device. Args: read_only: If we mount device we mount it read only. 
Used by goofy_rpc.py Yields: MountUSBInfo: This is used by goofy_rpc.py Raises: RuntimeError if no removable device is available or cannot mount any partition of a removable device. # The device is synced once to make sure the data is written to the device # since we cannot guarantee that this device will be unmount correctly. # Try to mount the whole device first, then try to mount each partition. `glob` all arguments. For prettier formatting. Args: Paths for `glob`. Returns: List of all globed paths. Run command and save its output to file. Args: command: Command pass to `Spawn()`. filename: Filename to write output. check_call: If true, check if the command return non-zero. include_stderr: To include stderr in output file or not. Returns: filename SuperIO-based platform has no EC chip, check its existence first. Returns: True if the platform has EC chip. # The system might not have 'ectool' command if the platform has no EC chip. Create abt.txt to provide easier log access. We utilize the ABT browser extension to provide easier log access on browser, which needs an embedded `abt.txt` in the factory bug archive. We don't want to put all the contents into abt.txt since this will generate an archive which size is twice than the original one. By default, all directories are ignored from abt.txt. Extra include files and exclude patterns are listed below. Args: files: The current file list of `SaveLogs`. exclude_patterns: The current exclude patterns of `SaveLogs`. Returns: abt_name: Name of abt file. # These files are listed because we only included their parent # directories which will be ignored in abt.txt. # This command generates files under current directory. # Special case of trimming DRAMK_LOG. DRAMK_LOG is a readable file with some # noise appended, like this: TEXT + 0x00 + (0xff)*N Saves dmesg and relevant log files to a new archive in output_dir. 
The archive will be named factory_bug.<description>.zip, where description is the 'archive_id' argument (if provided). Args: output_dir: The directory in which to create the file. include_network_log: Whether to include network related logs or not. archive_id: An optional short ID to put in the filename (so archives may be more easily differentiated). probe: True to include probe result in the logs. dram: True to include DRAM calibration logs. abt: True to include abt.txt for Android Bug Tool. var, usr_local, etc: Paths to the relevant directories. Returns: The name of the zip archive joined with `output_dir`. # Just save a dummy zip. # Link these paths so their path will remain the same in the zip archive. # These are hardcoded paths because they are virtual filesystems. The data # we want is always in /dev and /sys, never on the real devices. # Cannot zip an unseekable file, need to manually copy it instead. # Patterns for those files which are excluded from factory_bug. # 1 = non-fatal errors like "some files differ" argparse config Returns: (parser, args) parser: the argparse.ArgumentParser object, export for `parser.error()`. args: parsed command line arguments. Get input paths. See `MountInputDevice`. Get output path.
| 1.819746
| 2
|
servizi/veicoli/views.py
|
l-dfa/django-spese
| 0
|
6625750
|
<filename>servizi/veicoli/views.py
# veicoli/views.py
''' app veicoli views
- add
- change
- index
- detail
- reports
- calculate_gas_consumption
'''
#{ module history
# ldfa @ 2017.01.12 index: + filtering events by django-filter
# ldfa @ 2017.01.11 index: + filtering events by django-filter
# ldfa @ 2016.12.12 change: + transaction.atomic
# change: + check current user against expense user
# detail: + check current user against expense user
# ldfa @ 2016.dec adding change
# ldfa @ 2016.11.12 initial
#}
# python debugging
import pdb
import sys
import logging
log = logging.getLogger(__name__) # log.debug, info, warning, error, critical("Hey there it works!!")
# django managing requests
from django.shortcuts import get_object_or_404, render
from django.http import Http404
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
# django forms
from django.forms import modelform_factory
from django.utils import timezone
# from django.core.exceptions import ValidationError
# from django.utils.datastructures import MultiValueDictKeyError
from django.contrib import messages
# django authorization
from django.contrib.auth.decorators import login_required
# django models
from django.db import transaction
from django.db.models import Max
from django.db.models import Sum
# spese & taggit
from spese.models import Expense, Account
from spese.forms import ExpenseForm
from spese.utils import get_accounts
from spese.views import RepoItem
from taggit.models import Tag
from .models import VEvent, Vehicle, Event
from .forms import EventForm
from .filters import EventFilter
@login_required(login_url="/login/")
def add(request):
    """Create a new vehicle event together with its expense.

    GET renders a blank form pair (expense + event) prefilled with the
    last recorded odometer value; POST validates both forms and creates
    the Expense and the Event atomically.
    """
    page_identification = 'Veicoli: new event'
    accounts = Account.objects.filter(users=request.user)
    vehicles = Vehicle.objects.filter(user=request.user)
    vevents = VEvent.objects.all()
    most_probable_vevent = vevents[0] if vevents else None
    # Bug fix: Max('km') always returns the 'km__max' key, but its value
    # is None when there are no events; the old membership test never
    # triggered and last_km could end up None.
    last_km = Event.objects.all().aggregate(Max('km'))['km__max'] or 0
    account_selected = None
    tags_selected = []
    vehicle_selected = None
    vevent_selected = None
    if request.method == "POST":
        form1 = ExpenseForm(request.POST, prefix='form1')
        form2 = EventForm(request.POST, prefix='form2')
        account_selected = int(request.POST['account'])
        tags_selected = request.POST.getlist('choice')   # [] when no choice
        vehicle_selected = int(request.POST['vehicle'])
        vevent_selected = int(request.POST['vevent'])
        if form1.is_valid() and form2.is_valid():
            try:
                with transaction.atomic():
                    expense = form1.save(commit=False)
                    expense.user = request.user
                    expense.account = Account.objects.get(id=account_selected)
                    expense.save()
                    # Reuse tags_selected instead of re-reading request.POST.
                    expense.tags.set(*tags_selected, clear=True)
                    expense.save()
                    event = form2.save(commit=False)
                    event.expense = expense
                    event.vehicle = Vehicle.objects.get(id=vehicle_selected)
                    event.vevent = VEvent.objects.get(id=vevent_selected)
                    event.save()
                    msg = 'success creating expense {}, event {} for user {}, vehicle {} '.format(expense.id, event.id, expense.user.username, event.vehicle.name)
                    log.info(msg)
                    messages.success(request, msg)
            except Exception:
                # Narrowed from a bare `except:` so SystemExit and
                # KeyboardInterrupt are no longer swallowed.
                # Error: redisplay the expense change form.
                msg = 'Error <{}> while trying to create expense'.format(sys.exc_info()[0])
                log.error(msg)
                messages.error(request, msg)
            else:
                if 'save' in request.POST.keys():
                    return HttpResponseRedirect(reverse('veicoli:detail', args=(expense.id,)))
    else:
        form1 = ExpenseForm(initial={
            'description': most_probable_vevent.description if most_probable_vevent else None,
            'date': timezone.now(),
        }, prefix='form1')
        form2 = EventForm(initial={
            'km': last_km,
            'unit_cost': 1,
        }, prefix='form2')
    alltags = Tag.objects.all()
    return render(request, 'veicoli/add.html', {'page_identification': page_identification,
                                                'operation': 'new',
                                                'form1': form1,
                                                'form2': form2,
                                                'accounts': accounts,
                                                'account_selected': account_selected,
                                                'alltags': alltags,
                                                'tags_selected': tags_selected,
                                                'vehicles': vehicles,
                                                'vehicle_selected': vehicle_selected,
                                                'vevents': vevents,
                                                'vevent_selected': vevent_selected,
                                                })
@login_required(login_url="/login/")
@transaction.atomic
def change(request, expense_id):
    """Edit an existing vehicle event and its expense.

    Only the owner of the expense may edit it; anyone else is redirected
    to the index with an error message.  POST saves expense and event
    atomically.

    TODO(review): transfer funds do not work here; verify that changing
    the account of a transfer fund is actually prevented.
    """
    expense = get_object_or_404(Expense, pk=expense_id)
    # Check expense user == request user, otherwise bail out.
    if expense.user != request.user:
        msg = "{}: access to expense id {} denied".format( request.user.username, expense.pk )
        log.error(msg)
        messages.error(request, msg)
        return HttpResponseRedirect(reverse('veicoli:index'))
    event = get_object_or_404(Event, expense__pk=expense_id)
    page_identification = 'Veicoli: edit expense detail'
    accounts = Account.objects.filter(users=request.user)
    account_selected = expense.account.pk
    vehicles = Vehicle.objects.filter(user=request.user)
    vehicle_selected = event.vehicle.pk
    vevents = VEvent.objects.all()          # possible vehicle events
    vevent_selected = event.vevent.pk
    # Bug fix: call names() -- the original stored the bound method itself
    # instead of the list of tag names.
    tags_selected = expense.tags.names()
    if request.method == "POST":
        form1 = ExpenseForm(request.POST, instance=expense, prefix='form1')
        form2 = EventForm(request.POST, instance=event, prefix='form2')
        account_selected = int(request.POST['account'])
        tags_selected = request.POST.getlist('choice')   # [] when no choice
        vehicle_selected = int(request.POST['vehicle'])
        vevent_selected = int(request.POST['vevent'])
        if form1.is_valid() and form2.is_valid():
            try:
                with transaction.atomic():
                    expense = form1.save(commit=False)
                    expense.user = request.user
                    expense.account = Account.objects.get(id=account_selected)
                    expense.save()
                    expense.tags.set(*tags_selected, clear=True)
                    expense.save()
                    event = form2.save(commit=False)
                    event.expense = expense
                    event.vehicle = Vehicle.objects.get(id=vehicle_selected)
                    event.vevent = VEvent.objects.get(id=vevent_selected)
                    event.save()
                    msg = "{}: success modifying event {}/{}, for vehicle {}".format( request.user.username,
                                                                                      event.pk,
                                                                                      event.expense.pk,
                                                                                      event.vehicle.name
                                                                                      )
                    log.info(msg)
                    messages.success(request, msg)
            except Exception:
                # Narrowed from a bare `except:` so SystemExit and
                # KeyboardInterrupt are no longer swallowed.
                # Error: redisplay the expense change form.
                msg = 'Error <{}> while trying to change event {}/{}'.format(sys.exc_info()[0], event.id, expense.id)
                log.error(msg)
                messages.error(request, msg)
            else:
                if 'save' in request.POST.keys():
                    return HttpResponseRedirect(reverse('veicoli:detail', args=(event.expense.id,)))
    else:
        form1 = ExpenseForm(instance=expense, prefix='form1')
        form2 = EventForm(instance=event, prefix='form2')
    alltags = Tag.objects.all()
    return render(request, 'veicoli/add.html', {'page_identification': page_identification,
                                                'operation': 'edit',
                                                'form1': form1,
                                                'form2': form2,
                                                'accounts': accounts,
                                                'account_selected': account_selected,
                                                'alltags': alltags,
                                                'tags_selected': tags_selected,
                                                'vehicles': vehicles,
                                                'vehicle_selected': vehicle_selected,
                                                'vevents': vevents,
                                                'vevent_selected': vevent_selected,
                                                })
@login_required(login_url='/login/')
def index(request):
    """List the current user's vehicle events, filterable via GET params."""
    event_filter = EventFilter(
        request.GET, request=request,
        queryset=Event.objects.filter(expense__user=request.user))
    context = {'page_identification': 'Veicoli',
               'event_list': event_filter}
    return render(request, 'veicoli/index.html', context)
@login_required(login_url='/login/')
def reports(request):
    """Render aggregated reports: gas consumption, per-vehicle and
    per-event-type income/outcome totals."""
    event_list = EventFilter(
        request.GET, request=request,
        queryset=Event.objects.filter(expense__user=request.user))
    context = {
        'page_identification': 'Veicoli: reports',
        'list_gas_consumption': calculate_gas_consumption(event_list),
        'list_repo_vehicles': repo_vehicles(event_list),
        'list_repo_event_type': repo_event_type(event_list),
        'event_list': event_list,
    }
    return render(request, 'veicoli/reports.html', context)
def repo_event_type(el):
    """Income/outcome expense totals grouped by vehicle-event type.

    Args:
        el: an EventFilter; its ``qs`` attribute is the filtered Event
            queryset.

    Returns:
        List of RepoItem, one per event type, plus a final 'totals' row.
    """
    vevents = list(set(e.vevent for e in el.qs))
    list_event_type = []
    total_in = 0
    total_out = 0
    for item in vevents:
        # NOTE: in veicoli there are no transfer funds to exclude.
        # Income (> 0).  `agg` avoids shadowing the builtin `sum`.
        agg = el.qs.filter(vevent=item, expense__amount__gt=0).aggregate(
            Sum("expense__amount"))
        item.positive = agg["expense__amount__sum"] or 0
        # Outcome (< 0).
        agg = el.qs.filter(vevent=item, expense__amount__lt=0).aggregate(
            Sum("expense__amount"))
        item.negative = agg["expense__amount__sum"] or 0
        list_event_type.append(RepoItem(item.name, item.positive, item.negative))
        total_in += item.positive
        total_out += item.negative
    list_event_type.append(RepoItem('totals', total_in, total_out))
    return list_event_type
def repo_vehicles(el):
    """Income/outcome expense totals grouped by vehicle.

    Args:
        el: an EventFilter; its ``qs`` attribute is the filtered Event
            queryset.

    Returns:
        List of RepoItem, one per vehicle, plus a final 'totals' row.
    """
    vehicles = list(set(e.vehicle for e in el.qs))
    list_vehicles = []
    total_in = 0
    total_out = 0
    for item in vehicles:
        # NOTE: in veicoli there are no transfer funds to exclude.
        # Income (> 0).  `agg` avoids shadowing the builtin `sum`.
        agg = el.qs.filter(vehicle=item, expense__amount__gt=0).aggregate(
            Sum("expense__amount"))
        item.positive = agg["expense__amount__sum"] or 0
        # Outcome (< 0).
        agg = el.qs.filter(vehicle=item, expense__amount__lt=0).aggregate(
            Sum("expense__amount"))
        item.negative = agg["expense__amount__sum"] or 0
        list_vehicles.append(RepoItem(item.name, item.positive, item.negative))
        total_in += item.positive
        total_out += item.negative
    list_vehicles.append(RepoItem('totals', total_in, total_out))
    return list_vehicles
class Consumption(object):
    """Fuel consumption summary for one vehicle.

    Attributes:
        name: vehicle name.
        initial_km / final_km: odometer readings bounding the period.
        km: distance covered (final_km - initial_km).
        fuel_quantity: liters of fuel, rounded to 2 decimals.
        unit_consumption: km per liter (0 when no fuel was recorded).
    """
    def __init__(self, name, initial_km, final_km, fuel_quantity):
        self.name = name
        self.initial_km = initial_km
        self.final_km = final_km
        self.km = final_km - initial_km
        self.fuel_quantity = round(fuel_quantity, 2)
        self.unit_consumption = round( (self.final_km - self.initial_km) / self.fuel_quantity, 2 ) if self.fuel_quantity>0 else 0

    def __str__(self):
        """Return a human-readable consumption summary.

        Bug fix: the original mixed manual ("{0}") and automatic ("{}")
        field numbering in one format string, which raises ValueError
        at format time.
        """
        return "{0}: {1} km/l on {2} km".format(self.name, self.unit_consumption, self.km)
def calculate_gas_consumption(el):
    """Return a list of Consumption objects (km/l), one per vehicle in `el`.

    Algorithm, per vehicle:
      - get its 'fuel' events ordered by expense date;
      - liters per event = -expense.amount / unit_cost;
      - initial km comes from the 'purchase' event when present and
        non-zero, otherwise from the first fuel event.

    Bug fix: vehicles without any fuel event are now skipped (the
    original raised IndexError on `lel[len(lel)-1]` for them).
    """
    vehicles = list(set(e.vehicle for e in el.qs))
    result = []
    for vehicle in vehicles:
        fuel_events = el.qs.filter(vehicle=vehicle,
                                   vevent__name='fuel').order_by('expense__date')
        if not fuel_events:
            continue        # no fuel data: nothing to compute for this vehicle
        fuel_quantity = 0
        for e in fuel_events:
            # Amounts are negative expenses, hence the leading minus.
            liters = -e.expense.amount / e.unit_cost if e.unit_cost and e.unit_cost > 0 else 0
            fuel_quantity += liters
        final_km = fuel_events[len(fuel_events) - 1].km
        initial_km = 0
        try:
            purchase = Event.objects.get(vehicle=vehicle, vevent__name='purchase')
            initial_km = purchase.km
        except (Event.DoesNotExist, Event.MultipleObjectsReturned):
            # No (single) purchase event recorded; fall back below.
            pass
        if initial_km == 0:
            initial_km = fuel_events[0].km
        result.append(Consumption(name=vehicle.name, initial_km=initial_km,
                                  final_km=final_km, fuel_quantity=fuel_quantity))
    return result
@login_required(login_url="login/")
def detail(request, expense_id):
    """Show one vehicle event and its expense; owner-only.

    Non-owners are redirected to the index with an error message.
    """
    expense = get_object_or_404(Expense, pk=expense_id)
    # Check expense user == request user, otherwise bail out.  The
    # original repeated this check twice; the second copy was unreachable.
    if expense.user != request.user:
        msg = "{}: access to expense id {} denied".format( request.user.username, expense.pk )
        log.error(msg)
        messages.error(request, msg)
        return HttpResponseRedirect(reverse('veicoli:index'))
    event = get_object_or_404(Event, expense=expense_id)
    page_identification = 'Veicoli: show event detail'
    return render(request, 'veicoli/detail.html', {'page_identification': page_identification,
                                                   'operation': 'show',
                                                   'expense': expense,
                                                   'event': event,
                                                   })
|
<filename>servizi/veicoli/views.py
# veicoli/views.py
''' app veicoli views
- add
- change
- index
- detail
- reports
- calculate_gas_consumption
'''
#{ module history
# ldfa @ 2017.01.12 index: + filtering events by django-filter
# ldfa @ 2017.01.11 index: + filtering events by django-filter
# ldfa @ 2016.12.12 change: + transaction.atomic
# change: + check current user against expense user
# detail: + check current user against expense user
# ldfa @ 2016.dec adding change
# ldfa @ 2016.11.12 initial
#}
# python debugging
import pdb
import sys
import logging
log = logging.getLogger(__name__) # log.debug, info, warning, error, critical("Hey there it works!!")
# django managing requests
from django.shortcuts import get_object_or_404, render
from django.http import Http404
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
# django forms
from django.forms import modelform_factory
from django.utils import timezone
# from django.core.exceptions import ValidationError
# from django.utils.datastructures import MultiValueDictKeyError
from django.contrib import messages
# django authorization
from django.contrib.auth.decorators import login_required
# django models
from django.db import transaction
from django.db.models import Max
from django.db.models import Sum
# spese & taggit
from spese.models import Expense, Account
from spese.forms import ExpenseForm
from spese.utils import get_accounts
from spese.views import RepoItem
from taggit.models import Tag
from .models import VEvent, Vehicle, Event
from .forms import EventForm
from .filters import EventFilter
@login_required(login_url="/login/")
def add(request):
    """Create a new vehicle event together with its expense.

    GET renders a blank form pair (expense + event) prefilled with the
    last recorded odometer value; POST validates both forms and creates
    the Expense and the Event atomically.
    """
    page_identification = 'Veicoli: new event'
    accounts = Account.objects.filter(users=request.user)
    vehicles = Vehicle.objects.filter(user=request.user)
    vevents = VEvent.objects.all()
    most_probable_vevent = vevents[0] if vevents else None
    # Bug fix: Max('km') always returns the 'km__max' key, but its value
    # is None when there are no events; the old membership test never
    # triggered and last_km could end up None.
    last_km = Event.objects.all().aggregate(Max('km'))['km__max'] or 0
    account_selected = None
    tags_selected = []
    vehicle_selected = None
    vevent_selected = None
    if request.method == "POST":
        form1 = ExpenseForm(request.POST, prefix='form1')
        form2 = EventForm(request.POST, prefix='form2')
        account_selected = int(request.POST['account'])
        tags_selected = request.POST.getlist('choice')   # [] when no choice
        vehicle_selected = int(request.POST['vehicle'])
        vevent_selected = int(request.POST['vevent'])
        if form1.is_valid() and form2.is_valid():
            try:
                with transaction.atomic():
                    expense = form1.save(commit=False)
                    expense.user = request.user
                    expense.account = Account.objects.get(id=account_selected)
                    expense.save()
                    # Reuse tags_selected instead of re-reading request.POST.
                    expense.tags.set(*tags_selected, clear=True)
                    expense.save()
                    event = form2.save(commit=False)
                    event.expense = expense
                    event.vehicle = Vehicle.objects.get(id=vehicle_selected)
                    event.vevent = VEvent.objects.get(id=vevent_selected)
                    event.save()
                    msg = 'success creating expense {}, event {} for user {}, vehicle {} '.format(expense.id, event.id, expense.user.username, event.vehicle.name)
                    log.info(msg)
                    messages.success(request, msg)
            except Exception:
                # Narrowed from a bare `except:` so SystemExit and
                # KeyboardInterrupt are no longer swallowed.
                # Error: redisplay the expense change form.
                msg = 'Error <{}> while trying to create expense'.format(sys.exc_info()[0])
                log.error(msg)
                messages.error(request, msg)
            else:
                if 'save' in request.POST.keys():
                    return HttpResponseRedirect(reverse('veicoli:detail', args=(expense.id,)))
    else:
        form1 = ExpenseForm(initial={
            'description': most_probable_vevent.description if most_probable_vevent else None,
            'date': timezone.now(),
        }, prefix='form1')
        form2 = EventForm(initial={
            'km': last_km,
            'unit_cost': 1,
        }, prefix='form2')
    alltags = Tag.objects.all()
    return render(request, 'veicoli/add.html', {'page_identification': page_identification,
                                                'operation': 'new',
                                                'form1': form1,
                                                'form2': form2,
                                                'accounts': accounts,
                                                'account_selected': account_selected,
                                                'alltags': alltags,
                                                'tags_selected': tags_selected,
                                                'vehicles': vehicles,
                                                'vehicle_selected': vehicle_selected,
                                                'vevents': vevents,
                                                'vevent_selected': vevent_selected,
                                                })
@login_required(login_url="/login/")
@transaction.atomic
def change(request, expense_id):
    """Edit an existing vehicle event and its expense.

    Only the owner of the expense may edit it; anyone else is redirected
    to the index with an error message.  POST saves expense and event
    atomically.

    TODO(review): transfer funds do not work here; verify that changing
    the account of a transfer fund is actually prevented.
    """
    expense = get_object_or_404(Expense, pk=expense_id)
    # Check expense user == request user, otherwise bail out.
    if expense.user != request.user:
        msg = "{}: access to expense id {} denied".format( request.user.username, expense.pk )
        log.error(msg)
        messages.error(request, msg)
        return HttpResponseRedirect(reverse('veicoli:index'))
    event = get_object_or_404(Event, expense__pk=expense_id)
    page_identification = 'Veicoli: edit expense detail'
    accounts = Account.objects.filter(users=request.user)
    account_selected = expense.account.pk
    vehicles = Vehicle.objects.filter(user=request.user)
    vehicle_selected = event.vehicle.pk
    vevents = VEvent.objects.all()          # possible vehicle events
    vevent_selected = event.vevent.pk
    # Bug fix: call names() -- the original stored the bound method itself
    # instead of the list of tag names.
    tags_selected = expense.tags.names()
    if request.method == "POST":
        form1 = ExpenseForm(request.POST, instance=expense, prefix='form1')
        form2 = EventForm(request.POST, instance=event, prefix='form2')
        account_selected = int(request.POST['account'])
        tags_selected = request.POST.getlist('choice')   # [] when no choice
        vehicle_selected = int(request.POST['vehicle'])
        vevent_selected = int(request.POST['vevent'])
        if form1.is_valid() and form2.is_valid():
            try:
                with transaction.atomic():
                    expense = form1.save(commit=False)
                    expense.user = request.user
                    expense.account = Account.objects.get(id=account_selected)
                    expense.save()
                    expense.tags.set(*tags_selected, clear=True)
                    expense.save()
                    event = form2.save(commit=False)
                    event.expense = expense
                    event.vehicle = Vehicle.objects.get(id=vehicle_selected)
                    event.vevent = VEvent.objects.get(id=vevent_selected)
                    event.save()
                    msg = "{}: success modifying event {}/{}, for vehicle {}".format( request.user.username,
                                                                                      event.pk,
                                                                                      event.expense.pk,
                                                                                      event.vehicle.name
                                                                                      )
                    log.info(msg)
                    messages.success(request, msg)
            except Exception:
                # Narrowed from a bare `except:` so SystemExit and
                # KeyboardInterrupt are no longer swallowed.
                # Error: redisplay the expense change form.
                msg = 'Error <{}> while trying to change event {}/{}'.format(sys.exc_info()[0], event.id, expense.id)
                log.error(msg)
                messages.error(request, msg)
            else:
                if 'save' in request.POST.keys():
                    return HttpResponseRedirect(reverse('veicoli:detail', args=(event.expense.id,)))
    else:
        form1 = ExpenseForm(instance=expense, prefix='form1')
        form2 = EventForm(instance=event, prefix='form2')
    alltags = Tag.objects.all()
    return render(request, 'veicoli/add.html', {'page_identification': page_identification,
                                                'operation': 'edit',
                                                'form1': form1,
                                                'form2': form2,
                                                'accounts': accounts,
                                                'account_selected': account_selected,
                                                'alltags': alltags,
                                                'tags_selected': tags_selected,
                                                'vehicles': vehicles,
                                                'vehicle_selected': vehicle_selected,
                                                'vevents': vevents,
                                                'vevent_selected': vevent_selected,
                                                })
@login_required(login_url='/login/')
def index(request):
    """List the current user's vehicle events, filterable via GET params."""
    event_filter = EventFilter(
        request.GET, request=request,
        queryset=Event.objects.filter(expense__user=request.user))
    context = {'page_identification': 'Veicoli',
               'event_list': event_filter}
    return render(request, 'veicoli/index.html', context)
@login_required(login_url='/login/')
def reports(request):
    """Render aggregated reports: gas consumption, per-vehicle and
    per-event-type income/outcome totals."""
    event_list = EventFilter(
        request.GET, request=request,
        queryset=Event.objects.filter(expense__user=request.user))
    context = {
        'page_identification': 'Veicoli: reports',
        'list_gas_consumption': calculate_gas_consumption(event_list),
        'list_repo_vehicles': repo_vehicles(event_list),
        'list_repo_event_type': repo_event_type(event_list),
        'event_list': event_list,
    }
    return render(request, 'veicoli/reports.html', context)
def repo_event_type(el):
    """Income/outcome expense totals grouped by vehicle-event type.

    Args:
        el: an EventFilter; its ``qs`` attribute is the filtered Event
            queryset.

    Returns:
        List of RepoItem, one per event type, plus a final 'totals' row.
    """
    vevents = list(set(e.vevent for e in el.qs))
    list_event_type = []
    total_in = 0
    total_out = 0
    for item in vevents:
        # NOTE: in veicoli there are no transfer funds to exclude.
        # Income (> 0).  `agg` avoids shadowing the builtin `sum`.
        agg = el.qs.filter(vevent=item, expense__amount__gt=0).aggregate(
            Sum("expense__amount"))
        item.positive = agg["expense__amount__sum"] or 0
        # Outcome (< 0).
        agg = el.qs.filter(vevent=item, expense__amount__lt=0).aggregate(
            Sum("expense__amount"))
        item.negative = agg["expense__amount__sum"] or 0
        list_event_type.append(RepoItem(item.name, item.positive, item.negative))
        total_in += item.positive
        total_out += item.negative
    list_event_type.append(RepoItem('totals', total_in, total_out))
    return list_event_type
def repo_vehicles(el):
    """Income/outcome expense totals grouped by vehicle.

    Args:
        el: an EventFilter; its ``qs`` attribute is the filtered Event
            queryset.

    Returns:
        List of RepoItem, one per vehicle, plus a final 'totals' row.
    """
    vehicles = list(set(e.vehicle for e in el.qs))
    list_vehicles = []
    total_in = 0
    total_out = 0
    for item in vehicles:
        # NOTE: in veicoli there are no transfer funds to exclude.
        # Income (> 0).  `agg` avoids shadowing the builtin `sum`.
        agg = el.qs.filter(vehicle=item, expense__amount__gt=0).aggregate(
            Sum("expense__amount"))
        item.positive = agg["expense__amount__sum"] or 0
        # Outcome (< 0).
        agg = el.qs.filter(vehicle=item, expense__amount__lt=0).aggregate(
            Sum("expense__amount"))
        item.negative = agg["expense__amount__sum"] or 0
        list_vehicles.append(RepoItem(item.name, item.positive, item.negative))
        total_in += item.positive
        total_out += item.negative
    list_vehicles.append(RepoItem('totals', total_in, total_out))
    return list_vehicles
class Consumption(object):
    """Fuel consumption summary for one vehicle.

    Attributes:
        name: vehicle name.
        initial_km / final_km: odometer readings bounding the period.
        km: distance covered (final_km - initial_km).
        fuel_quantity: liters of fuel, rounded to 2 decimals.
        unit_consumption: km per liter (0 when no fuel was recorded).
    """
    def __init__(self, name, initial_km, final_km, fuel_quantity):
        self.name = name
        self.initial_km = initial_km
        self.final_km = final_km
        self.km = final_km - initial_km
        self.fuel_quantity = round(fuel_quantity, 2)
        self.unit_consumption = round( (self.final_km - self.initial_km) / self.fuel_quantity, 2 ) if self.fuel_quantity>0 else 0

    def __str__(self):
        """Return a human-readable consumption summary.

        Bug fix: the original mixed manual ("{0}") and automatic ("{}")
        field numbering in one format string, which raises ValueError
        at format time.
        """
        return "{0}: {1} km/l on {2} km".format(self.name, self.unit_consumption, self.km)
def calculate_gas_consumption(el):
    """From an event (query)list return a list of Consumption objects (km/l).

    - el: expense (query)list exposing a ``.qs`` queryset of events
    Algorithm:
      - get the distinct vehicles in the list
      - for each vehicle:
        - get its fuel events ordered by expense date
        - liters per event = -expense amount / unit cost (outcomes are negative)
        - initial km from the 'purchase' event if present, else the first
          fuel event; final km from the last fuel event
    """
    vehicles = list({e.vehicle for e in el.qs})
    result = []
    for vehicle in vehicles:
        fuel_events = el.qs.filter(vehicle=vehicle, vevent__name='fuel').order_by('expense__date')
        # BUG FIX: the original indexed fuel_events unconditionally and
        # raised IndexError for a vehicle with no fuel events.
        if not fuel_events:
            continue
        fuel_quantity = 0
        for e in fuel_events:
            # liters bought; skip events with missing/zero unit cost
            liters = -e.expense.amount / e.unit_cost if e.unit_cost and e.unit_cost > 0 else 0
            fuel_quantity += liters
        final_km = fuel_events[len(fuel_events) - 1].km
        initial_km = 0
        try:
            purchase = Event.objects.get(vehicle=vehicle, vevent__name='purchase')
            initial_km = purchase.km
        except (Event.DoesNotExist, Event.MultipleObjectsReturned):
            # narrowed from a bare except: only the exceptions .get() is
            # expected to raise here; anything else should propagate
            pass
        if initial_km == 0:
            # fall back to the first fuel event's odometer reading
            initial_km = fuel_events[0].km
        consumption = Consumption(name=vehicle.name, initial_km=initial_km, final_km=final_km, fuel_quantity=fuel_quantity)
        result.append(consumption)
    return result
@login_required(login_url="login/")
def detail(request, expense_id):
    """Show the detail of one expense and its associated vehicle event.

    - expense_id: primary key of the Expense to display
    Returns the detail template, or redirects to the index (with an error
    message) when the expense does not belong to the requesting user.
    """
    expense = get_object_or_404(Expense, pk=expense_id)
    # authorization: only the owner may view this expense, otherwise bail out
    if expense.user != request.user:
        msg = "{}: access to expense id {} denied".format( request.user.username, expense.pk )
        log.error(msg)
        messages.error(request, msg)
        return HttpResponseRedirect(reverse('veicoli:index'))
    event = get_object_or_404(Event, expense=expense_id)
    page_identification = 'Veicoli: show event detail'
    # NOTE: the original repeated the same ownership check a second time
    # after fetching the event; that branch was unreachable and was removed.
    return render(request, 'veicoli/detail.html', {'page_identification': page_identification,
                                                   'operation': 'show',
                                                   'expense': expense,
                                                   'event': event,
                                                  }
                 )
|
en
| 0.64081
|
# veicoli/views.py app veicoli views - add - change - index - detail - reports - calculate_gas_consumption #{ module history # ldfa @ 2017.01.12 index: + filtering events by django-filter # ldfa @ 2017.01.11 index: + filtering events by django-filter # ldfa @ 2016.12.12 change: + transaction.atomic # change: + check current user against expense user # detail: + check current user against expense user # ldfa @ 2016.dec adding change # ldfa @ 2016.11.12 initial #} # python debugging # log.debug, info, warning, error, critical("Hey there it works!!") # django managing requests # django forms # from django.core.exceptions import ValidationError # from django.utils.datastructures import MultiValueDictKeyError # django authorization # django models # spese & taggit ### TRACE ### pdb.set_trace() # 'getlist' gets [] in case of no choices # 'getlist' gets [] in case of no choices ### TRACE ### pdb.set_trace() # error: Redisplay the expense change form ### TRACE ### pdb.set_trace() SVILUPPO il tranfer funds non funziona. 
VERIFICA: - transfer fund: il cambio di account viene impedito ### TRACE ### pdb.set_trace() # check expense user == request user, othewise bail out # possible veicle events # 'getlist' gets [] in case of no choices # error: Redisplay the expense change form # if other_expense: # messages.info(request, "warning: this is a transfer fund, these changes will affect also its companion") # messages.info(request, "warning: this is a transfer fund, changes to account will not be accepted") # event_expense_list = [event.expense.pk for event in Event.objects.filter(expense__user=request.user).order_by('-expense__date')] # event_list = Event.objects.filter(expense__user=request.user).order_by('vehicle', '-expense__date') ####### SVILUPPO ### TRACE ### pdb.set_trace() # event_expense_list = [event.expense.pk for event in Event.objects.filter(expense__user=request.user).order_by('-expense__date')] # event_list = Event.objects.filter(expense__user=request.user).order_by('vehicle', '-expense__date') ####### SVILUPPO # list gas consumption # list repo vehicles # list repo vehicles ### TRACE ### pdb.set_trace() in and out expense by event type, - el, expenses (query)list # pdb.set_trace() # in veicoli there aren't transfer funds # tfs = TransferFund.objects.values_list('source', flat=True) # tfd = TransferFund.objects.values_list('destination', flat=True) # get income (>0) # get outcome (<0) in and out expense by vehicles, - el, expenses (query)list # pdb.set_trace() # in veicoli there aren't transfer funds # tfs = TransferFund.objects.values_list('source', flat=True) # tfd = TransferFund.objects.values_list('destination', flat=True) # get income (>0) # get outcome (<0) return name from event list return dict with v.name/v.gas consumption (Km/l) algorithm: - get list of vehicles - foreach vehicle. - get all fuel events+expenses, ordered by expense date - + calc.liters of fuel: expense__amount/unit_cost - then sum all liters and calc. 
max km/sum_of_liters ### TRACE ### pdb.set_trace() #pdb.set_trace() # check expense user == request user, othewise bail out
| 1.840086
| 2
|