max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
famplex/html/__init__.py | steppi/famplex | 11 | 6613851 | <reponame>steppi/famplex<filename>famplex/html/__init__.py
# -*- coding: utf-8 -*-
"""Export FamPlex as a static site."""
| # -*- coding: utf-8 -*-
"""Export FamPlex as a static site.""" | en | 0.884583 | # -*- coding: utf-8 -*- Export FamPlex as a static site. | 1.042736 | 1 |
dashboard/urls.py | Extroverted-introvert/carolina_data_challenge | 0 | 6613852 | from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from . import views
app_name = 'dashboard'
urlpatterns = [
# route is a string contains a URL pattern
# view refers to the view function
# name the URL
path(route='', view=views.index, name='index'),
path('registration/', views.registration_request, name='registration'),
path('login/', views.login_request, name='login'),
path('logout/', views.logout_request, name='logout'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from . import views
app_name = 'dashboard'
urlpatterns = [
# route is a string contains a URL pattern
# view refers to the view function
# name the URL
path(route='', view=views.index, name='index'),
path('registration/', views.registration_request, name='registration'),
path('login/', views.login_request, name='login'),
path('logout/', views.logout_request, name='logout'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| en | 0.761151 | # route is a string contains a URL pattern # view refers to the view function # name the URL | 1.997709 | 2 |
Model/config_reader.py | m1258218761/p-score | 7 | 6613853 | <reponame>m1258218761/p-score
# -*- coding:utf-8 -*-
# author:mx
# datetime:2020/4/25 15:06
# email:<EMAIL>
import configparser
class MyParser(configparser.ConfigParser):
def as_dict(self):
d = dict(self._sections)
for k in d:
d[k] = dict(d[k])
return d
| # -*- coding:utf-8 -*-
# author:mx
# datetime:2020/4/25 15:06
# email:<EMAIL>
import configparser
class MyParser(configparser.ConfigParser):
def as_dict(self):
d = dict(self._sections)
for k in d:
d[k] = dict(d[k])
return d | en | 0.365102 | # -*- coding:utf-8 -*- # author:mx # datetime:2020/4/25 15:06 # email:<EMAIL> | 2.858514 | 3 |
chapter09/read_big_file.py | ScorpioDoctor/AdvancePythonMy | 0 | 6613854 | from random import randint, Random
# 产生指定长度的随机字符串
def random_str(random_length=8):
str = ''
chars = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'
length = len(chars) - 1
random = Random()
for i in range(random_length):
str += chars[random.randint(0, length)]
return str
# 写随机字符串数据到指定的文件
def write_data_to_file(file_name="oooxxx.txt", maxlines=100):
with open(file_name, encoding="utf8", mode="a") as f_open:
for line in range(maxlines):
f_open.writelines([random_str(randint(20, 80)), '<||>'])
# 根据分隔符按行读取被写成一行的大文件
def read_big_file(f, spliter):
buf = ""
while True:
# 检测buf里面有没有分隔符spliter,
# 如果有,说明当前buf里面还有完整的行,不断的输出这些行
while spliter in buf:
pos = buf.index(spliter)
yield buf[0:pos]
#把已经输出的行给冲掉
buf = buf[pos + len(spliter):]
#如果上面的buf没有完整的行,就接着读取文件内容
chunk = f.read(4096)# 有可能包含多行内容,也有可能包含不完整的行
if not chunk:
yield buf
break
else:
buf += chunk
if __name__ == "__main__":
# write_data_to_file("bigfile.txt", maxlines=100)
with open("bigfile.txt",mode='r') as f:
gen = read_big_file(f,'<||>')
for line in gen:
print(line) | from random import randint, Random
# 产生指定长度的随机字符串
def random_str(random_length=8):
str = ''
chars = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'
length = len(chars) - 1
random = Random()
for i in range(random_length):
str += chars[random.randint(0, length)]
return str
# 写随机字符串数据到指定的文件
def write_data_to_file(file_name="oooxxx.txt", maxlines=100):
with open(file_name, encoding="utf8", mode="a") as f_open:
for line in range(maxlines):
f_open.writelines([random_str(randint(20, 80)), '<||>'])
# 根据分隔符按行读取被写成一行的大文件
def read_big_file(f, spliter):
buf = ""
while True:
# 检测buf里面有没有分隔符spliter,
# 如果有,说明当前buf里面还有完整的行,不断的输出这些行
while spliter in buf:
pos = buf.index(spliter)
yield buf[0:pos]
#把已经输出的行给冲掉
buf = buf[pos + len(spliter):]
#如果上面的buf没有完整的行,就接着读取文件内容
chunk = f.read(4096)# 有可能包含多行内容,也有可能包含不完整的行
if not chunk:
yield buf
break
else:
buf += chunk
if __name__ == "__main__":
# write_data_to_file("bigfile.txt", maxlines=100)
with open("bigfile.txt",mode='r') as f:
gen = read_big_file(f,'<||>')
for line in gen:
print(line) | zh | 0.964607 | # 产生指定长度的随机字符串 # 写随机字符串数据到指定的文件 # 根据分隔符按行读取被写成一行的大文件 # 检测buf里面有没有分隔符spliter, # 如果有,说明当前buf里面还有完整的行,不断的输出这些行 #把已经输出的行给冲掉 #如果上面的buf没有完整的行,就接着读取文件内容 # 有可能包含多行内容,也有可能包含不完整的行 # write_data_to_file("bigfile.txt", maxlines=100) | 3.192056 | 3 |
Utils/decorators.py | philshams/FC_analysis | 0 | 6613855 | <reponame>philshams/FC_analysis
import time
import functools
from Config import verbose
def clock(func):
""" Decorator to time the execution of a function and print the result"""
@functools.wraps(func)
def clocked(self, *args):
t0 = time.perf_counter()
result = func(self, *args)
elapsed = time.perf_counter() - t0
if verbose:
name = func.__name__
arg_str = ', '.join(repr(arg) for arg in args)
spaces = ' '*(40-len(name))
print(' .. {} in{} --> {}s'.format(name, spaces, round(elapsed, 4)))
return result
return clocked
def clock_noself(func):
""" Decorator to time the execution of a function and print the result"""
def clocked(*args):
t0 = time.perf_counter()
result = func(*args)
elapsed = time.perf_counter() - t0
name = func.__name__
spaces = ' '*(40-len(name))
print(' .. {} in{} --> {}s'.format(name, spaces, round(elapsed, 4)))
return result
return clocked
def register(registry:list):
""" Decorator to add a function to a given list of functions """
def decorate(func):
registry.append(func)
return func
return decorate
| import time
import functools
from Config import verbose
def clock(func):
""" Decorator to time the execution of a function and print the result"""
@functools.wraps(func)
def clocked(self, *args):
t0 = time.perf_counter()
result = func(self, *args)
elapsed = time.perf_counter() - t0
if verbose:
name = func.__name__
arg_str = ', '.join(repr(arg) for arg in args)
spaces = ' '*(40-len(name))
print(' .. {} in{} --> {}s'.format(name, spaces, round(elapsed, 4)))
return result
return clocked
def clock_noself(func):
""" Decorator to time the execution of a function and print the result"""
def clocked(*args):
t0 = time.perf_counter()
result = func(*args)
elapsed = time.perf_counter() - t0
name = func.__name__
spaces = ' '*(40-len(name))
print(' .. {} in{} --> {}s'.format(name, spaces, round(elapsed, 4)))
return result
return clocked
def register(registry:list):
""" Decorator to add a function to a given list of functions """
def decorate(func):
registry.append(func)
return func
return decorate | en | 0.760648 | Decorator to time the execution of a function and print the result Decorator to time the execution of a function and print the result Decorator to add a function to a given list of functions | 3.988227 | 4 |
tests/test_logging.py | poyo46/lilili | 0 | 6613856 | <filename>tests/test_logging.py
import logging
from lilili import __title__
logging.basicConfig(
filename=f"{__title__}.log",
format="\t".join(
(
"%(asctime)s",
"%(levelname)s",
"%(name)s",
"%(funcName)s",
"%(lineno)d",
"%(message)s",
)
),
level=logging.DEBUG,
)
| <filename>tests/test_logging.py
import logging
from lilili import __title__
logging.basicConfig(
filename=f"{__title__}.log",
format="\t".join(
(
"%(asctime)s",
"%(levelname)s",
"%(name)s",
"%(funcName)s",
"%(lineno)d",
"%(message)s",
)
),
level=logging.DEBUG,
)
| none | 1 | 2.017305 | 2 | |
mathics/builtin/atomic/numbers.py | adamantinum/mathics-core | 0 | 6613857 | <gh_stars>0
# cython: language_level=3
# -*- coding: utf-8 -*-
# Note: docstring is flowed in documentation. Line breaks in the docstring will appear in the
# printed output, so be carful not to add then mid-sentence.
"""
Representation of Numbers
Integers and Real numbers with any number of digits, automatically tagging numerical preceision when appropriate.
Precision is not "guarded" through the evaluation process. Only integer precision is supported.
However, things like 'N[Pi, 100]' should work as expected.
"""
import sympy
import mpmath
from functools import lru_cache
from mathics.builtin.base import Builtin, Predefined, Test
from mathics.core.evaluators import apply_N
from mathics.core.expression import Expression
from mathics.core.symbols import (
Symbol,
SymbolFalse,
SymbolList,
SymbolTrue,
)
from mathics.core.atoms import (
Integer,
Integer0,
MachineReal,
Number,
Rational,
Real,
from_python,
)
from mathics.core.number import (
dps,
convert_int_to_digit_list,
machine_precision,
machine_epsilon,
)
from mathics.core.attributes import (
listable,
protected,
)
@lru_cache(maxsize=1024)
def log_n_b(py_n, py_b) -> int:
return int(mpmath.ceil(mpmath.log(py_n, py_b))) if py_n != 0 and py_n != 1 else 1
def check_finite_decimal(denominator):
# The rational number is finite decimal if the denominator has form 2^a * 5^b
while denominator % 5 == 0:
denominator = denominator / 5
while denominator % 2 == 0:
denominator = denominator / 2
return True if denominator == 1 else False
def convert_repeating_decimal(numerator, denominator, base):
head = [x for x in str(numerator // denominator)]
tails = []
subresults = [numerator % denominator]
numerator %= denominator
while numerator != 0: # only rational input can go to this case
numerator *= base
result_digit, numerator = divmod(numerator, denominator)
tails.append(str(result_digit))
if numerator not in subresults:
subresults.append(numerator)
else:
break
for i in range(len(head) - 1, -1, -1):
j = len(tails) - 1
if head[i] != tails[j]:
break
else:
del tails[j]
tails.insert(0, head[i])
del head[i]
j = j - 1
# truncate all leading 0's
if all(elem == "0" for elem in head):
for i in range(0, len(tails)):
if tails[0] == "0":
tails = tails[1:] + [str(0)]
else:
break
return (head, tails)
def convert_float_base(x, base, precision=10):
length_of_int = 0 if x == 0 else int(mpmath.log(x, base))
# iexps = list(range(length_of_int, -1, -1))
def convert_int(x, base, exponents):
out = []
for e in range(0, exponents + 1):
d = x % base
out.append(d)
x = x / base
if x == 0:
break
out.reverse()
return out
def convert_float(x, base, exponents):
out = []
for e in range(0, exponents):
d = int(x * base)
out.append(d)
x = (x * base) - d
if x == 0:
break
return out
int_part = convert_int(int(x), base, length_of_int)
if isinstance(x, (float, sympy.Float)):
# fexps = list(range(-1, -int(precision + 1), -1))
real_part = convert_float(x - int(x), base, precision + 1)
return int_part + real_part
elif isinstance(x, int):
return int_part
else:
raise TypeError(x)
class ExactNumberQ(Test):
"""
<dl>
<dt>'ExactNumberQ[$expr$]'
<dd>returns 'True' if $expr$ is an exact number, and 'False' otherwise.
</dl>
>> ExactNumberQ[10]
= True
>> ExactNumberQ[4.0]
= False
>> ExactNumberQ[n]
= False
'ExactNumberQ' can be applied to complex numbers:
>> ExactNumberQ[1 + I]
= True
>> ExactNumberQ[1 + 1. I]
= False
"""
summary_text = "test if an expression is an exact real or complex number"
def test(self, expr):
return isinstance(expr, Number) and not expr.is_inexact()
class IntegerDigits(Builtin):
"""
<dl>
<dt>'IntegerDigits[$n$]'
<dd>returns a list of the base-10 digits in the integer $n$.
<dt>'IntegerDigits[$n$, $base$]'
<dd>returns a list of the base-$base$ digits in $n$.
<dt>'IntegerDigits[$n$, $base$, $length$]'
<dd>returns a list of length $length$, truncating or padding
with zeroes on the left as necessary.
</dl>
>> IntegerDigits[76543]
= {7, 6, 5, 4, 3}
The sign of $n$ is discarded:
>> IntegerDigits[-76543]
= {7, 6, 5, 4, 3}
>> IntegerDigits[15, 16]
= {15}
>> IntegerDigits[1234, 16]
= {4, 13, 2}
>> IntegerDigits[1234, 10, 5]
= {0, 1, 2, 3, 4}
#> IntegerDigits[1000, 10]
= {1, 0, 0, 0}
#> IntegerDigits[0]
= {0}
"""
attributes = listable | protected
messages = {
"int": "Integer expected at position 1 in `1`",
"ibase": "Base `1` is not an integer greater than 1.",
}
rules = {
"IntegerDigits[n_]": "IntegerDigits[n, 10]",
}
summary_text = "digits of an integer in any base"
def apply_len(self, n, base, length, evaluation):
"IntegerDigits[n_, base_, length_]"
if not (isinstance(length, Integer) and length.get_int_value() >= 0):
return evaluation.message("IntegerDigits", "intnn")
return self.apply(n, base, evaluation, nr_elements=length.get_int_value())
def apply(self, n, base, evaluation, nr_elements=None):
"IntegerDigits[n_, base_]"
if not (isinstance(n, Integer)):
return evaluation.message(
"IntegerDigits", "int", Expression("IntegerDigits", n, base)
)
if not (isinstance(base, Integer) and base.get_int_value() > 1):
return evaluation.message("IntegerDigits", "ibase", base)
if nr_elements == 0:
# trivial case: we don't want any digits
return Expression(SymbolList)
digits = convert_int_to_digit_list(n.get_int_value(), base.get_int_value())
if nr_elements is not None:
if len(digits) >= nr_elements:
# Truncate, preserving the digits on the right
digits = digits[-nr_elements:]
else:
# Pad with zeroes
digits = [0] * (nr_elements - len(digits)) + digits
return Expression(SymbolList, *digits)
class IntegerExponent(Builtin):
"""
<dl>
<dt>'IntegerExponent[$n$, $b$]'
<dd>gives the highest exponent of $b$ that divides $n$.
</dl>
>> IntegerExponent[16, 2]
= 4
>> IntegerExponent[-510000]
= 4
>> IntegerExponent[10, b]
= IntegerExponent[10, b]
"""
attributes = listable | protected
messages = {
"int": "Integer expected at position 1 in `1`",
"ibase": "Base `1` is not an integer greater than 1.",
}
rules = {
"IntegerExponent[n_]": "IntegerExponent[n, 10]",
}
summary_text = "number of trailing 0s in a given base"
def apply(self, n, b, evaluation):
"IntegerExponent[n_Integer, b_Integer]"
py_n, py_b = n.to_python(), b.to_python()
expr = Expression("IntegerExponent", n, b)
if not isinstance(py_n, int):
evaluation.message("IntegerExponent", "int", expr)
py_n = abs(py_n)
if not (isinstance(py_b, int) and py_b > 1):
evaluation.message("IntegerExponent", "ibase", b)
# TODO: Optimise this (dont need to calc. base^result)
# NOTE: IntegerExponent[a,b] causes a Python error here when a or b are
# symbols
result = 1
while py_n % (py_b ** result) == 0:
result += 1
return Integer(result - 1)
class IntegerLength(Builtin):
"""
<dl>
<dt>'IntegerLength[$x$]'
<dd>gives the number of digits in the base-10 representation of $x$.
<dt>'IntegerLength[$x$, $b$]'
<dd>gives the number of base-$b$ digits in $x$.
</dl>
>> IntegerLength[123456]
= 6
>> IntegerLength[10^10000]
= 10001
>> IntegerLength[-10^1000]
= 1001
'IntegerLength' with base 2:
>> IntegerLength[8, 2]
= 4
Check that 'IntegerLength' is correct for the first 100 powers of 10:
>> IntegerLength /@ (10 ^ Range[100]) == Range[2, 101]
= True
The base must be greater than 1:
>> IntegerLength[3, -2]
: Base -2 is not an integer greater than 1.
= IntegerLength[3, -2]
'0' is a special case:
>> IntegerLength[0]
= 0
#> IntegerLength /@ (10 ^ Range[100] - 1) == Range[1, 100]
= True
"""
attributes = listable | protected
messages = {
"base": "Base `1` is not an integer greater than 1.",
}
rules = {
"IntegerLength[n_]": "IntegerLength[n, 10]",
}
summary_text = "total number of digits in any base"
def apply(self, n, b, evaluation):
"IntegerLength[n_, b_]"
n, b = n.get_int_value(), b.get_int_value()
if n is None or b is None:
evaluation.message("IntegerLength", "int")
return
if b <= 1:
evaluation.message("IntegerLength", "base", b)
return
if n == 0:
# special case
return Integer0
n = abs(n)
# O(log(digits))
# find bounds
j = 1
while b ** j <= n:
j *= 2
i = j // 2
# bisection
while i + 1 < j:
# assert b ** i <= n <= b ** j
k = (i + j) // 2
if b ** k <= n:
i = k
else:
j = k
return Integer(j)
class InexactNumberQ(Test):
"""
<dl>
<dt>'InexactNumberQ[$expr$]'
<dd>returns 'True' if $expr$ is not an exact number, and 'False' otherwise.
</dl>
>> InexactNumberQ[a]
= False
>> InexactNumberQ[3.0]
= True
>> InexactNumberQ[2/3]
= False
'InexactNumberQ' can be applied to complex numbers:
>> InexactNumberQ[4.0+I]
= True
"""
summary_text = "the negation of ExactNumberQ"
def test(self, expr):
return isinstance(expr, Number) and expr.is_inexact()
class IntegerQ(Test):
"""
<dl>
<dt>'IntegerQ[$expr$]'
<dd>returns 'True' if $expr$ is an integer, and 'False' otherwise.
</dl>
>> IntegerQ[3]
= True
>> IntegerQ[Pi]
= False
"""
summary_text = "test whether an expression is an integer"
def test(self, expr):
return isinstance(expr, Integer)
class MachineNumberQ(Test):
"""
<dl>
<dt>'MachineNumberQ[$expr$]'
<dd>returns 'True' if $expr$ is a machine-precision real or complex number.
</dl>
= True
>> MachineNumberQ[3.14159265358979324]
= False
>> MachineNumberQ[1.5 + 2.3 I]
= True
>> MachineNumberQ[2.71828182845904524 + 3.14159265358979324 I]
= False
#> MachineNumberQ[1.5 + 3.14159265358979324 I]
= True
#> MachineNumberQ[1.5 + 5 I]
= True
"""
summary_text = "test if expression is a machine‐precision real or complex number"
def test(self, expr):
return expr.is_machine_precision()
class RealDigits(Builtin):
"""
<dl>
<dt>'RealDigits[$n$]'
<dd>returns the decimal representation of the real number $n$ as list of digits, together with the number of digits that are to the left of the decimal point.
<dt>'RealDigits[$n$, $b$]'
<dd>returns a list of base_$b$ representation of the real number $n$.
<dt>'RealDigits[$n$, $b$, $len$]'
<dd>returns a list of $len$ digits.
<dt>'RealDigits[$n$, $b$, $len$, $p$]'
<dd>return $len$ digits starting with the coefficient of $b$^$p$
</dl>
Return the list of digits and exponent:
>> RealDigits[123.55555]
= {{1, 2, 3, 5, 5, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0}, 3}
Return an explicit recurring decimal form:
>> RealDigits[19 / 7]
= {{2, {7, 1, 4, 2, 8, 5}}, 1}
The 10000th digit of is an 8:
>> RealDigits[Pi, 10, 1, -10000]
= {{8}, -9999}
20 digits starting with the coefficient of 10^-5:
>> RealDigits[Pi, 10, 20, -5]
= {{9, 2, 6, 5, 3, 5, 8, 9, 7, 9, 3, 2, 3, 8, 4, 6, 2, 6, 4, 3}, -4}
RealDigits gives Indeterminate if more digits than the precision are requested:
>> RealDigits[123.45, 10, 18]
= {{1, 2, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Indeterminate, Indeterminate}, 3}
#> RealDigits[-1.25, -1]
: Base -1 is not a real number greater than 1.
= RealDigits[-1.25, -1]
Return 25 digits of in base 10:
>> RealDigits[Pi, 10, 25]
= {{3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8, 9, 7, 9, 3, 2, 3, 8, 4, 6, 2, 6, 4, 3}, 1}
#> RealDigits[-Pi]
: The number of digits to return cannot be determined.
= RealDigits[-Pi]
#> RealDigits[I, 7]
: The value I is not a real number.
= RealDigits[I, 7]
#> RealDigits[Pi]
: The number of digits to return cannot be determined.
= RealDigits[Pi]
#> RealDigits[3 + 4 I]
: The value 3 + 4 I is not a real number.
= RealDigits[3 + 4 I]
#> RealDigits[3.14, 10, 1.5]
: Non-negative machine-sized integer expected at position 3 in RealDigits[3.14, 10, 1.5].
= RealDigits[3.14, 10, 1.5]
#> RealDigits[3.14, 10, 1, 1.5]
: Machine-sized integer expected at position 4 in RealDigits[3.14, 10, 1, 1.5].
= RealDigits[3.14, 10, 1, 1.5]
"""
attributes = listable | protected
messages = {
"realx": "The value `1` is not a real number.",
"ndig": "The number of digits to return cannot be determined.",
"rbase": "Base `1` is not a real number greater than 1.",
"intnm": "Non-negative machine-sized integer expected at position 3 in `1`.",
"intm": "Machine-sized integer expected at position 4 in `1`.",
}
summary_text = "digits of a real number"
def apply_complex(self, n, var, evaluation):
"%(name)s[n_Complex, var___]"
return evaluation.message("RealDigits", "realx", n)
def apply_rational_with_base(self, n, b, evaluation):
"%(name)s[n_Rational, b_Integer]"
# expr = Expression("RealDigits", n)
py_n = abs(n.value)
py_b = b.get_int_value()
if check_finite_decimal(n.denominator().get_int_value()) and not py_b % 2:
return self.apply_with_base(n, b, evaluation)
else:
exp = int(mpmath.ceil(mpmath.log(py_n, py_b)))
(head, tails) = convert_repeating_decimal(
py_n.as_numer_denom()[0], py_n.as_numer_denom()[1], py_b
)
leaves = []
for x in head:
if x != "0":
leaves.append(Integer(int(x)))
leaves.append(from_python(tails))
list_str = Expression(SymbolList, *leaves)
return Expression(SymbolList, list_str, exp)
def apply_rational_without_base(self, n, evaluation):
"%(name)s[n_Rational]"
return self.apply_rational_with_base(n, Integer(10), evaluation)
def apply(self, n, evaluation):
"%(name)s[n_]"
# Handling the testcases that throw the error message and return the ouput that doesn't include `base` argument
if isinstance(n, Symbol) and n.name.startswith("System`"):
return evaluation.message("RealDigits", "ndig", n)
if n.is_numeric(evaluation):
return self.apply_with_base(n, from_python(10), evaluation)
def apply_with_base(self, n, b, evaluation, nr_elements=None, pos=None):
"%(name)s[n_?NumericQ, b_Integer]"
expr = Expression("RealDigits", n)
rational_no = (
True if isinstance(n, Rational) else False
) # it is used for checking whether the input n is a rational or not
py_b = b.get_int_value()
if isinstance(n, (Expression, Symbol, Rational)):
pos_len = abs(pos) + 1 if pos is not None and pos < 0 else 1
if nr_elements is not None:
n = Expression(
"N", n, int(mpmath.log(py_b ** (nr_elements + pos_len), 10)) + 1
).evaluate(evaluation)
else:
if rational_no:
n = apply_N(n, evaluation)
else:
return evaluation.message("RealDigits", "ndig", expr)
py_n = abs(n.value)
if not py_b > 1:
return evaluation.message("RealDigits", "rbase", py_b)
if isinstance(py_n, complex):
return evaluation.message("RealDigits", "realx", expr)
if isinstance(n, Integer):
display_len = (
int(mpmath.floor(mpmath.log(py_n, py_b)))
if py_n != 0 and py_n != 1
else 1
)
else:
display_len = int(
Expression(
"N",
Expression(
"Round",
Expression(
"Divide",
Expression("Precision", py_n),
Expression("Log", 10, py_b),
),
),
)
.evaluate(evaluation)
.to_python()
)
exp = log_n_b(py_n, py_b)
if py_n == 0 and nr_elements is not None:
exp = 0
digits = []
if not py_b == 10:
digits = convert_float_base(py_n, py_b, display_len - exp)
# truncate all the leading 0's
i = 0
while digits and digits[i] == 0:
i += 1
digits = digits[i:]
if not isinstance(n, Integer):
if len(digits) > display_len:
digits = digits[: display_len - 1]
else:
# drop any leading zeroes
for x in str(py_n):
if x != "." and (digits or x != "0"):
digits.append(x)
if pos is not None:
temp = exp
exp = pos + 1
move = temp - 1 - pos
if move <= 0:
digits = [0] * abs(move) + digits
else:
digits = digits[abs(move) :]
display_len = display_len - move
leaves = []
for x in digits:
if x == "e" or x == "E":
break
# Convert to Mathics' list format
leaves.append(Integer(int(x)))
if not rational_no:
while len(leaves) < display_len:
leaves.append(Integer0)
if nr_elements is not None:
# display_len == nr_elements
if len(leaves) >= nr_elements:
# Truncate, preserving the digits on the right
leaves = leaves[:nr_elements]
else:
if isinstance(n, Integer):
while len(leaves) < nr_elements:
leaves.append(Integer0)
else:
# Adding Indeterminate if the length is greater than the precision
while len(leaves) < nr_elements:
leaves.append(from_python(Symbol("Indeterminate")))
list_str = Expression(SymbolList, *leaves)
return Expression(SymbolList, list_str, exp)
def apply_with_base_and_length(self, n, b, length, evaluation, pos=None):
"%(name)s[n_?NumericQ, b_Integer, length_]"
leaves = []
if pos is not None:
leaves.append(from_python(pos))
expr = Expression("RealDigits", n, b, length, *leaves)
if not (isinstance(length, Integer) and length.get_int_value() >= 0):
return evaluation.message("RealDigits", "intnm", expr)
return self.apply_with_base(
n, b, evaluation, nr_elements=length.get_int_value(), pos=pos
)
def apply_with_base_length_and_precision(self, n, b, length, p, evaluation):
"%(name)s[n_?NumericQ, b_Integer, length_, p_]"
if not isinstance(p, Integer):
return evaluation.message(
"RealDigits", "intm", Expression("RealDigits", n, b, length, p)
)
return self.apply_with_base_and_length(
n, b, length, evaluation, pos=p.get_int_value()
)
class MaxPrecision(Predefined):
"""
<dl>
<dt>'$MaxPrecision'
<dd>represents the maximum number of digits of precision permitted in abitrary-precision numbers.
</dl>
>> $MaxPrecision
= Infinity
>> $MaxPrecision = 10;
>> N[Pi, 11]
: Requested precision 11 is larger than $MaxPrecision. Using current $MaxPrecision of 10. instead. $MaxPrecision = Infinity specifies that any precision should be allowed.
= 3.141592654
#> N[Pi, 10]
= 3.141592654
#> $MaxPrecision = x
: Cannot set $MaxPrecision to x; value must be a positive number or Infinity.
= x
#> $MaxPrecision = -Infinity
: Cannot set $MaxPrecision to -Infinity; value must be a positive number or Infinity.
= -Infinity
#> $MaxPrecision = 0
: Cannot set $MaxPrecision to 0; value must be a positive number or Infinity.
= 0
#> $MaxPrecision = Infinity;
#> $MinPrecision = 15;
#> $MaxPrecision = 10
: Cannot set $MaxPrecision such that $MaxPrecision < $MinPrecision.
= 10
#> $MaxPrecision
= Infinity
#> $MinPrecision = 0;
"""
is_numeric = False
messages = {
"precset": "Cannot set `1` to `2`; value must be a positive number or Infinity.",
"preccon": "Cannot set `1` such that $MaxPrecision < $MinPrecision.",
}
name = "$MaxPrecision"
rules = {
"$MaxPrecision": "Infinity",
}
summary_text = "settable global maximum precision bound"
class MachineEpsilon_(Predefined):
"""
<dl>
<dt>'$MachineEpsilon'
<dd>is the distance between '1.0' and the next
nearest representable machine-precision number.
</dl>
>> $MachineEpsilon
= 2.22045*^-16
>> x = 1.0 + {0.4, 0.5, 0.6} $MachineEpsilon;
>> x - 1
= {0., 0., 2.22045*^-16}
"""
is_numeric = True
name = "$MachineEpsilon"
summary_text = "the difference between 1.0 and the next-nearest number representable as a machine-precision number"
def evaluate(self, evaluation):
return MachineReal(machine_epsilon)
class MachinePrecision_(Predefined):
"""
<dl>
<dt>'$MachinePrecision'
<dd>is the number of decimal digits of precision for machine-precision numbers.
</dl>
>> $MachinePrecision
= 15.9546
"""
name = "$MachinePrecision"
summary_text = (
"the number of decimal digits of precision for machine-precision numbers"
)
is_numeric = True
rules = {
"$MachinePrecision": "N[MachinePrecision]",
}
class MachinePrecision(Predefined):
"""
<dl>
<dt>'MachinePrecision'
<dd>represents the precision of machine precision numbers.
</dl>
>> N[MachinePrecision]
= 15.9546
>> N[MachinePrecision, 30]
= 15.9545897701910033463281614204
#> N[E, MachinePrecision]
= 2.71828
#> Round[MachinePrecision]
= 16
"""
is_numeric = True
rules = {
"N[MachinePrecision, prec_]": ("N[Log[10, 2] * %i, prec]" % machine_precision),
}
summary_text = "symbol used to indicate machine‐number precision"
class MinPrecision(Builtin):
"""
<dl>
<dt>'$MinPrecision'
<dd>represents the minimum number of digits of precision permitted in abitrary-precision numbers.
</dl>
>> $MinPrecision
= 0
>> $MinPrecision = 10;
>> N[Pi, 9]
: Requested precision 9 is smaller than $MinPrecision. Using current $MinPrecision of 10. instead.
= 3.141592654
#> N[Pi, 10]
= 3.141592654
#> $MinPrecision = x
: Cannot set $MinPrecision to x; value must be a non-negative number.
= x
#> $MinPrecision = -Infinity
: Cannot set $MinPrecision to -Infinity; value must be a non-negative number.
= -Infinity
#> $MinPrecision = -1
: Cannot set $MinPrecision to -1; value must be a non-negative number.
= -1
#> $MinPrecision = 0;
#> $MaxPrecision = 10;
#> $MinPrecision = 15
: Cannot set $MinPrecision such that $MaxPrecision < $MinPrecision.
= 15
#> $MinPrecision
= 0
#> $MaxPrecision = Infinity;
"""
messages = {
"precset": "Cannot set `1` to `2`; value must be a non-negative number.",
"preccon": "Cannot set `1` such that $MaxPrecision < $MinPrecision.",
}
name = "$MinPrecision"
is_numeric = True
rules = {
"$MinPrecision": "0",
}
summary_text = "settable global minimum precision bound"
class NumericQ(Builtin):
"""
<dl>
<dt>'NumericQ[$expr$]'
<dd>tests whether $expr$ represents a numeric quantity.
</dl>
>> NumericQ[2]
= True
>> NumericQ[Sqrt[Pi]]
= True
>> NumberQ[Sqrt[Pi]]
= False
It is possible to set that a symbol is numeric or not by assign a boolean value
to ``NumericQ``
>> NumericQ[a]=True
= True
>> NumericQ[a]
= True
>> NumericQ[Sin[a]]
= True
Clear and ClearAll do not restore the default value.
>> Clear[a]; NumericQ[a]
= True
>> ClearAll[a]; NumericQ[a]
= True
>> NumericQ[a]=False; NumericQ[a]
= False
NumericQ can only set to True or False
>> NumericQ[a] = 37
: Cannot set NumericQ[a] to 37; the lhs argument must be a symbol and the rhs must be True or False.
= 37
"""
messages = {
"argx": "NumericQ called with `1` arguments; 1 argument is expected.",
"set": "Cannot set `1` to `2`; the lhs argument must be a symbol and the rhs must be True or False.",
}
summary_text = "test whether an exprssion is a number"
def apply(self, expr, evaluation):
"NumericQ[expr_]"
return SymbolTrue if expr.is_numeric(evaluation) else SymbolFalse
class Precision(Builtin):
"""
<dl>
<dt>'Precision[$expr$]'
<dd>examines the number of significant digits of $expr$.
</dl>
This is rather a proof-of-concept than a full implementation.
Precision of compound expression is not supported yet.
>> Precision[1]
= Infinity
>> Precision[1/2]
= Infinity
>> Precision[0.5]
= MachinePrecision
#> Precision[0.0]
= MachinePrecision
#> Precision[0.000000000000000000000000000000000000]
= 0.
#> Precision[-0.0]
= MachinePrecision
#> Precision[-0.000000000000000000000000000000000000]
= 0.
#> 1.0000000000000000 // Precision
= MachinePrecision
#> 1.00000000000000000 // Precision
= 17.
#> 0.4 + 2.4 I // Precision
= MachinePrecision
#> Precision[2 + 3 I]
= Infinity
#> Precision["abc"]
= Infinity
"""
rules = {
"Precision[z_?MachineNumberQ]": "MachinePrecision",
}
summary_text = "find the precision of a number"
def apply(self, z, evaluation):
"Precision[z_]"
if not z.is_inexact():
return Symbol("Infinity")
elif z.to_sympy().is_zero:
return Real(0)
else:
return Real(dps(z.get_precision()))
| # cython: language_level=3
# -*- coding: utf-8 -*-
# Note: docstring is flowed in documentation. Line breaks in the docstring will appear in the
# printed output, so be carful not to add then mid-sentence.
"""
Representation of Numbers
Integers and Real numbers with any number of digits, automatically tagging numerical preceision when appropriate.
Precision is not "guarded" through the evaluation process. Only integer precision is supported.
However, things like 'N[Pi, 100]' should work as expected.
"""
import sympy
import mpmath
from functools import lru_cache
from mathics.builtin.base import Builtin, Predefined, Test
from mathics.core.evaluators import apply_N
from mathics.core.expression import Expression
from mathics.core.symbols import (
Symbol,
SymbolFalse,
SymbolList,
SymbolTrue,
)
from mathics.core.atoms import (
Integer,
Integer0,
MachineReal,
Number,
Rational,
Real,
from_python,
)
from mathics.core.number import (
dps,
convert_int_to_digit_list,
machine_precision,
machine_epsilon,
)
from mathics.core.attributes import (
listable,
protected,
)
@lru_cache(maxsize=1024)
def log_n_b(py_n, py_b) -> int:
return int(mpmath.ceil(mpmath.log(py_n, py_b))) if py_n != 0 and py_n != 1 else 1
def check_finite_decimal(denominator):
# The rational number is finite decimal if the denominator has form 2^a * 5^b
while denominator % 5 == 0:
denominator = denominator / 5
while denominator % 2 == 0:
denominator = denominator / 2
return True if denominator == 1 else False
def convert_repeating_decimal(numerator, denominator, base):
    # Split numerator/denominator into its integer-part digits ("head") and
    # the repeating fractional digits ("tails") in the given base, e.g.
    # 19/7 in base 10 -> (['2'], ['7', '1', '4', '2', '8', '5']).
    head = [x for x in str(numerator // denominator)]
    tails = []
    # Remainders seen so far: long division repeats as soon as a remainder
    # reappears, which closes the digit cycle.
    subresults = [numerator % denominator]
    numerator %= denominator
    while numerator != 0:  # only rational input can go to this case
        numerator *= base
        result_digit, numerator = divmod(numerator, denominator)
        tails.append(str(result_digit))
        if numerator not in subresults:
            subresults.append(numerator)
        else:
            break
    # Move digits shared by the tail of the integer part and the tail of the
    # cycle to the front of the cycle, so the repeating block starts as early
    # as possible.  (The `j = j - 1` is recomputed next iteration anyway.)
    for i in range(len(head) - 1, -1, -1):
        j = len(tails) - 1
        if head[i] != tails[j]:
            break
        else:
            del tails[j]
            tails.insert(0, head[i])
            del head[i]
            j = j - 1
    # truncate all leading 0's
    if all(elem == "0" for elem in head):
        for i in range(0, len(tails)):
            if tails[0] == "0":
                tails = tails[1:] + [str(0)]
            else:
                break
    return (head, tails)
def convert_float_base(x, base, precision=10):
    """Return the digits of non-negative ``x`` in ``base``: the integer-part
    digits (most significant first) followed, for float/sympy.Float input,
    by up to ``precision`` fractional digits."""
    length_of_int = 0 if x == 0 else int(mpmath.log(x, base))
    # iexps = list(range(length_of_int, -1, -1))

    def convert_int(x, base, exponents):
        # Classic repeated divmod, least-significant digit first.
        out = []
        for e in range(0, exponents + 1):
            d = x % base
            out.append(d)
            # BUG FIX: this was `x = x / base` (true division), which turned
            # x into a float so every digit after the first was a fractional
            # garbage value (e.g. 10 in base 2 produced [1.25, 0.5, 1.0, 0]).
            # Floor division keeps the arithmetic in exact integers.
            x = x // base
            if x == 0:
                break
        out.reverse()
        return out

    def convert_float(x, base, exponents):
        # Peel off one fractional digit per iteration by multiplying the
        # fractional part by the base.
        out = []
        for e in range(0, exponents):
            d = int(x * base)
            out.append(d)
            x = (x * base) - d
            if x == 0:
                break
        return out

    int_part = convert_int(int(x), base, length_of_int)
    if isinstance(x, (float, sympy.Float)):
        # fexps = list(range(-1, -int(precision + 1), -1))
        real_part = convert_float(x - int(x), base, precision + 1)
        return int_part + real_part
    elif isinstance(x, int):
        return int_part
    else:
        raise TypeError(x)
class ExactNumberQ(Test):
    """
    <dl>
    <dt>'ExactNumberQ[$expr$]'
        <dd>returns 'True' if $expr$ is an exact number, and 'False' otherwise.
    </dl>
    >> ExactNumberQ[10]
     = True
    >> ExactNumberQ[4.0]
     = False
    >> ExactNumberQ[n]
     = False
    'ExactNumberQ' can be applied to complex numbers:
    >> ExactNumberQ[1 + I]
     = True
    >> ExactNumberQ[1 + 1. I]
     = False
    """

    summary_text = "test if an expression is an exact real or complex number"

    def test(self, expr):
        # Exact = a Number atom with no floating-point component; symbols
        # and other non-Number expressions fail the isinstance check.
        return isinstance(expr, Number) and not expr.is_inexact()
class IntegerDigits(Builtin):
    """
    <dl>
    <dt>'IntegerDigits[$n$]'
        <dd>returns a list of the base-10 digits in the integer $n$.
    <dt>'IntegerDigits[$n$, $base$]'
        <dd>returns a list of the base-$base$ digits in $n$.
    <dt>'IntegerDigits[$n$, $base$, $length$]'
        <dd>returns a list of length $length$, truncating or padding
        with zeroes on the left as necessary.
    </dl>
    >> IntegerDigits[76543]
     = {7, 6, 5, 4, 3}
    The sign of $n$ is discarded:
    >> IntegerDigits[-76543]
     = {7, 6, 5, 4, 3}
    >> IntegerDigits[15, 16]
     = {15}
    >> IntegerDigits[1234, 16]
     = {4, 13, 2}
    >> IntegerDigits[1234, 10, 5]
     = {0, 1, 2, 3, 4}
    #> IntegerDigits[1000, 10]
     = {1, 0, 0, 0}
    #> IntegerDigits[0]
     = {0}
    """

    attributes = listable | protected

    messages = {
        "int": "Integer expected at position 1 in `1`",
        "ibase": "Base `1` is not an integer greater than 1.",
    }

    rules = {
        "IntegerDigits[n_]": "IntegerDigits[n, 10]",
    }

    summary_text = "digits of an integer in any base"

    def apply_len(self, n, base, length, evaluation):
        # Three-argument form: validate the requested length, then delegate
        # to the two-argument implementation with padding/truncation.
        # ("intnn" is a general message, not defined in `messages` above.)
        "IntegerDigits[n_, base_, length_]"
        if not (isinstance(length, Integer) and length.get_int_value() >= 0):
            return evaluation.message("IntegerDigits", "intnn")
        return self.apply(n, base, evaluation, nr_elements=length.get_int_value())

    def apply(self, n, base, evaluation, nr_elements=None):
        # Two-argument form.  nr_elements (when not None) is the exact
        # output length requested via the three-argument form.
        "IntegerDigits[n_, base_]"
        if not (isinstance(n, Integer)):
            return evaluation.message(
                "IntegerDigits", "int", Expression("IntegerDigits", n, base)
            )

        if not (isinstance(base, Integer) and base.get_int_value() > 1):
            return evaluation.message("IntegerDigits", "ibase", base)

        if nr_elements == 0:
            # trivial case: we don't want any digits
            return Expression(SymbolList)

        digits = convert_int_to_digit_list(n.get_int_value(), base.get_int_value())

        if nr_elements is not None:
            if len(digits) >= nr_elements:
                # Truncate, preserving the digits on the right
                digits = digits[-nr_elements:]
            else:
                # Pad with zeroes
                digits = [0] * (nr_elements - len(digits)) + digits

        return Expression(SymbolList, *digits)
class IntegerExponent(Builtin):
    """
    <dl>
    <dt>'IntegerExponent[$n$, $b$]'
        <dd>gives the highest exponent of $b$ that divides $n$.
    </dl>
    >> IntegerExponent[16, 2]
     = 4
    >> IntegerExponent[-510000]
     = 4
    >> IntegerExponent[10, b]
     = IntegerExponent[10, b]
    """

    attributes = listable | protected

    messages = {
        "int": "Integer expected at position 1 in `1`",
        "ibase": "Base `1` is not an integer greater than 1.",
    }

    rules = {
        "IntegerExponent[n_]": "IntegerExponent[n, 10]",
    }

    summary_text = "number of trailing 0s in a given base"

    def apply(self, n, b, evaluation):
        # Count how many times b divides n by stripping one factor of b per
        # iteration.  This replaces the old loop that recomputed b**result
        # on every pass (its own TODO flagged the quadratic cost).
        "IntegerExponent[n_Integer, b_Integer]"
        py_n, py_b = n.to_python(), b.to_python()
        expr = Expression("IntegerExponent", n, b)

        if not isinstance(py_n, int):
            evaluation.message("IntegerExponent", "int", expr)
        py_n = abs(py_n)

        if not (isinstance(py_b, int) and py_b > 1):
            evaluation.message("IntegerExponent", "ibase", b)

        if py_n == 0:
            # 0 is divisible by every power of b, so the old loop never
            # terminated here.  WMA defines IntegerExponent[0, b] as Infinity.
            return Symbol("Infinity")

        result = 0
        while py_n % py_b == 0:
            py_n //= py_b
            result += 1
        return Integer(result)
class IntegerLength(Builtin):
    """
    <dl>
    <dt>'IntegerLength[$x$]'
        <dd>gives the number of digits in the base-10 representation of $x$.
    <dt>'IntegerLength[$x$, $b$]'
        <dd>gives the number of base-$b$ digits in $x$.
    </dl>
    >> IntegerLength[123456]
     = 6
    >> IntegerLength[10^10000]
     = 10001
    >> IntegerLength[-10^1000]
     = 1001
    'IntegerLength' with base 2:
    >> IntegerLength[8, 2]
     = 4
    Check that 'IntegerLength' is correct for the first 100 powers of 10:
    >> IntegerLength /@ (10 ^ Range[100]) == Range[2, 101]
     = True
    The base must be greater than 1:
    >> IntegerLength[3, -2]
     : Base -2 is not an integer greater than 1.
     = IntegerLength[3, -2]
    '0' is a special case:
    >> IntegerLength[0]
     = 0
    #> IntegerLength /@ (10 ^ Range[100] - 1) == Range[1, 100]
     = True
    """

    attributes = listable | protected

    messages = {
        "base": "Base `1` is not an integer greater than 1.",
    }

    rules = {
        "IntegerLength[n_]": "IntegerLength[n, 10]",
    }

    summary_text = "total number of digits in any base"

    def apply(self, n, b, evaluation):
        # Digit count via exponential search (double j until b**j > n)
        # followed by bisection; the answer is the smallest j with b**j > n.
        "IntegerLength[n_, b_]"
        n, b = n.get_int_value(), b.get_int_value()
        if n is None or b is None:
            evaluation.message("IntegerLength", "int")
            return
        if b <= 1:
            evaluation.message("IntegerLength", "base", b)
            return

        if n == 0:
            # special case
            return Integer0

        n = abs(n)

        # O(log(digits))

        # find bounds
        j = 1
        while b ** j <= n:
            j *= 2
        i = j // 2

        # bisection
        while i + 1 < j:
            # assert b ** i <= n <= b ** j
            k = (i + j) // 2
            if b ** k <= n:
                i = k
            else:
                j = k
        return Integer(j)
class InexactNumberQ(Test):
    """
    <dl>
    <dt>'InexactNumberQ[$expr$]'
        <dd>returns 'True' if $expr$ is not an exact number, and 'False' otherwise.
    </dl>
    >> InexactNumberQ[a]
     = False
    >> InexactNumberQ[3.0]
     = True
    >> InexactNumberQ[2/3]
     = False
    'InexactNumberQ' can be applied to complex numbers:
    >> InexactNumberQ[4.0+I]
     = True
    """

    summary_text = "the negation of ExactNumberQ"

    def test(self, expr):
        # Inexact = a Number atom with a floating-point component; note that
        # non-Number expressions (symbols, strings) return False here, not
        # the literal negation of ExactNumberQ.
        return isinstance(expr, Number) and expr.is_inexact()
class IntegerQ(Test):
    """
    <dl>
    <dt>'IntegerQ[$expr$]'
        <dd>returns 'True' if $expr$ is an integer, and 'False' otherwise.
    </dl>
    >> IntegerQ[3]
     = True
    >> IntegerQ[Pi]
     = False
    """

    summary_text = "test whether an expression is an integer"

    def test(self, expr):
        # Only Integer atoms qualify; Real, Rational, Complex and symbolic
        # expressions all fail the isinstance check.
        return isinstance(expr, Integer)
class MachineNumberQ(Test):
    """
    <dl>
    <dt>'MachineNumberQ[$expr$]'
        <dd>returns 'True' if $expr$ is a machine-precision real or complex number.
    </dl>
     = True
    >> MachineNumberQ[3.14159265358979324]
     = False
    >> MachineNumberQ[1.5 + 2.3 I]
     = True
    >> MachineNumberQ[2.71828182845904524 + 3.14159265358979324 I]
     = False
    #> MachineNumberQ[1.5 + 3.14159265358979324 I]
     = True
    #> MachineNumberQ[1.5 + 5 I]
     = True
    """

    summary_text = "test if expression is a machine‐precision real or complex number"

    def test(self, expr):
        # Delegates entirely to the atom: True only for machine-precision
        # Real/Complex values, False for arbitrary-precision numbers and
        # everything else.
        return expr.is_machine_precision()
class RealDigits(Builtin):
    """
    <dl>
    <dt>'RealDigits[$n$]'
        <dd>returns the decimal representation of the real number $n$ as list of digits, together with the number of digits that are to the left of the decimal point.
    <dt>'RealDigits[$n$, $b$]'
        <dd>returns a list of base_$b$ representation of the real number $n$.
    <dt>'RealDigits[$n$, $b$, $len$]'
        <dd>returns a list of $len$ digits.
    <dt>'RealDigits[$n$, $b$, $len$, $p$]'
        <dd>return $len$ digits starting with the coefficient of $b$^$p$
    </dl>
    Return the list of digits and exponent:
    >> RealDigits[123.55555]
     = {{1, 2, 3, 5, 5, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0}, 3}
    Return an explicit recurring decimal form:
    >> RealDigits[19 / 7]
     = {{2, {7, 1, 4, 2, 8, 5}}, 1}
    The 10000th digit of is an 8:
    >> RealDigits[Pi, 10, 1, -10000]
     = {{8}, -9999}
    20 digits starting with the coefficient of 10^-5:
    >> RealDigits[Pi, 10, 20, -5]
     = {{9, 2, 6, 5, 3, 5, 8, 9, 7, 9, 3, 2, 3, 8, 4, 6, 2, 6, 4, 3}, -4}
    RealDigits gives Indeterminate if more digits than the precision are requested:
    >> RealDigits[123.45, 10, 18]
     = {{1, 2, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Indeterminate, Indeterminate}, 3}
    #> RealDigits[-1.25, -1]
     : Base -1 is not a real number greater than 1.
     = RealDigits[-1.25, -1]
    Return 25 digits of in base 10:
    >> RealDigits[Pi, 10, 25]
     = {{3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8, 9, 7, 9, 3, 2, 3, 8, 4, 6, 2, 6, 4, 3}, 1}
    #> RealDigits[-Pi]
     : The number of digits to return cannot be determined.
     = RealDigits[-Pi]
    #> RealDigits[I, 7]
     : The value I is not a real number.
     = RealDigits[I, 7]
    #> RealDigits[Pi]
     : The number of digits to return cannot be determined.
     = RealDigits[Pi]
    #> RealDigits[3 + 4 I]
     : The value 3 + 4 I is not a real number.
     = RealDigits[3 + 4 I]
    #> RealDigits[3.14, 10, 1.5]
     : Non-negative machine-sized integer expected at position 3 in RealDigits[3.14, 10, 1.5].
     = RealDigits[3.14, 10, 1.5]
    #> RealDigits[3.14, 10, 1, 1.5]
     : Machine-sized integer expected at position 4 in RealDigits[3.14, 10, 1, 1.5].
     = RealDigits[3.14, 10, 1, 1.5]
    """

    attributes = listable | protected

    messages = {
        "realx": "The value `1` is not a real number.",
        "ndig": "The number of digits to return cannot be determined.",
        "rbase": "Base `1` is not a real number greater than 1.",
        "intnm": "Non-negative machine-sized integer expected at position 3 in `1`.",
        "intm": "Machine-sized integer expected at position 4 in `1`.",
    }

    summary_text = "digits of a real number"

    def apply_complex(self, n, var, evaluation):
        # Complex input is rejected outright, whatever the other arguments.
        "%(name)s[n_Complex, var___]"
        return evaluation.message("RealDigits", "realx", n)

    def apply_rational_with_base(self, n, b, evaluation):
        # Rationals with a terminating expansion are handled like ordinary
        # reals; otherwise emit the {head-digits, {repeating-cycle}} form.
        "%(name)s[n_Rational, b_Integer]"
        # expr = Expression("RealDigits", n)
        py_n = abs(n.value)
        py_b = b.get_int_value()
        if check_finite_decimal(n.denominator().get_int_value()) and not py_b % 2:
            return self.apply_with_base(n, b, evaluation)
        else:
            exp = int(mpmath.ceil(mpmath.log(py_n, py_b)))
            (head, tails) = convert_repeating_decimal(
                py_n.as_numer_denom()[0], py_n.as_numer_denom()[1], py_b
            )
            leaves = []
            for x in head:
                if x != "0":
                    leaves.append(Integer(int(x)))
            leaves.append(from_python(tails))
            list_str = Expression(SymbolList, *leaves)
            return Expression(SymbolList, list_str, exp)

    def apply_rational_without_base(self, n, evaluation):
        # One-argument rational form defaults to base 10.
        "%(name)s[n_Rational]"

        return self.apply_rational_with_base(n, Integer(10), evaluation)

    def apply(self, n, evaluation):
        # One-argument catch-all: reject bare System` symbols (Pi, E, ...)
        # since without a digit count their expansion is unbounded.
        "%(name)s[n_]"
        # Handling the testcases that throw the error message and return the ouput that doesn't include `base` argument
        if isinstance(n, Symbol) and n.name.startswith("System`"):
            return evaluation.message("RealDigits", "ndig", n)

        if n.is_numeric(evaluation):
            return self.apply_with_base(n, from_python(10), evaluation)

    def apply_with_base(self, n, b, evaluation, nr_elements=None, pos=None):
        # Workhorse shared by all entry points.  nr_elements is the exact
        # number of digits requested (3-argument form); pos is the power of
        # b at which the first returned digit sits (4-argument form).
        "%(name)s[n_?NumericQ, b_Integer]"

        expr = Expression("RealDigits", n)
        rational_no = (
            True if isinstance(n, Rational) else False
        )  # it is used for checking whether the input n is a rational or not
        py_b = b.get_int_value()
        if isinstance(n, (Expression, Symbol, Rational)):
            pos_len = abs(pos) + 1 if pos is not None and pos < 0 else 1
            if nr_elements is not None:
                # Numericize to just enough decimal precision to produce the
                # requested digits at the requested position.
                n = Expression(
                    "N", n, int(mpmath.log(py_b ** (nr_elements + pos_len), 10)) + 1
                ).evaluate(evaluation)
            else:
                if rational_no:
                    n = apply_N(n, evaluation)
                else:
                    return evaluation.message("RealDigits", "ndig", expr)
        py_n = abs(n.value)

        if not py_b > 1:
            return evaluation.message("RealDigits", "rbase", py_b)

        if isinstance(py_n, complex):
            return evaluation.message("RealDigits", "realx", expr)

        # Number of digits worth displaying: for reals this is derived from
        # the value's Precision, scaled to the target base.
        if isinstance(n, Integer):
            display_len = (
                int(mpmath.floor(mpmath.log(py_n, py_b)))
                if py_n != 0 and py_n != 1
                else 1
            )
        else:
            display_len = int(
                Expression(
                    "N",
                    Expression(
                        "Round",
                        Expression(
                            "Divide",
                            Expression("Precision", py_n),
                            Expression("Log", 10, py_b),
                        ),
                    ),
                )
                .evaluate(evaluation)
                .to_python()
            )

        exp = log_n_b(py_n, py_b)

        if py_n == 0 and nr_elements is not None:
            exp = 0

        digits = []
        if not py_b == 10:
            digits = convert_float_base(py_n, py_b, display_len - exp)
            # truncate all the leading 0's
            i = 0
            while digits and digits[i] == 0:
                i += 1
            digits = digits[i:]

            if not isinstance(n, Integer):
                if len(digits) > display_len:
                    digits = digits[: display_len - 1]
        else:
            # drop any leading zeroes
            for x in str(py_n):
                if x != "." and (digits or x != "0"):
                    digits.append(x)

        if pos is not None:
            # Shift the digit window so it starts at coefficient b^pos:
            # pad with zeros on the left or drop leading digits as needed.
            temp = exp
            exp = pos + 1
            move = temp - 1 - pos
            if move <= 0:
                digits = [0] * abs(move) + digits
            else:
                digits = digits[abs(move) :]
                display_len = display_len - move

        leaves = []
        for x in digits:
            if x == "e" or x == "E":
                break
            # Convert to Mathics' list format
            leaves.append(Integer(int(x)))

        if not rational_no:
            while len(leaves) < display_len:
                leaves.append(Integer0)

        if nr_elements is not None:
            # display_len == nr_elements
            if len(leaves) >= nr_elements:
                # Truncate, preserving the digits on the right
                leaves = leaves[:nr_elements]
            else:
                if isinstance(n, Integer):
                    while len(leaves) < nr_elements:
                        leaves.append(Integer0)
                else:
                    # Adding Indeterminate if the length is greater than the precision
                    while len(leaves) < nr_elements:
                        leaves.append(from_python(Symbol("Indeterminate")))
        list_str = Expression(SymbolList, *leaves)
        return Expression(SymbolList, list_str, exp)

    def apply_with_base_and_length(self, n, b, length, evaluation, pos=None):
        # Three-argument form: validate the digit count, then delegate.
        "%(name)s[n_?NumericQ, b_Integer, length_]"
        leaves = []
        if pos is not None:
            leaves.append(from_python(pos))
        expr = Expression("RealDigits", n, b, length, *leaves)
        if not (isinstance(length, Integer) and length.get_int_value() >= 0):
            return evaluation.message("RealDigits", "intnm", expr)

        return self.apply_with_base(
            n, b, evaluation, nr_elements=length.get_int_value(), pos=pos
        )

    def apply_with_base_length_and_precision(self, n, b, length, p, evaluation):
        # Four-argument form: validate the starting exponent, then delegate.
        "%(name)s[n_?NumericQ, b_Integer, length_, p_]"
        if not isinstance(p, Integer):
            return evaluation.message(
                "RealDigits", "intm", Expression("RealDigits", n, b, length, p)
            )

        return self.apply_with_base_and_length(
            n, b, length, evaluation, pos=p.get_int_value()
        )
class MaxPrecision(Predefined):
    """
    <dl>
    <dt>'$MaxPrecision'
        <dd>represents the maximum number of digits of precision permitted in abitrary-precision numbers.
    </dl>
    >> $MaxPrecision
     = Infinity
    >> $MaxPrecision = 10;
    >> N[Pi, 11]
     : Requested precision 11 is larger than $MaxPrecision. Using current $MaxPrecision of 10. instead. $MaxPrecision = Infinity specifies that any precision should be allowed.
     = 3.141592654
    #> N[Pi, 10]
     = 3.141592654
    #> $MaxPrecision = x
     : Cannot set $MaxPrecision to x; value must be a positive number or Infinity.
     = x
    #> $MaxPrecision = -Infinity
     : Cannot set $MaxPrecision to -Infinity; value must be a positive number or Infinity.
     = -Infinity
    #> $MaxPrecision = 0
     : Cannot set $MaxPrecision to 0; value must be a positive number or Infinity.
     = 0
    #> $MaxPrecision = Infinity;
    #> $MinPrecision = 15;
    #> $MaxPrecision = 10
     : Cannot set $MaxPrecision such that $MaxPrecision < $MinPrecision.
     = 10
    #> $MaxPrecision
     = Infinity
    #> $MinPrecision = 0;
    """

    is_numeric = False

    # NOTE(review): the assignment validation that emits these messages is
    # not in this class — confirm it lives in the Set implementation.
    messages = {
        "precset": "Cannot set `1` to `2`; value must be a positive number or Infinity.",
        "preccon": "Cannot set `1` such that $MaxPrecision < $MinPrecision.",
    }

    name = "$MaxPrecision"

    # The default value is provided declaratively by this rewrite rule.
    rules = {
        "$MaxPrecision": "Infinity",
    }

    summary_text = "settable global maximum precision bound"
class MachineEpsilon_(Predefined):
    """
    <dl>
    <dt>'$MachineEpsilon'
        <dd>is the distance between '1.0' and the next
        nearest representable machine-precision number.
    </dl>
    >> $MachineEpsilon
     = 2.22045*^-16
    >> x = 1.0 + {0.4, 0.5, 0.6} $MachineEpsilon;
    >> x - 1
     = {0., 0., 2.22045*^-16}
    """

    is_numeric = True
    name = "$MachineEpsilon"
    summary_text = "the difference between 1.0 and the next-nearest number representable as a machine-precision number"

    def evaluate(self, evaluation):
        # Wrap the numeric core's machine_epsilon constant in a MachineReal
        # atom each time the symbol is evaluated.
        return MachineReal(machine_epsilon)
class MachinePrecision_(Predefined):
    """
    <dl>
    <dt>'$MachinePrecision'
        <dd>is the number of decimal digits of precision for machine-precision numbers.
    </dl>
    >> $MachinePrecision
     = 15.9546
    """

    name = "$MachinePrecision"
    summary_text = (
        "the number of decimal digits of precision for machine-precision numbers"
    )
    is_numeric = True
    # Defined in terms of the MachinePrecision symbol (see the
    # MachinePrecision builtin in this module).
    rules = {
        "$MachinePrecision": "N[MachinePrecision]",
    }
class MachinePrecision(Predefined):
    """
    <dl>
    <dt>'MachinePrecision'
        <dd>represents the precision of machine precision numbers.
    </dl>
    >> N[MachinePrecision]
     = 15.9546
    >> N[MachinePrecision, 30]
     = 15.9545897701910033463281614204
    #> N[E, MachinePrecision]
     = 2.71828
    #> Round[MachinePrecision]
     = 16
    """

    is_numeric = True
    # machine_precision is the binary precision in bits; the rule converts
    # it to decimal digits (Log[10, 2] * bits) at the requested precision.
    rules = {
        "N[MachinePrecision, prec_]": ("N[Log[10, 2] * %i, prec]" % machine_precision),
    }
    summary_text = "symbol used to indicate machine‐number precision"
class MinPrecision(Builtin):
    """
    <dl>
    <dt>'$MinPrecision'
        <dd>represents the minimum number of digits of precision permitted in abitrary-precision numbers.
    </dl>
    >> $MinPrecision
     = 0
    >> $MinPrecision = 10;
    >> N[Pi, 9]
     : Requested precision 9 is smaller than $MinPrecision. Using current $MinPrecision of 10. instead.
     = 3.141592654
    #> N[Pi, 10]
     = 3.141592654
    #> $MinPrecision = x
     : Cannot set $MinPrecision to x; value must be a non-negative number.
     = x
    #> $MinPrecision = -Infinity
     : Cannot set $MinPrecision to -Infinity; value must be a non-negative number.
     = -Infinity
    #> $MinPrecision = -1
     : Cannot set $MinPrecision to -1; value must be a non-negative number.
     = -1
    #> $MinPrecision = 0;
    #> $MaxPrecision = 10;
    #> $MinPrecision = 15
     : Cannot set $MinPrecision such that $MaxPrecision < $MinPrecision.
     = 15
    #> $MinPrecision
     = 0
    #> $MaxPrecision = Infinity;
    """

    # NOTE(review): the assignment validation that emits these messages is
    # not in this class — confirm it lives in the Set implementation.
    messages = {
        "precset": "Cannot set `1` to `2`; value must be a non-negative number.",
        "preccon": "Cannot set `1` such that $MaxPrecision < $MinPrecision.",
    }

    name = "$MinPrecision"
    is_numeric = True
    # The default value is provided declaratively by this rewrite rule.
    rules = {
        "$MinPrecision": "0",
    }

    summary_text = "settable global minimum precision bound"
class NumericQ(Builtin):
    """
    <dl>
    <dt>'NumericQ[$expr$]'
        <dd>tests whether $expr$ represents a numeric quantity.
    </dl>
    >> NumericQ[2]
     = True
    >> NumericQ[Sqrt[Pi]]
     = True
    >> NumberQ[Sqrt[Pi]]
     = False
    It is possible to set that a symbol is numeric or not by assign a boolean value
    to ``NumericQ``
    >> NumericQ[a]=True
     = True
    >> NumericQ[a]
     = True
    >> NumericQ[Sin[a]]
     = True
    Clear and ClearAll do not restore the default value.
    >> Clear[a]; NumericQ[a]
     = True
    >> ClearAll[a]; NumericQ[a]
     = True
    >> NumericQ[a]=False; NumericQ[a]
     = False
    NumericQ can only set to True or False
    >> NumericQ[a] = 37
     : Cannot set NumericQ[a] to 37; the lhs argument must be a symbol and the rhs must be True or False.
     = 37
    """

    messages = {
        "argx": "NumericQ called with `1` arguments; 1 argument is expected.",
        "set": "Cannot set `1` to `2`; the lhs argument must be a symbol and the rhs must be True or False.",
    }
    # Typo fix: the user-visible summary read "exprssion".
    summary_text = "test whether an expression is a number"

    def apply(self, expr, evaluation):
        # Delegates to Expression.is_numeric, which also honours the
        # user-settable NumericQ[symbol] = True/False overrides documented
        # above.
        "NumericQ[expr_]"
        return SymbolTrue if expr.is_numeric(evaluation) else SymbolFalse
class Precision(Builtin):
    """
    <dl>
    <dt>'Precision[$expr$]'
        <dd>examines the number of significant digits of $expr$.
    </dl>
    This is rather a proof-of-concept than a full implementation.
    Precision of compound expression is not supported yet.
    >> Precision[1]
     = Infinity
    >> Precision[1/2]
     = Infinity
    >> Precision[0.5]
     = MachinePrecision
    #> Precision[0.0]
     = MachinePrecision
    #> Precision[0.000000000000000000000000000000000000]
     = 0.
    #> Precision[-0.0]
     = MachinePrecision
    #> Precision[-0.000000000000000000000000000000000000]
     = 0.
    #> 1.0000000000000000 // Precision
     = MachinePrecision
    #> 1.00000000000000000 // Precision
     = 17.
    #> 0.4 + 2.4 I // Precision
     = MachinePrecision
    #> Precision[2 + 3 I]
     = Infinity
    #> Precision["abc"]
     = Infinity
    """

    # Machine numbers short-circuit via this rule; apply() below only sees
    # the remaining cases.
    rules = {
        "Precision[z_?MachineNumberQ]": "MachinePrecision",
    }

    summary_text = "find the precision of a number"

    def apply(self, z, evaluation):
        # Exact values have infinite precision; an arbitrary-precision zero
        # reports 0 digits; otherwise convert the internal binary precision
        # to decimal digits via dps().
        "Precision[z_]"
        if not z.is_inexact():
            return Symbol("Infinity")
        elif z.to_sympy().is_zero:
            return Real(0)
        else:
            return Real(dps(z.get_precision()))
</dl> >> IntegerExponent[16, 2] = 4 >> IntegerExponent[-510000] = 4 >> IntegerExponent[10, b] = IntegerExponent[10, b] # TODO: Optimise this (dont need to calc. base^result) # NOTE: IntegerExponent[a,b] causes a Python error here when a or b are # symbols <dl> <dt>'IntegerLength[$x$]' <dd>gives the number of digits in the base-10 representation of $x$. <dt>'IntegerLength[$x$, $b$]' <dd>gives the number of base-$b$ digits in $x$. </dl> >> IntegerLength[123456] = 6 >> IntegerLength[10^10000] = 10001 >> IntegerLength[-10^1000] = 1001 'IntegerLength' with base 2: >> IntegerLength[8, 2] = 4 Check that 'IntegerLength' is correct for the first 100 powers of 10: >> IntegerLength /@ (10 ^ Range[100]) == Range[2, 101] = True The base must be greater than 1: >> IntegerLength[3, -2] : Base -2 is not an integer greater than 1. = IntegerLength[3, -2] '0' is a special case: >> IntegerLength[0] = 0 #> IntegerLength /@ (10 ^ Range[100] - 1) == Range[1, 100] = True # special case # O(log(digits)) # find bounds # bisection # assert b ** i <= n <= b ** j <dl> <dt>'InexactNumberQ[$expr$]' <dd>returns 'True' if $expr$ is not an exact number, and 'False' otherwise. </dl> >> InexactNumberQ[a] = False >> InexactNumberQ[3.0] = True >> InexactNumberQ[2/3] = False 'InexactNumberQ' can be applied to complex numbers: >> InexactNumberQ[4.0+I] = True <dl> <dt>'IntegerQ[$expr$]' <dd>returns 'True' if $expr$ is an integer, and 'False' otherwise. </dl> >> IntegerQ[3] = True >> IntegerQ[Pi] = False <dl> <dt>'MachineNumberQ[$expr$]' <dd>returns 'True' if $expr$ is a machine-precision real or complex number. 
</dl> = True >> MachineNumberQ[3.14159265358979324] = False >> MachineNumberQ[1.5 + 2.3 I] = True >> MachineNumberQ[2.71828182845904524 + 3.14159265358979324 I] = False #> MachineNumberQ[1.5 + 3.14159265358979324 I] = True #> MachineNumberQ[1.5 + 5 I] = True <dl> <dt>'RealDigits[$n$]' <dd>returns the decimal representation of the real number $n$ as list of digits, together with the number of digits that are to the left of the decimal point. <dt>'RealDigits[$n$, $b$]' <dd>returns a list of base_$b$ representation of the real number $n$. <dt>'RealDigits[$n$, $b$, $len$]' <dd>returns a list of $len$ digits. <dt>'RealDigits[$n$, $b$, $len$, $p$]' <dd>return $len$ digits starting with the coefficient of $b$^$p$ </dl> Return the list of digits and exponent: >> RealDigits[123.55555] = {{1, 2, 3, 5, 5, 5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0}, 3} Return an explicit recurring decimal form: >> RealDigits[19 / 7] = {{2, {7, 1, 4, 2, 8, 5}}, 1} The 10000th digit of is an 8: >> RealDigits[Pi, 10, 1, -10000] = {{8}, -9999} 20 digits starting with the coefficient of 10^-5: >> RealDigits[Pi, 10, 20, -5] = {{9, 2, 6, 5, 3, 5, 8, 9, 7, 9, 3, 2, 3, 8, 4, 6, 2, 6, 4, 3}, -4} RealDigits gives Indeterminate if more digits than the precision are requested: >> RealDigits[123.45, 10, 18] = {{1, 2, 3, 4, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Indeterminate, Indeterminate}, 3} #> RealDigits[-1.25, -1] : Base -1 is not a real number greater than 1. = RealDigits[-1.25, -1] Return 25 digits of in base 10: >> RealDigits[Pi, 10, 25] = {{3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8, 9, 7, 9, 3, 2, 3, 8, 4, 6, 2, 6, 4, 3}, 1} #> RealDigits[-Pi] : The number of digits to return cannot be determined. = RealDigits[-Pi] #> RealDigits[I, 7] : The value I is not a real number. = RealDigits[I, 7] #> RealDigits[Pi] : The number of digits to return cannot be determined. = RealDigits[Pi] #> RealDigits[3 + 4 I] : The value 3 + 4 I is not a real number. 
= RealDigits[3 + 4 I] #> RealDigits[3.14, 10, 1.5] : Non-negative machine-sized integer expected at position 3 in RealDigits[3.14, 10, 1.5]. = RealDigits[3.14, 10, 1.5] #> RealDigits[3.14, 10, 1, 1.5] : Machine-sized integer expected at position 4 in RealDigits[3.14, 10, 1, 1.5]. = RealDigits[3.14, 10, 1, 1.5] # expr = Expression("RealDigits", n) # Handling the testcases that throw the error message and return the ouput that doesn't include `base` argument # it is used for checking whether the input n is a rational or not # truncate all the leading 0's # drop any leading zeroes # Convert to Mathics' list format # display_len == nr_elements # Truncate, preserving the digits on the right # Adding Indeterminate if the length is greater than the precision <dl> <dt>'$MaxPrecision' <dd>represents the maximum number of digits of precision permitted in abitrary-precision numbers. </dl> >> $MaxPrecision = Infinity >> $MaxPrecision = 10; >> N[Pi, 11] : Requested precision 11 is larger than $MaxPrecision. Using current $MaxPrecision of 10. instead. $MaxPrecision = Infinity specifies that any precision should be allowed. = 3.141592654 #> N[Pi, 10] = 3.141592654 #> $MaxPrecision = x : Cannot set $MaxPrecision to x; value must be a positive number or Infinity. = x #> $MaxPrecision = -Infinity : Cannot set $MaxPrecision to -Infinity; value must be a positive number or Infinity. = -Infinity #> $MaxPrecision = 0 : Cannot set $MaxPrecision to 0; value must be a positive number or Infinity. = 0 #> $MaxPrecision = Infinity; #> $MinPrecision = 15; #> $MaxPrecision = 10 : Cannot set $MaxPrecision such that $MaxPrecision < $MinPrecision. = 10 #> $MaxPrecision = Infinity #> $MinPrecision = 0; <dl> <dt>'$MachineEpsilon' <dd>is the distance between '1.0' and the next nearest representable machine-precision number. 
</dl> >> $MachineEpsilon = 2.22045*^-16 >> x = 1.0 + {0.4, 0.5, 0.6} $MachineEpsilon; >> x - 1 = {0., 0., 2.22045*^-16} <dl> <dt>'$MachinePrecision' <dd>is the number of decimal digits of precision for machine-precision numbers. </dl> >> $MachinePrecision = 15.9546 <dl> <dt>'MachinePrecision' <dd>represents the precision of machine precision numbers. </dl> >> N[MachinePrecision] = 15.9546 >> N[MachinePrecision, 30] = 15.9545897701910033463281614204 #> N[E, MachinePrecision] = 2.71828 #> Round[MachinePrecision] = 16 <dl> <dt>'$MinPrecision' <dd>represents the minimum number of digits of precision permitted in abitrary-precision numbers. </dl> >> $MinPrecision = 0 >> $MinPrecision = 10; >> N[Pi, 9] : Requested precision 9 is smaller than $MinPrecision. Using current $MinPrecision of 10. instead. = 3.141592654 #> N[Pi, 10] = 3.141592654 #> $MinPrecision = x : Cannot set $MinPrecision to x; value must be a non-negative number. = x #> $MinPrecision = -Infinity : Cannot set $MinPrecision to -Infinity; value must be a non-negative number. = -Infinity #> $MinPrecision = -1 : Cannot set $MinPrecision to -1; value must be a non-negative number. = -1 #> $MinPrecision = 0; #> $MaxPrecision = 10; #> $MinPrecision = 15 : Cannot set $MinPrecision such that $MaxPrecision < $MinPrecision. = 15 #> $MinPrecision = 0 #> $MaxPrecision = Infinity; <dl> <dt>'NumericQ[$expr$]' <dd>tests whether $expr$ represents a numeric quantity. </dl> >> NumericQ[2] = True >> NumericQ[Sqrt[Pi]] = True >> NumberQ[Sqrt[Pi]] = False It is possible to set that a symbol is numeric or not by assign a boolean value to ``NumericQ`` >> NumericQ[a]=True = True >> NumericQ[a] = True >> NumericQ[Sin[a]] = True Clear and ClearAll do not restore the default value. 
>> Clear[a]; NumericQ[a] = True >> ClearAll[a]; NumericQ[a] = True >> NumericQ[a]=False; NumericQ[a] = False NumericQ can only set to True or False >> NumericQ[a] = 37 : Cannot set NumericQ[a] to 37; the lhs argument must be a symbol and the rhs must be True or False. = 37 <dl> <dt>'Precision[$expr$]' <dd>examines the number of significant digits of $expr$. </dl> This is rather a proof-of-concept than a full implementation. Precision of compound expression is not supported yet. >> Precision[1] = Infinity >> Precision[1/2] = Infinity >> Precision[0.5] = MachinePrecision #> Precision[0.0] = MachinePrecision #> Precision[0.000000000000000000000000000000000000] = 0. #> Precision[-0.0] = MachinePrecision #> Precision[-0.000000000000000000000000000000000000] = 0. #> 1.0000000000000000 // Precision = MachinePrecision #> 1.00000000000000000 // Precision = 17. #> 0.4 + 2.4 I // Precision = MachinePrecision #> Precision[2 + 3 I] = Infinity #> Precision["abc"] = Infinity | 3.243915 | 3 |
Autoencoder/CLASS/VAE_Autoencoder.py | 18191171661/AutoEncoder-tensorflow1.01 | 2 | 6613858 | <gh_stars>1-10
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
import tensorflow as tf
import numpy as np
import sklearn.preprocessing as prep
from matplotlib import pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
from scipy.misc import imsave
def xavier_init(fan_in, fan_out, constant = 1):
    """Xavier/Glorot uniform initializer.

    Returns a (fan_in, fan_out) tensor drawn uniformly from
    [-limit, limit] with limit = constant * sqrt(6 / (fan_in + fan_out)).
    """
    bound = constant * np.sqrt(6.0 / (fan_in + fan_out))
    # NOTE(review): the fixed op-level seed makes every call deterministic;
    # kept as-is for behavioural parity.
    return tf.random_uniform(
        (fan_in, fan_out),
        minval = -bound,
        maxval = bound,
        dtype = tf.float32,
        seed = 33,
    )
class VariationalAutoencoder(object):
    """Variational autoencoder (VAE) with one stochastic hidden layer.

    A linear encoder maps each input to the mean and log-variance of a
    diagonal Gaussian over the latent code z; a single linear decoder maps
    z back to input space.  The cost is the squared reconstruction error
    plus the analytic KL divergence to a standard-normal prior.
    """

    def __init__(self, n_input, n_hidden, optimizer = tf.train.AdamOptimizer()):
        # n_input: dimensionality of the (flattened) input vectors.
        # n_hidden: dimensionality of the latent code z.
        self.n_input = n_input
        self.n_hidden = n_hidden

        network_weights = self._initialize_weights()
        self.weights = network_weights

        # model
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.z_mean = tf.add(tf.matmul(self.x, self.weights['w1']), self.weights['b1'])
        self.z_log_sigma_sq = tf.add(tf.matmul(self.x, self.weights['log_sigma_w1']), self.weights['log_sigma_b1'])

        # sample from gaussian distribution (reparameterization trick:
        # z = mean + sigma * eps with eps ~ N(0, 1), so gradients flow
        # through mean and log-variance)
        eps = tf.random_normal(tf.stack([tf.shape(self.x)[0], self.n_hidden]), 0, 1, dtype = tf.float32)
        self.z = tf.add(self.z_mean, tf.multiply(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))

        self.reconstruction = tf.add(tf.matmul(self.z, self.weights['w2']), self.weights['b2'])

        # cost: squared reconstruction error plus the analytic KL term for
        # a diagonal Gaussian posterior against a standard-normal prior
        reconstr_loss = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        latent_loss = -0.5 * tf.reduce_sum(1 + self.z_log_sigma_sq
                                           - tf.square(self.z_mean)
                                           - tf.exp(self.z_log_sigma_sq), 1)
        self.cost = tf.reduce_mean(reconstr_loss + latent_loss)
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)

    def _initialize_weights(self):
        # Encoder weights use Xavier initialization; decoder weights and all
        # biases start at zero.
        all_weights = dict()
        all_weights['w1'] = tf.Variable(xavier_init(self.n_input, self.n_hidden))
        all_weights['log_sigma_w1'] = tf.Variable(xavier_init(self.n_input, self.n_hidden))
        all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
        all_weights['log_sigma_b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
        all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
        all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
        return all_weights

    def partial_fit(self, X):
        """Run one optimization step on mini-batch X and return its cost."""
        cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X})
        return cost

    def calc_total_cost(self, X):
        """Return the cost on X without updating any weights."""
        return self.sess.run(self.cost, feed_dict = {self.x: X})

    def transform(self, X):
        """Encode X to the mean of its latent Gaussian."""
        return self.sess.run(self.z_mean, feed_dict={self.x: X})

    def generate(self, hidden = None):
        """Decode a latent vector; when none is given, decode a single
        sample drawn from the standard-normal prior."""
        if hidden is None:
            # BUG FIX: the original passed the tf.Variable self.weights["b1"]
            # as numpy's `size` argument, which raises a TypeError.  Draw one
            # latent sample of the correct width instead.
            hidden = np.random.normal(size=(1, self.n_hidden))
        return self.sess.run(self.reconstruction, feed_dict={self.z_mean: hidden})

    def reconstruct(self, X):
        """Pass X through the full encoder/decoder and return the
        reconstruction (uses a fresh latent sample via self.z)."""
        return self.sess.run(self.reconstruction, feed_dict={self.x: X})

    def getWeights(self):
        """Return the current value of the encoder weight matrix w1."""
        return self.sess.run(self.weights['w1'])

    def getBiases(self):
        """Return the current value of the encoder bias vector b1."""
        return self.sess.run(self.weights['b1'])
def VAE_main():
print('starting...')
print('loading data,please wait moment...')
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
def min_max_scale(X_train, X_test):
preprocessor = prep.MinMaxScaler().fit(X_train)
X_train = preprocessor.transform(X_train)
X_test = preprocessor.transform(X_test)
return X_train, X_test
def get_random_block_from_data(data, batch_size):
start_index = np.random.randint(0, len(data) - batch_size)
return data[start_index:(start_index + batch_size)]
X_train, X_test = min_max_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1
autoencoder = VariationalAutoencoder(n_input = 784,
n_hidden = 200,
optimizer = tf.train.AdamOptimizer(learning_rate = 0.001))
if os.path.exists('result_VAE'):
os.rename('result_VAE','result_VAE_before')
path = os.getcwd()
print(path)
paths = path + str('\\result_VAE')
print(paths)
os.chdir(paths)
print(os.getcwd())
else:
os.mkdir('result_VAE')
path = os.getcwd()
print(path)
paths = path + str('\\result_VAE')
print(paths)
os.chdir(paths)
print(os.getcwd())
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs = get_random_block_from_data(X_train, batch_size)
# Fit training using batch data
cost = autoencoder.partial_fit(batch_xs)
# Compute average loss
avg_cost += cost / n_samples * batch_size
weights = autoencoder.getWeights
bias = autoencoder.getBiases
#data.append(batch_data)
reconstract = autoencoder.reconstruct(batch_xs)
picture = np.reshape(reconstract, [128, 28, 28, -1])
#print(picture.shape)
result = picture[1:2]
#print(result.shape)
data = np.reshape(result, [28, 28])
imsave('%d.jpg' %(i), data)
# Display logs per epoch step
if epoch % display_step == 0:
print ("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
print ("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
print('weights is:', weights)
print('bias is:', bias)
print(reconstract.shape)
print('recontruct result is:', reconstract)
plt.plot(data)
plt.show()
print('ending...')
if __name__ == '__main__':
VAE_main() | from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
import tensorflow as tf
import numpy as np
import sklearn.preprocessing as prep
from matplotlib import pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
from scipy.misc import imsave
def xavier_init(fan_in, fan_out, constant = 1):
low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
high = constant * np.sqrt(6.0 / (fan_in + fan_out))
return tf.random_uniform((fan_in, fan_out),
minval = low,
maxval = high,
dtype = tf.float32,
seed = 33)
class VariationalAutoencoder(object):
def __init__(self, n_input, n_hidden, optimizer = tf.train.AdamOptimizer()):
self.n_input = n_input
self.n_hidden = n_hidden
network_weights = self._initialize_weights()
self.weights = network_weights
# model
self.x = tf.placeholder(tf.float32, [None, self.n_input])
self.z_mean = tf.add(tf.matmul(self.x, self.weights['w1']), self.weights['b1'])
self.z_log_sigma_sq = tf.add(tf.matmul(self.x, self.weights['log_sigma_w1']), self.weights['log_sigma_b1'])
# sample from gaussian distribution
eps = tf.random_normal(tf.stack([tf.shape(self.x)[0], self.n_hidden]), 0, 1, dtype = tf.float32)
self.z = tf.add(self.z_mean, tf.multiply(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))
self.reconstruction = tf.add(tf.matmul(self.z, self.weights['w2']), self.weights['b2'])
# cost
reconstr_loss = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
latent_loss = -0.5 * tf.reduce_sum(1 + self.z_log_sigma_sq
- tf.square(self.z_mean)
- tf.exp(self.z_log_sigma_sq), 1)
self.cost = tf.reduce_mean(reconstr_loss + latent_loss)
self.optimizer = optimizer.minimize(self.cost)
init = tf.global_variables_initializer()
self.sess = tf.Session()
self.sess.run(init)
def _initialize_weights(self):
all_weights = dict()
all_weights['w1'] = tf.Variable(xavier_init(self.n_input, self.n_hidden))
all_weights['log_sigma_w1'] = tf.Variable(xavier_init(self.n_input, self.n_hidden))
all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
all_weights['log_sigma_b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
return all_weights
def partial_fit(self, X):
cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X})
return cost
def calc_total_cost(self, X):
return self.sess.run(self.cost, feed_dict = {self.x: X})
def transform(self, X):
return self.sess.run(self.z_mean, feed_dict={self.x: X})
def generate(self, hidden = None):
if hidden is None:
hidden = np.random.normal(size=self.weights["b1"])
return self.sess.run(self.reconstruction, feed_dict={self.z_mean: hidden})
def reconstruct(self, X):
return self.sess.run(self.reconstruction, feed_dict={self.x: X})
def getWeights(self):
return self.sess.run(self.weights['w1'])
def getBiases(self):
return self.sess.run(self.weights['b1'])
def VAE_main():
print('starting...')
print('loading data,please wait moment...')
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
def min_max_scale(X_train, X_test):
preprocessor = prep.MinMaxScaler().fit(X_train)
X_train = preprocessor.transform(X_train)
X_test = preprocessor.transform(X_test)
return X_train, X_test
def get_random_block_from_data(data, batch_size):
start_index = np.random.randint(0, len(data) - batch_size)
return data[start_index:(start_index + batch_size)]
X_train, X_test = min_max_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1
autoencoder = VariationalAutoencoder(n_input = 784,
n_hidden = 200,
optimizer = tf.train.AdamOptimizer(learning_rate = 0.001))
if os.path.exists('result_VAE'):
os.rename('result_VAE','result_VAE_before')
path = os.getcwd()
print(path)
paths = path + str('\\result_VAE')
print(paths)
os.chdir(paths)
print(os.getcwd())
else:
os.mkdir('result_VAE')
path = os.getcwd()
print(path)
paths = path + str('\\result_VAE')
print(paths)
os.chdir(paths)
print(os.getcwd())
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs = get_random_block_from_data(X_train, batch_size)
# Fit training using batch data
cost = autoencoder.partial_fit(batch_xs)
# Compute average loss
avg_cost += cost / n_samples * batch_size
weights = autoencoder.getWeights
bias = autoencoder.getBiases
#data.append(batch_data)
reconstract = autoencoder.reconstruct(batch_xs)
picture = np.reshape(reconstract, [128, 28, 28, -1])
#print(picture.shape)
result = picture[1:2]
#print(result.shape)
data = np.reshape(result, [28, 28])
imsave('%d.jpg' %(i), data)
# Display logs per epoch step
if epoch % display_step == 0:
print ("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
print ("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
print('weights is:', weights)
print('bias is:', bias)
print(reconstract.shape)
print('recontruct result is:', reconstract)
plt.plot(data)
plt.show()
print('ending...')
if __name__ == '__main__':
VAE_main() | en | 0.655454 | # model # sample from gaussian distribution # cost # Loop over all batches # Fit training using batch data # Compute average loss #data.append(batch_data) #print(picture.shape) #print(result.shape) # Display logs per epoch step | 2.540627 | 3 |
UI/misc.py | rtix/course_bot | 6 | 6613859 | <gh_stars>1-10
import json
import os
import time
from Bot.config import MESSAGE_DIR
messages = {
i.split('.')[0]: json.load(open(os.path.join(MESSAGE_DIR, i))) for i in os.listdir(MESSAGE_DIR)
}
def to_dtime(utime):
return time.strftime("%d %b %Y", time.localtime(float(utime))) if utime else None
| import json
import os
import time
from Bot.config import MESSAGE_DIR
messages = {
i.split('.')[0]: json.load(open(os.path.join(MESSAGE_DIR, i))) for i in os.listdir(MESSAGE_DIR)
}
def to_dtime(utime):
return time.strftime("%d %b %Y", time.localtime(float(utime))) if utime else None | none | 1 | 2.409559 | 2 | |
mitmproxy/dump.py | e7appew/pkg-mitmproxy | 0 | 6613860 | from __future__ import absolute_import, print_function, division
from typing import Optional # noqa
import typing # noqa
from mitmproxy import controller
from mitmproxy import exceptions
from mitmproxy import flow
from mitmproxy import builtins
from mitmproxy import options
from mitmproxy.builtins import dumper, termlog
from netlib import tcp
class DumpError(Exception):
pass
class Options(options.Options):
def __init__(
self,
keepserving=False, # type: bool
filtstr=None, # type: Optional[str]
flow_detail=1, # type: int
tfile=None, # type: Optional[typing.io.TextIO]
**kwargs
):
self.filtstr = filtstr
self.flow_detail = flow_detail
self.keepserving = keepserving
self.tfile = tfile
super(Options, self).__init__(**kwargs)
class DumpMaster(flow.FlowMaster):
def __init__(self, server, options):
flow.FlowMaster.__init__(self, options, server, flow.DummyState())
self.has_errored = False
self.addons.add(termlog.TermLog())
self.addons.add(*builtins.default_addons())
self.addons.add(dumper.Dumper())
# This line is just for type hinting
self.options = self.options # type: Options
self.set_stream_large_bodies(options.stream_large_bodies)
if not self.options.no_server and server:
self.add_log(
"Proxy server listening at http://{}".format(server.address),
"info"
)
if self.server and self.options.http2 and not tcp.HAS_ALPN: # pragma: no cover
self.add_log(
"ALPN support missing (OpenSSL 1.0.2+ required)!\n"
"HTTP/2 is disabled. Use --no-http2 to silence this warning.",
"error"
)
if options.rfile:
try:
self.load_flows_file(options.rfile)
except exceptions.FlowReadException as v:
self.add_log("Flow file corrupted.", "error")
raise DumpError(v)
if self.options.app:
self.start_app(self.options.app_host, self.options.app_port)
def _readflow(self, paths):
"""
Utitility function that reads a list of flows
or raises a DumpError if that fails.
"""
try:
return flow.read_flows_from_paths(paths)
except exceptions.FlowReadException as e:
raise DumpError(str(e))
@controller.handler
def log(self, e):
if e.level == "error":
self.has_errored = True
def run(self): # pragma: no cover
if self.options.rfile and not self.options.keepserving:
self.addons.done()
return
super(DumpMaster, self).run()
| from __future__ import absolute_import, print_function, division
from typing import Optional # noqa
import typing # noqa
from mitmproxy import controller
from mitmproxy import exceptions
from mitmproxy import flow
from mitmproxy import builtins
from mitmproxy import options
from mitmproxy.builtins import dumper, termlog
from netlib import tcp
class DumpError(Exception):
pass
class Options(options.Options):
def __init__(
self,
keepserving=False, # type: bool
filtstr=None, # type: Optional[str]
flow_detail=1, # type: int
tfile=None, # type: Optional[typing.io.TextIO]
**kwargs
):
self.filtstr = filtstr
self.flow_detail = flow_detail
self.keepserving = keepserving
self.tfile = tfile
super(Options, self).__init__(**kwargs)
class DumpMaster(flow.FlowMaster):
def __init__(self, server, options):
flow.FlowMaster.__init__(self, options, server, flow.DummyState())
self.has_errored = False
self.addons.add(termlog.TermLog())
self.addons.add(*builtins.default_addons())
self.addons.add(dumper.Dumper())
# This line is just for type hinting
self.options = self.options # type: Options
self.set_stream_large_bodies(options.stream_large_bodies)
if not self.options.no_server and server:
self.add_log(
"Proxy server listening at http://{}".format(server.address),
"info"
)
if self.server and self.options.http2 and not tcp.HAS_ALPN: # pragma: no cover
self.add_log(
"ALPN support missing (OpenSSL 1.0.2+ required)!\n"
"HTTP/2 is disabled. Use --no-http2 to silence this warning.",
"error"
)
if options.rfile:
try:
self.load_flows_file(options.rfile)
except exceptions.FlowReadException as v:
self.add_log("Flow file corrupted.", "error")
raise DumpError(v)
if self.options.app:
self.start_app(self.options.app_host, self.options.app_port)
def _readflow(self, paths):
"""
Utitility function that reads a list of flows
or raises a DumpError if that fails.
"""
try:
return flow.read_flows_from_paths(paths)
except exceptions.FlowReadException as e:
raise DumpError(str(e))
@controller.handler
def log(self, e):
if e.level == "error":
self.has_errored = True
def run(self): # pragma: no cover
if self.options.rfile and not self.options.keepserving:
self.addons.done()
return
super(DumpMaster, self).run()
| en | 0.703133 | # noqa # noqa # type: bool # type: Optional[str] # type: int # type: Optional[typing.io.TextIO] # This line is just for type hinting # type: Options # pragma: no cover Utitility function that reads a list of flows or raises a DumpError if that fails. # pragma: no cover | 2.040995 | 2 |
cluster/generate_clusters.py | ptbrown1729/exact_diag | 2 | 6613861 | <filename>cluster/generate_clusters.py<gh_stars>1-10
import pickle
import os
import sys
from ed_nlce import *
def generate_clusters(fname_cluster_dat, max_cluster_order):
# if data file already exists, read number of clusters and return
if os.path.isfile(fname_cluster_dat):
with open(fname_cluster_dat, 'rb') as f:
data = pickle.load(f)
cluster_list = data[2]
return len(cluster_list)
# if data file doesn't already exist, we must generate the clusters and create the file
clusters_list, cluster_multiplicities, sub_cluster_mult, order_start_indices = \
get_all_clusters_with_subclusters(max_cluster_order)
cluster_multiplicities = cluster_multiplicities[None, :]
# save cluster data
data_clusters = {"max_cluster_order": max_cluster_order, "cluster_multiplicities": cluster_multiplicities,
"cluster_list": clusters_list, "sub_cluster_mult": sub_cluster_mult,
"order_start_indices": order_start_indices}
with open(fname_cluster_dat, 'wb') as f:
pickle.dump(data_clusters, f)
return len(clusters_list)
if __name__ == "__main__":
output_fname = sys.argv[1]
max_cluster_order = int(sys.argv[2])
num_clusters = generate_clusters(output_fname, max_cluster_order)
print(num_clusters)
| <filename>cluster/generate_clusters.py<gh_stars>1-10
import pickle
import os
import sys
from ed_nlce import *
def generate_clusters(fname_cluster_dat, max_cluster_order):
# if data file already exists, read number of clusters and return
if os.path.isfile(fname_cluster_dat):
with open(fname_cluster_dat, 'rb') as f:
data = pickle.load(f)
cluster_list = data[2]
return len(cluster_list)
# if data file doesn't already exist, we must generate the clusters and create the file
clusters_list, cluster_multiplicities, sub_cluster_mult, order_start_indices = \
get_all_clusters_with_subclusters(max_cluster_order)
cluster_multiplicities = cluster_multiplicities[None, :]
# save cluster data
data_clusters = {"max_cluster_order": max_cluster_order, "cluster_multiplicities": cluster_multiplicities,
"cluster_list": clusters_list, "sub_cluster_mult": sub_cluster_mult,
"order_start_indices": order_start_indices}
with open(fname_cluster_dat, 'wb') as f:
pickle.dump(data_clusters, f)
return len(clusters_list)
if __name__ == "__main__":
output_fname = sys.argv[1]
max_cluster_order = int(sys.argv[2])
num_clusters = generate_clusters(output_fname, max_cluster_order)
print(num_clusters)
| en | 0.846822 | # if data file already exists, read number of clusters and return # if data file doesn't already exist, we must generate the clusters and create the file # save cluster data | 3.117718 | 3 |
mockable_doctests/__init__.py | aaron-p-lehmann/mockable_doctests | 0 | 6613862 | <gh_stars>0
# pylint: skip-file
# flake8: noqa
"""
Import submodules __all__ members so that they can be accessed directly through main package import.
This allows downstream code to access all members through package import without needing subpackages and
becoming dependent on internal package structure
"""
from mockable_doctests import mockable
from mockable_doctests.mockable import *
__all__ = (
list(getattr(mockable, "__all__", []))
)
__version__ = '1.0.0'
| # pylint: skip-file
# flake8: noqa
"""
Import submodules __all__ members so that they can be accessed directly through main package import.
This allows downstream code to access all members through package import without needing subpackages and
becoming dependent on internal package structure
"""
from mockable_doctests import mockable
from mockable_doctests.mockable import *
__all__ = (
list(getattr(mockable, "__all__", []))
)
__version__ = '1.0.0' | en | 0.796037 | # pylint: skip-file # flake8: noqa Import submodules __all__ members so that they can be accessed directly through main package import. This allows downstream code to access all members through package import without needing subpackages and becoming dependent on internal package structure | 1.550227 | 2 |
7_kyu/shortest_word.py | dimishpatriot/way_on_the_highway | 0 | 6613863 | """Simple, given a string of words, return the length of the shortest word(s).
String will never be empty and you do not need to account for different data types.
"""
import pytest
@pytest.mark.parametrize("test, result", [
("bitcoin take over the world maybe who knows perhaps", 3),
("turns out random test cases are easier than writing out basic ones", 3),
("lets talk about javascript the best language", 3),
("i want to travel the world writing code one day", 1),
("Lets all go on holiday somewhere very cold", 2),
("ok", 2),
])
def test_short(test, result):
assert find_short(test) == result
def find_short(s: str) -> int:
all_words = s.split()
ln = len(all_words[0])
for word in all_words[1:]:
ln = min(ln, len(word))
return ln
| """Simple, given a string of words, return the length of the shortest word(s).
String will never be empty and you do not need to account for different data types.
"""
import pytest
@pytest.mark.parametrize("test, result", [
("bitcoin take over the world maybe who knows perhaps", 3),
("turns out random test cases are easier than writing out basic ones", 3),
("lets talk about javascript the best language", 3),
("i want to travel the world writing code one day", 1),
("Lets all go on holiday somewhere very cold", 2),
("ok", 2),
])
def test_short(test, result):
assert find_short(test) == result
def find_short(s: str) -> int:
all_words = s.split()
ln = len(all_words[0])
for word in all_words[1:]:
ln = min(ln, len(word))
return ln
| en | 0.787634 | Simple, given a string of words, return the length of the shortest word(s). String will never be empty and you do not need to account for different data types. | 3.915062 | 4 |
scripts/sortshapekey.py | lowteq/blenderScripts | 7 | 6613864 | <reponame>lowteq/blenderScripts
import bpy
import bmesh
from bpy.props import BoolProperty
bl_info = {
"name" : "SortShapekeyPlugin",
"author" : "lowteq",
"version" : (0,2),
"blender" : (2,80, 0),
"location" : "View3D > Object",
"description" : "Sort shapekeys",
"warning" : "",
"wiki_url" : "https://github.com/lowteq/blenderScripts",
"tracker_url" : "",
"category" : "Object"
}
class OBJECT_OT_sort_shapekey(bpy.types.Operator):
bl_idname = "object.sort_shapekey"
bl_label = "Sort_Shapekey"
bl_description = "Sort shapekeys"
bl_options = {'REGISTER', 'UNDO'}
reverse : BoolProperty(
default=False,
options = {'HIDDEN'}
)
def execute(self, context):
bpy.ops.object.mode_set(mode='OBJECT')
obj = bpy.context.active_object
src = obj
if obj.data.shape_keys == None:
bpy.ops.object.shape_key_add(from_mix=False)
shapekeynamelist = [src_key.name for src_key in src.data.shape_keys.key_blocks[1:]] # skip basis
sortedlist = sorted(shapekeynamelist,key=str.lower,reverse=self.reverse)
for index,name in enumerate(sortedlist):
src_key_ix = src.data.shape_keys.key_blocks.find(name)
dst_key_ix = index + 1
#move shapekey at src_key_ix to dst_key_ix
bpy.context.object.active_shape_key_index = src_key_ix
if dst_key_ix > src_key_ix:
opstype = "DOWN"
else:
opstype = "UP"
for i in range(abs(src_key_ix - dst_key_ix)):
bpy.ops.object.shape_key_move(type=opstype)
bpy.ops.object.mode_set(mode='OBJECT')
return {'FINISHED'}
def menu_func(self, context):
self.layout.separator()
asc = self.layout.operator(OBJECT_OT_sort_shapekey.bl_idname,text="Sort Shapekey")
asc.reverse = False
desc = self.layout.operator(OBJECT_OT_sort_shapekey.bl_idname,text="Sort Shapekey reverse")
desc.reverse = True
classes = [
OBJECT_OT_sort_shapekey,
]
def register():
for c in classes:
bpy.utils.register_class(c)
bpy.types.MESH_MT_shape_key_context_menu.append(menu_func)
def unregister():
bpy.types.MESH_MT_shape_key_context_menu.remove(menu_func)
for c in classes:
bpy.utils.unregister_class(c)
if __name__ == "__main__":
register() | import bpy
import bmesh
from bpy.props import BoolProperty
bl_info = {
"name" : "SortShapekeyPlugin",
"author" : "lowteq",
"version" : (0,2),
"blender" : (2,80, 0),
"location" : "View3D > Object",
"description" : "Sort shapekeys",
"warning" : "",
"wiki_url" : "https://github.com/lowteq/blenderScripts",
"tracker_url" : "",
"category" : "Object"
}
class OBJECT_OT_sort_shapekey(bpy.types.Operator):
bl_idname = "object.sort_shapekey"
bl_label = "Sort_Shapekey"
bl_description = "Sort shapekeys"
bl_options = {'REGISTER', 'UNDO'}
reverse : BoolProperty(
default=False,
options = {'HIDDEN'}
)
def execute(self, context):
bpy.ops.object.mode_set(mode='OBJECT')
obj = bpy.context.active_object
src = obj
if obj.data.shape_keys == None:
bpy.ops.object.shape_key_add(from_mix=False)
shapekeynamelist = [src_key.name for src_key in src.data.shape_keys.key_blocks[1:]] # skip basis
sortedlist = sorted(shapekeynamelist,key=str.lower,reverse=self.reverse)
for index,name in enumerate(sortedlist):
src_key_ix = src.data.shape_keys.key_blocks.find(name)
dst_key_ix = index + 1
#move shapekey at src_key_ix to dst_key_ix
bpy.context.object.active_shape_key_index = src_key_ix
if dst_key_ix > src_key_ix:
opstype = "DOWN"
else:
opstype = "UP"
for i in range(abs(src_key_ix - dst_key_ix)):
bpy.ops.object.shape_key_move(type=opstype)
bpy.ops.object.mode_set(mode='OBJECT')
return {'FINISHED'}
def menu_func(self, context):
self.layout.separator()
asc = self.layout.operator(OBJECT_OT_sort_shapekey.bl_idname,text="Sort Shapekey")
asc.reverse = False
desc = self.layout.operator(OBJECT_OT_sort_shapekey.bl_idname,text="Sort Shapekey reverse")
desc.reverse = True
classes = [
OBJECT_OT_sort_shapekey,
]
def register():
for c in classes:
bpy.utils.register_class(c)
bpy.types.MESH_MT_shape_key_context_menu.append(menu_func)
def unregister():
bpy.types.MESH_MT_shape_key_context_menu.remove(menu_func)
for c in classes:
bpy.utils.unregister_class(c)
if __name__ == "__main__":
register() | en | 0.41211 | # skip basis #move shapekey at src_key_ix to dst_key_ix | 2.248872 | 2 |
labMT-simple/labMTsimple/data/Pattern/load_pattern.py | TransientObject/labMTComparison | 0 | 6613865 | <reponame>TransientObject/labMTComparison
# coding: utf-8
import xml.etree.ElementTree as etree
tree = etree.parse('en-sentiment.xml')
root = tree.getroot()
print(root)
# for child in root:
# print(child)
# print(child.tag)
# print(child.form)
# print(child.attrib['form'])
my_dict = {}
for child in root:
my_dict[child.attrib['form']] = dict()
print(len(my_dict))
print(root[0].attrib)
for i,child in enumerate(root):
my_dict[child.attrib['form']] = (i,float(child.attrib['polarity']))
print(len(my_dict))
my_dict['13th']
pos_words = [word for word in my_dict if my_dict[word][1] > 0]
print(len(pos_words))
neg_words = [word for word in my_dict if my_dict[word][1] < 0]
print(len(neg_words))
| # coding: utf-8
import xml.etree.ElementTree as etree
tree = etree.parse('en-sentiment.xml')
root = tree.getroot()
print(root)
# for child in root:
# print(child)
# print(child.tag)
# print(child.form)
# print(child.attrib['form'])
my_dict = {}
for child in root:
my_dict[child.attrib['form']] = dict()
print(len(my_dict))
print(root[0].attrib)
for i,child in enumerate(root):
my_dict[child.attrib['form']] = (i,float(child.attrib['polarity']))
print(len(my_dict))
my_dict['13th']
pos_words = [word for word in my_dict if my_dict[word][1] > 0]
print(len(pos_words))
neg_words = [word for word in my_dict if my_dict[word][1] < 0]
print(len(neg_words)) | en | 0.595838 | # coding: utf-8 # for child in root: # print(child) # print(child.tag) # print(child.form) # print(child.attrib['form']) | 2.949176 | 3 |
_src/om2py4w/4wex0/createstorydata.py | wwshen/OMOOC2py | 0 | 6613866 | <gh_stars>0
import sqlite3
fn = 'storychain.db'
cnct = sqlite3.connect(fn)
cnct.execute('''CREATE TABLE chains
(title char(20) NOT NULL,
ct INTEGER,
main char(400) NOT NULL,
userid text,
datetime text)
''')
cnct.execute('''INSERT INTO chains VALUES
("wolf",
0,
"There was once a boy-wolf.",
"wwshen",
"2015-11-15 15:00:00")
''')
cnct.execute('''INSERT INTO chains VALUES
("wolf",
1,
"He liked walking and dancing in the rain.",
"wwshen",
"2015-11-15 15:00:00")
''')
cnct.execute('''INSERT INTO chains VALUES
("my dad",
0,
"My dad is awesome!",
"wwshen",
"2015-11-15 15:00:00")
''')
cnct.commit() | import sqlite3
fn = 'storychain.db'
cnct = sqlite3.connect(fn)
cnct.execute('''CREATE TABLE chains
(title char(20) NOT NULL,
ct INTEGER,
main char(400) NOT NULL,
userid text,
datetime text)
''')
cnct.execute('''INSERT INTO chains VALUES
("wolf",
0,
"There was once a boy-wolf.",
"wwshen",
"2015-11-15 15:00:00")
''')
cnct.execute('''INSERT INTO chains VALUES
("wolf",
1,
"He liked walking and dancing in the rain.",
"wwshen",
"2015-11-15 15:00:00")
''')
cnct.execute('''INSERT INTO chains VALUES
("my dad",
0,
"My dad is awesome!",
"wwshen",
"2015-11-15 15:00:00")
''')
cnct.commit() | en | 0.712446 | CREATE TABLE chains (title char(20) NOT NULL, ct INTEGER, main char(400) NOT NULL, userid text, datetime text) INSERT INTO chains VALUES ("wolf", 0, "There was once a boy-wolf.", "wwshen", "2015-11-15 15:00:00") INSERT INTO chains VALUES ("wolf", 1, "He liked walking and dancing in the rain.", "wwshen", "2015-11-15 15:00:00") INSERT INTO chains VALUES ("my dad", 0, "My dad is awesome!", "wwshen", "2015-11-15 15:00:00") | 2.867179 | 3 |
wow_watches.py | ant3h/knife_scraper | 3 | 6613867 | <filename>wow_watches.py
#!/usr/bin/env python
# bhq_query.py - module for sopel to query blade head quarters site for knife data
#
# Copyright (c) 2015,2016 <NAME> <<EMAIL>>
#
# See LICENSE for terms of usage, modification and redistribution.
from sopel import *
@module.commands('wtc')
def knife(bot, trigger):
bot.reply("Look I respond to the wtc command now!")
| <filename>wow_watches.py
#!/usr/bin/env python
# bhq_query.py - module for sopel to query blade head quarters site for knife data
#
# Copyright (c) 2015,2016 <NAME> <<EMAIL>>
#
# See LICENSE for terms of usage, modification and redistribution.
from sopel import *
@module.commands('wtc')
def knife(bot, trigger):
bot.reply("Look I respond to the wtc command now!")
| en | 0.609593 | #!/usr/bin/env python # bhq_query.py - module for sopel to query blade head quarters site for knife data # # Copyright (c) 2015,2016 <NAME> <<EMAIL>> # # See LICENSE for terms of usage, modification and redistribution. | 1.773706 | 2 |
publish/update_updatejson.py | makamys/WorldTweaks | 2 | 6613868 | import json
import subprocess
jsonPath = "../updatejson/update.json"
# lol
fullFormat = "updateJsonFullVersionFormat=true" in open("gradle.properties", "r", encoding="utf8").read()
data = json.load(open(jsonPath, "r", encoding="utf8"))
ver = open("version.txt", "r").read()
for gameVer in json.load(open("gameVersions.json", "r")).keys():
modVer = "{}".format(ver) if not fullFormat else "{}-{}".format(gameVer, ver)
if gameVer not in data:
data[gameVer] = {}
data[gameVer][modVer] = ""
data["promos"]["{}-latest".format(gameVer)] = modVer
json.dump(data, open(jsonPath, "w", encoding="utf8"), indent=2)
subprocess.run(["git", "add", jsonPath])
subprocess.run(["git", "commit", "-m", "Update update json"])
subprocess.run(["git", "push"]) | import json
import subprocess
jsonPath = "../updatejson/update.json"
# lol
fullFormat = "updateJsonFullVersionFormat=true" in open("gradle.properties", "r", encoding="utf8").read()
data = json.load(open(jsonPath, "r", encoding="utf8"))
ver = open("version.txt", "r").read()
for gameVer in json.load(open("gameVersions.json", "r")).keys():
modVer = "{}".format(ver) if not fullFormat else "{}-{}".format(gameVer, ver)
if gameVer not in data:
data[gameVer] = {}
data[gameVer][modVer] = ""
data["promos"]["{}-latest".format(gameVer)] = modVer
json.dump(data, open(jsonPath, "w", encoding="utf8"), indent=2)
subprocess.run(["git", "add", jsonPath])
subprocess.run(["git", "commit", "-m", "Update update json"])
subprocess.run(["git", "push"]) | none | 1 | 2.362502 | 2 | |
build/lib/tests/UsersRepository_test.py | athanikos/cryptodataaccess | 0 | 6613869 | <reponame>athanikos/cryptodataaccess<gh_stars>0
from datetime import datetime
import mock
from bson import ObjectId
from cryptomodel.operations import OPERATIONS
from cryptomodel.cryptostore import user_channel, user_notification, user_settings
from cryptomodel.coinmarket import prices
from cryptomodel.fixer import exchange_rates
from cryptodataaccess.Memory import USER_NOTIFICATIONS_MEMORY_KEY, USER_SETTINGS_MEMORY_KEY, USER_CHANNELS_MEMORY_KEY
from cryptodataaccess.Rates.RatesMongoStore import RatesMongoStore
from cryptodataaccess.Users.UsersMongoStore import UsersMongoStore
from cryptodataaccess.config import configure_app
from cryptodataaccess.Users.UsersRepository import UsersRepository
from cryptodataaccess.Rates.RatesRepository import RatesRepository
import pytest
from cryptodataaccess.helpers import convert_to_int_timestamp
from cryptodataaccess.tests.helpers import insert_prices_record, insert_exchange_record
from cryptodataaccess import helpers
@pytest.fixture(scope='module')
def mock_log():
with mock.patch("cryptodataaccess.helpers.log_error"
) as _mock:
_mock.return_value = True
yield _mock
def test_fetch_symbol_rates():
config = configure_app()
rates_store = RatesMongoStore(config, mock_log)
repo = RatesRepository(rates_store)
helpers.do_local_connect(config)
prices.objects.all().delete()
insert_prices_record()
objs = repo.fetch_symbol_rates()
assert (len(objs.rates) == 100)
assert (objs.rates['BTC'].price == 8101.799293468747)
def test_fetch_exchange_rates():
config = configure_app()
rates_store = RatesMongoStore(config, mock_log)
repo = RatesRepository(rates_store)
exchange_rates.objects.all().delete()
insert_exchange_record()
objs = repo.fetch_latest_exchange_rates_to_date('1900-01-01')
assert (len(objs) == 0)
objs = repo.fetch_latest_exchange_rates_to_date('2020-07-04')
assert (len(objs) == 1)
objs = repo.fetch_latest_exchange_rates_to_date('2020-07-03')
assert (len(objs) == 1)
assert (objs[0].rates.AED == 4.127332)
objs = repo.fetch_latest_exchange_rates_to_date('2020-07-02')
assert (len(objs) == 0)
def test_fetch_prices_and_symbols():
config = configure_app()
rates_store = RatesMongoStore(config, mock_log)
repo = RatesRepository(rates_store)
prices.objects.all().delete()
insert_prices_record()
dt = convert_to_int_timestamp(datetime(year=2020, month = 7 , day = 3 ))
objs = repo.fetch_latest_prices_to_date(dt)
assert (len(objs) == 1)
objs = repo.fetch_latest_prices_to_date(dt)
dt = convert_to_int_timestamp(datetime(year=2020, month=7, day=4))
assert (len(objs) == 1)
symbols = repo.fetch_symbols()
assert (len(symbols) == 100)
def test_insert_user_channel():
config = configure_app()
rates_store = UsersMongoStore(config, mock_log)
repo = UsersRepository(rates_store)
helpers.do_local_connect(config)
user_channel.objects.all().delete()
repo.add_user_channel(user_id=1, chat_id='1', channel_type='telegram',
source_id=ObjectId('666f6f2d6261722d71757578'))
repo.commit()
uc = repo.memories[USER_CHANNELS_MEMORY_KEY].items[0]
assert (uc.channel_type == 'telegram')
assert (uc.operation == OPERATIONS.ADDED.name)
def test_insert_user_setting():
config = configure_app()
users_store = UsersMongoStore(config, mock_log)
repo = UsersRepository(users_store)
helpers.do_local_connect(config)
user_settings.objects.all().delete()
repo.add_user_settings(user_id=1, preferred_currency='da', source_id=ObjectId('666f6f2d6261722d71757578'))
repo.commit()
uc = repo.memories[USER_SETTINGS_MEMORY_KEY].items[0]
assert (uc.preferred_currency == 'da')
assert (uc.operation == OPERATIONS.ADDED.name)
def test_update_notification_when_does_not_exist_throws_ValueError():
config = configure_app()
store = UsersMongoStore(config, mock_log)
repo = UsersRepository(store)
helpers.do_local_connect(config)
user_notification.objects.all().delete()
with pytest.raises(ValueError):
repo.edit_notification(ObjectId('666f6f2d6261722d71757578'), 1, 'nik2', 'email', 'some expr', 1, 1, True,
'telegram', 'expr to send', ObjectId('666f6f2d6261722d71757578'))
repo.commit()
def test_update_notification():
config = configure_app()
store = UsersMongoStore(config, mock_log)
repo = UsersRepository(store)
helpers.do_local_connect(config)
user_notification.objects.all().delete()
repo.add_notification(user_id=1, user_name='username', user_email='email',
expression_to_evaluate='some expr', check_every_seconds=1, check_times=1,
is_active=True, channel_type='telegram',
fields_to_send="dsd",
source_id=ObjectId('666f6f2d6261722d71757578'))
repo.commit()
un = repo.memories[USER_NOTIFICATIONS_MEMORY_KEY].items[0]
repo.edit_notification(in_id=un.id,
user_id=1, user_name='username2', user_email='email',
expression_to_evaluate='some expr', check_every_seconds=1, check_times=1,
is_active=True, channel_type='telegram',
fields_to_send="dsd",
source_id=ObjectId('666f6f2d6261722d71757578'))
repo.commit()
un = repo.memories[USER_NOTIFICATIONS_MEMORY_KEY].items[1]
assert (un.user_name == "username2")
def test_delete_notification_when_exists():
config = configure_app()
store = UsersMongoStore(config, mock_log)
repo = UsersRepository(store)
helpers.do_local_connect(config)
user_notification.objects.all().delete()
repo.add_notification(user_id=1, user_name='username', user_email='email',
expression_to_evaluate='some expr', check_every_seconds=1, check_times=1,
is_active=True, channel_type='telegram', fields_to_send="dsd",
source_id=ObjectId('666f6f2d6261722d71757578'))
repo.commit()
ut = repo.memories[USER_NOTIFICATIONS_MEMORY_KEY].items[0]
assert (len(user_notification.objects) == 1)
ut = repo.remove_notification(ut.id)
repo.commit()
assert (len(user_notification.objects) == 0)
def test_delete_user_notification_when_exists_by_source_id():
config = configure_app()
store = UsersMongoStore(config, mock_log)
repo = UsersRepository(store)
helpers.do_local_connect(config)
user_notification.objects.all().delete()
repo.add_notification(user_id=1, user_name='username', user_email='email',
expression_to_evaluate='some expr', check_every_seconds=1, check_times=1,
is_active=True, channel_type='telegram',
fields_to_send="dsd",
source_id=ObjectId('666f6f2d6261722d71757578'))
repo.commit()
ut = repo.memories[USER_NOTIFICATIONS_MEMORY_KEY].items[0]
assert (len(user_notification.objects) == 1)
store.do_delete_user_notification_by_source_id(source_id=ObjectId('666f6f2d6261722d71757578'))
assert (len(user_notification.objects) == 0)
| from datetime import datetime
import mock
from bson import ObjectId
from cryptomodel.operations import OPERATIONS
from cryptomodel.cryptostore import user_channel, user_notification, user_settings
from cryptomodel.coinmarket import prices
from cryptomodel.fixer import exchange_rates
from cryptodataaccess.Memory import USER_NOTIFICATIONS_MEMORY_KEY, USER_SETTINGS_MEMORY_KEY, USER_CHANNELS_MEMORY_KEY
from cryptodataaccess.Rates.RatesMongoStore import RatesMongoStore
from cryptodataaccess.Users.UsersMongoStore import UsersMongoStore
from cryptodataaccess.config import configure_app
from cryptodataaccess.Users.UsersRepository import UsersRepository
from cryptodataaccess.Rates.RatesRepository import RatesRepository
import pytest
from cryptodataaccess.helpers import convert_to_int_timestamp
from cryptodataaccess.tests.helpers import insert_prices_record, insert_exchange_record
from cryptodataaccess import helpers
@pytest.fixture(scope='module')
def mock_log():
with mock.patch("cryptodataaccess.helpers.log_error"
) as _mock:
_mock.return_value = True
yield _mock
def test_fetch_symbol_rates():
config = configure_app()
rates_store = RatesMongoStore(config, mock_log)
repo = RatesRepository(rates_store)
helpers.do_local_connect(config)
prices.objects.all().delete()
insert_prices_record()
objs = repo.fetch_symbol_rates()
assert (len(objs.rates) == 100)
assert (objs.rates['BTC'].price == 8101.799293468747)
def test_fetch_exchange_rates():
config = configure_app()
rates_store = RatesMongoStore(config, mock_log)
repo = RatesRepository(rates_store)
exchange_rates.objects.all().delete()
insert_exchange_record()
objs = repo.fetch_latest_exchange_rates_to_date('1900-01-01')
assert (len(objs) == 0)
objs = repo.fetch_latest_exchange_rates_to_date('2020-07-04')
assert (len(objs) == 1)
objs = repo.fetch_latest_exchange_rates_to_date('2020-07-03')
assert (len(objs) == 1)
assert (objs[0].rates.AED == 4.127332)
objs = repo.fetch_latest_exchange_rates_to_date('2020-07-02')
assert (len(objs) == 0)
def test_fetch_prices_and_symbols():
config = configure_app()
rates_store = RatesMongoStore(config, mock_log)
repo = RatesRepository(rates_store)
prices.objects.all().delete()
insert_prices_record()
dt = convert_to_int_timestamp(datetime(year=2020, month = 7 , day = 3 ))
objs = repo.fetch_latest_prices_to_date(dt)
assert (len(objs) == 1)
objs = repo.fetch_latest_prices_to_date(dt)
dt = convert_to_int_timestamp(datetime(year=2020, month=7, day=4))
assert (len(objs) == 1)
symbols = repo.fetch_symbols()
assert (len(symbols) == 100)
def test_insert_user_channel():
config = configure_app()
rates_store = UsersMongoStore(config, mock_log)
repo = UsersRepository(rates_store)
helpers.do_local_connect(config)
user_channel.objects.all().delete()
repo.add_user_channel(user_id=1, chat_id='1', channel_type='telegram',
source_id=ObjectId('666f6f2d6261722d71757578'))
repo.commit()
uc = repo.memories[USER_CHANNELS_MEMORY_KEY].items[0]
assert (uc.channel_type == 'telegram')
assert (uc.operation == OPERATIONS.ADDED.name)
def test_insert_user_setting():
config = configure_app()
users_store = UsersMongoStore(config, mock_log)
repo = UsersRepository(users_store)
helpers.do_local_connect(config)
user_settings.objects.all().delete()
repo.add_user_settings(user_id=1, preferred_currency='da', source_id=ObjectId('666f6f2d6261722d71757578'))
repo.commit()
uc = repo.memories[USER_SETTINGS_MEMORY_KEY].items[0]
assert (uc.preferred_currency == 'da')
assert (uc.operation == OPERATIONS.ADDED.name)
def test_update_notification_when_does_not_exist_throws_ValueError():
config = configure_app()
store = UsersMongoStore(config, mock_log)
repo = UsersRepository(store)
helpers.do_local_connect(config)
user_notification.objects.all().delete()
with pytest.raises(ValueError):
repo.edit_notification(ObjectId('666f6f2d6261722d71757578'), 1, 'nik2', 'email', 'some expr', 1, 1, True,
'telegram', 'expr to send', ObjectId('666f6f2d6261722d71757578'))
repo.commit()
def test_update_notification():
config = configure_app()
store = UsersMongoStore(config, mock_log)
repo = UsersRepository(store)
helpers.do_local_connect(config)
user_notification.objects.all().delete()
repo.add_notification(user_id=1, user_name='username', user_email='email',
expression_to_evaluate='some expr', check_every_seconds=1, check_times=1,
is_active=True, channel_type='telegram',
fields_to_send="dsd",
source_id=ObjectId('666f6f2d6261722d71757578'))
repo.commit()
un = repo.memories[USER_NOTIFICATIONS_MEMORY_KEY].items[0]
repo.edit_notification(in_id=un.id,
user_id=1, user_name='username2', user_email='email',
expression_to_evaluate='some expr', check_every_seconds=1, check_times=1,
is_active=True, channel_type='telegram',
fields_to_send="dsd",
source_id=ObjectId('666f6f2d6261722d71757578'))
repo.commit()
un = repo.memories[USER_NOTIFICATIONS_MEMORY_KEY].items[1]
assert (un.user_name == "username2")
def test_delete_notification_when_exists():
config = configure_app()
store = UsersMongoStore(config, mock_log)
repo = UsersRepository(store)
helpers.do_local_connect(config)
user_notification.objects.all().delete()
repo.add_notification(user_id=1, user_name='username', user_email='email',
expression_to_evaluate='some expr', check_every_seconds=1, check_times=1,
is_active=True, channel_type='telegram', fields_to_send="dsd",
source_id=ObjectId('666f6f2d6261722d71757578'))
repo.commit()
ut = repo.memories[USER_NOTIFICATIONS_MEMORY_KEY].items[0]
assert (len(user_notification.objects) == 1)
ut = repo.remove_notification(ut.id)
repo.commit()
assert (len(user_notification.objects) == 0)
def test_delete_user_notification_when_exists_by_source_id():
config = configure_app()
store = UsersMongoStore(config, mock_log)
repo = UsersRepository(store)
helpers.do_local_connect(config)
user_notification.objects.all().delete()
repo.add_notification(user_id=1, user_name='username', user_email='email',
expression_to_evaluate='some expr', check_every_seconds=1, check_times=1,
is_active=True, channel_type='telegram',
fields_to_send="dsd",
source_id=ObjectId('666f6f2d6261722d71757578'))
repo.commit()
ut = repo.memories[USER_NOTIFICATIONS_MEMORY_KEY].items[0]
assert (len(user_notification.objects) == 1)
store.do_delete_user_notification_by_source_id(source_id=ObjectId('666f6f2d6261722d71757578'))
assert (len(user_notification.objects) == 0) | none | 1 | 1.994728 | 2 | |
src/main.py | MarkStefanovic/todo-api | 0 | 6613870 | import fastapi
import uvicorn
from fastapi import HTTPException
from starlette.middleware.cors import CORSMiddleware
from src import core, api
logger = core.logger.getChild("main")
def create_app() -> fastapi.FastAPI:
config = core.EnvironConfig()
app = fastapi.FastAPI(title="todo-api", debug=config.debug, version="0.0.0")
app.add_middleware(
CORSMiddleware,
allow_origins=config.allowed_hosts,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# app.add_event_handler("startup", create_start_app_handler(app))
# app.add_event_handler("shutdown", create_stop_app_handler(app))
app.add_exception_handler(HTTPException, api.http_error_handler)
# app.add_exception_handler(RequestValidationError, http422_error_handler)
app.include_router(api.router, prefix="/api")
return app
if __name__ == "__main__":
import webbrowser
webbrowser.open("http://localhost:8000/docs")
import dotenv
dotenv.load_dotenv(dotenv.find_dotenv())
app = create_app()
uvicorn.run(app, host="0.0.0.0", port=8000)
| import fastapi
import uvicorn
from fastapi import HTTPException
from starlette.middleware.cors import CORSMiddleware
from src import core, api
logger = core.logger.getChild("main")
def create_app() -> fastapi.FastAPI:
config = core.EnvironConfig()
app = fastapi.FastAPI(title="todo-api", debug=config.debug, version="0.0.0")
app.add_middleware(
CORSMiddleware,
allow_origins=config.allowed_hosts,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# app.add_event_handler("startup", create_start_app_handler(app))
# app.add_event_handler("shutdown", create_stop_app_handler(app))
app.add_exception_handler(HTTPException, api.http_error_handler)
# app.add_exception_handler(RequestValidationError, http422_error_handler)
app.include_router(api.router, prefix="/api")
return app
if __name__ == "__main__":
import webbrowser
webbrowser.open("http://localhost:8000/docs")
import dotenv
dotenv.load_dotenv(dotenv.find_dotenv())
app = create_app()
uvicorn.run(app, host="0.0.0.0", port=8000)
| en | 0.191662 | # app.add_event_handler("startup", create_start_app_handler(app)) # app.add_event_handler("shutdown", create_stop_app_handler(app)) # app.add_exception_handler(RequestValidationError, http422_error_handler) | 2.438218 | 2 |
valveless/problem.py | stigmarl/valveless-pump | 0 | 6613871 | <filename>valveless/problem.py
import numpy as np
class Problem(object):
"""
This class encapsulates the relevant physical parameters of the problem into an object.
"""
def __init__(self, L, D, f, psi_ca, a_0, alpha_m, rho_t, mu_m, eta_f, gamma):
"""
Initializes and sets the various physical parameters.
Parameters
----------
L: scalar
Length of the along bubble the z axis [mm]
D: scalar
Length of transition region [mm]
f: scalar
Frequency of the driving wave [kHz]
psi_ca: scalar
Diameter oscillation amplitude for the bubble [mm]
a_0: scalar
Radius of gas bubble [mm]
alpha_m: scalar
Relative vole for tissue matrix []
rho_t: scalar
Tissue mass density, including tissue matrix and fluid [kg/cm³]
mu_m: scalar
Shear stiffness the tissue matrix [kg/(m*s)] = [Pa*s]
eta_f: scalar
Dynamic viscosity of the fluid [Pa*s
gamma: scalar
Viscous friction between fluid and tissue matrix.
"""
self.L = L
self.D = D
self.f = f
self.psi_ca = psi_ca
self.a_0 = a_0
self.alpha_m = alpha_m
self.alpha_f = 1- self.alpha_m
self.rho_t = rho_t
self.mu_m = mu_m
self.eta_f = eta_f
self.gamma = gamma
self.rho_m = self.alpha_m*self.rho_t
self.rho_f = self.alpha_m*self.rho_t
def _psi_c_amplitude(self, z):
"""
Helper function that returns the amplitude of the capillary surface vibrations at a specific position.
Parameters
----------
z: scalar
Position along z axis
Returns
-------
scalar:
The amplitude at a position z > 0.
"""
if z <= self.L/2 - self.D:
return self.psi_ca
elif z > self.L/2 - self.D and z < self.L/2:
return self.psi_ca/2*(1-np.cos(np.pi*(z-self.L/2)/self.D))
else:
return 0
def vec_psi_c_amplitude(self, z_array):
"""
Vectorized version of _psi_c_amplitude.
Parameters
----------
z_array: ndarray
1D array of positions along the z axis.
Returns
-------
ndarray
1D array of amplitudes.
"""
vec_psi_z = np.vectorize(self._psi_c_amplitude)
return vec_psi_z(z_array)
| <filename>valveless/problem.py
import numpy as np
class Problem(object):
"""
This class encapsulates the relevant physical parameters of the problem into an object.
"""
def __init__(self, L, D, f, psi_ca, a_0, alpha_m, rho_t, mu_m, eta_f, gamma):
"""
Initializes and sets the various physical parameters.
Parameters
----------
L: scalar
Length of the along bubble the z axis [mm]
D: scalar
Length of transition region [mm]
f: scalar
Frequency of the driving wave [kHz]
psi_ca: scalar
Diameter oscillation amplitude for the bubble [mm]
a_0: scalar
Radius of gas bubble [mm]
alpha_m: scalar
Relative vole for tissue matrix []
rho_t: scalar
Tissue mass density, including tissue matrix and fluid [kg/cm³]
mu_m: scalar
Shear stiffness the tissue matrix [kg/(m*s)] = [Pa*s]
eta_f: scalar
Dynamic viscosity of the fluid [Pa*s
gamma: scalar
Viscous friction between fluid and tissue matrix.
"""
self.L = L
self.D = D
self.f = f
self.psi_ca = psi_ca
self.a_0 = a_0
self.alpha_m = alpha_m
self.alpha_f = 1- self.alpha_m
self.rho_t = rho_t
self.mu_m = mu_m
self.eta_f = eta_f
self.gamma = gamma
self.rho_m = self.alpha_m*self.rho_t
self.rho_f = self.alpha_m*self.rho_t
def _psi_c_amplitude(self, z):
"""
Helper function that returns the amplitude of the capillary surface vibrations at a specific position.
Parameters
----------
z: scalar
Position along z axis
Returns
-------
scalar:
The amplitude at a position z > 0.
"""
if z <= self.L/2 - self.D:
return self.psi_ca
elif z > self.L/2 - self.D and z < self.L/2:
return self.psi_ca/2*(1-np.cos(np.pi*(z-self.L/2)/self.D))
else:
return 0
def vec_psi_c_amplitude(self, z_array):
"""
Vectorized version of _psi_c_amplitude.
Parameters
----------
z_array: ndarray
1D array of positions along the z axis.
Returns
-------
ndarray
1D array of amplitudes.
"""
vec_psi_z = np.vectorize(self._psi_c_amplitude)
return vec_psi_z(z_array)
| en | 0.718974 | This class encapsulates the relevant physical parameters of the problem into an object. Initializes and sets the various physical parameters. Parameters ---------- L: scalar Length of the along bubble the z axis [mm] D: scalar Length of transition region [mm] f: scalar Frequency of the driving wave [kHz] psi_ca: scalar Diameter oscillation amplitude for the bubble [mm] a_0: scalar Radius of gas bubble [mm] alpha_m: scalar Relative vole for tissue matrix [] rho_t: scalar Tissue mass density, including tissue matrix and fluid [kg/cm³] mu_m: scalar Shear stiffness the tissue matrix [kg/(m*s)] = [Pa*s] eta_f: scalar Dynamic viscosity of the fluid [Pa*s gamma: scalar Viscous friction between fluid and tissue matrix. Helper function that returns the amplitude of the capillary surface vibrations at a specific position. Parameters ---------- z: scalar Position along z axis Returns ------- scalar: The amplitude at a position z > 0. Vectorized version of _psi_c_amplitude. Parameters ---------- z_array: ndarray 1D array of positions along the z axis. Returns ------- ndarray 1D array of amplitudes. | 3.403209 | 3 |
Analytics/importers/lisbon.py | thanosbnt/SharingCitiesDashboard | 4 | 6613872 | <reponame>thanosbnt/SharingCitiesDashboard<gh_stars>1-10
import json
import logging
import traceback
import sys
from typing import Union
import pandas as pd
import requests
from requests.auth import HTTPBasicAuth
from importers.base import BaseImporter
from importers.json_reader import JsonReader
from .state_decorator import ImporterStatus, Status
from .attr_range_decorator import update_attribute_ranges
sys.path.append("../..")
from settings import GetConfig
logging.basicConfig(level='INFO')
logger = logging.getLogger(__name__)
@GetConfig("LisbonAPI", 'api_endpoints', 'lisbon')
class LisbonAPI(BaseImporter):
"""
LisbonAPI Importer
"""
importer_status = ImporterStatus.get_importer_status()
def __init__(self) -> None:
"""
Get Importer configurations
Instantiate BaseImporter
"""
super().__init__(self.API_NAME, self.BASE_URL, self.REFRESH_TIME,
self.API_KEY, self.API_CLASS, self.TOKEN_EXPIRY)
@update_attribute_ranges
def _create_datasource(self, headers: Union[str, None] = None) -> None:
"""
Create DataSource
:param headers: Request headers
"""
try:
_headers = {'Authorization': 'Bearer %s' % self._refresh_token()}
super()._create_datasource(_headers)
self.df = self.create_dataframe(object_separator='timeToLive')
self.df = self.df[self.df['streamName'] == 'GiraStation']
data = self.df['data'].tolist()
temp_df = None
for d in data:
j_reader = JsonReader()
j_reader.create_objects(json.loads(d))
_df = j_reader.create_dataframe()
if temp_df is None:
temp_df = _df
else:
temp_df = temp_df.append(_df, ignore_index=True)
concat_df = pd.concat([self.df, temp_df], axis=0)
if concat_df.empty:
concat_df.to_csv('/Users/hemanshu/Desktop/lisbon_test.csv')
logger.error('Nothing to save as dataframe is empty')
else:
logger.info(concat_df)
self.create_datasource(dataframe=concat_df, sensor_tag='', attribute_tag=[],
unit_value=[], bespoke_unit_tag=[], description=[],
bespoke_sub_theme=[], location_tag='loc',
api_timestamp_tag='run_time_stamp')
self.importer_status.status = Status.success(__class__.__name__)
except Exception as e:
self.importer_status.status = Status.failure(__class__.__name__, e.__str__(), traceback.format_exc())
def _refresh_token(self) -> str:
"""
Refresh API Token
:param args: Variable argument list
:return: New token
"""
headers = {"grant_type": "client_credentials"}
token_url = 'https://iot.alticelabs.com/api/devices/token'
token = requests.post(token_url, headers=headers, auth=HTTPBasicAuth(self.USER_NAME, self.USER_PASSCODE))
return str(token.text)
| import json
import logging
import traceback
import sys
from typing import Union
import pandas as pd
import requests
from requests.auth import HTTPBasicAuth
from importers.base import BaseImporter
from importers.json_reader import JsonReader
from .state_decorator import ImporterStatus, Status
from .attr_range_decorator import update_attribute_ranges
sys.path.append("../..")
from settings import GetConfig
logging.basicConfig(level='INFO')
logger = logging.getLogger(__name__)
@GetConfig("LisbonAPI", 'api_endpoints', 'lisbon')
class LisbonAPI(BaseImporter):
"""
LisbonAPI Importer
"""
importer_status = ImporterStatus.get_importer_status()
def __init__(self) -> None:
"""
Get Importer configurations
Instantiate BaseImporter
"""
super().__init__(self.API_NAME, self.BASE_URL, self.REFRESH_TIME,
self.API_KEY, self.API_CLASS, self.TOKEN_EXPIRY)
@update_attribute_ranges
def _create_datasource(self, headers: Union[str, None] = None) -> None:
"""
Create DataSource
:param headers: Request headers
"""
try:
_headers = {'Authorization': 'Bearer %s' % self._refresh_token()}
super()._create_datasource(_headers)
self.df = self.create_dataframe(object_separator='timeToLive')
self.df = self.df[self.df['streamName'] == 'GiraStation']
data = self.df['data'].tolist()
temp_df = None
for d in data:
j_reader = JsonReader()
j_reader.create_objects(json.loads(d))
_df = j_reader.create_dataframe()
if temp_df is None:
temp_df = _df
else:
temp_df = temp_df.append(_df, ignore_index=True)
concat_df = pd.concat([self.df, temp_df], axis=0)
if concat_df.empty:
concat_df.to_csv('/Users/hemanshu/Desktop/lisbon_test.csv')
logger.error('Nothing to save as dataframe is empty')
else:
logger.info(concat_df)
self.create_datasource(dataframe=concat_df, sensor_tag='', attribute_tag=[],
unit_value=[], bespoke_unit_tag=[], description=[],
bespoke_sub_theme=[], location_tag='loc',
api_timestamp_tag='run_time_stamp')
self.importer_status.status = Status.success(__class__.__name__)
except Exception as e:
self.importer_status.status = Status.failure(__class__.__name__, e.__str__(), traceback.format_exc())
def _refresh_token(self) -> str:
"""
Refresh API Token
:param args: Variable argument list
:return: New token
"""
headers = {"grant_type": "client_credentials"}
token_url = 'https://iot.alticelabs.com/api/devices/token'
token = requests.post(token_url, headers=headers, auth=HTTPBasicAuth(self.USER_NAME, self.USER_PASSCODE))
return str(token.text) | en | 0.355268 | LisbonAPI Importer Get Importer configurations Instantiate BaseImporter Create DataSource :param headers: Request headers Refresh API Token :param args: Variable argument list :return: New token | 2.258839 | 2 |
tests/unit/ska_tmc_cdm/schemas/test_codec.py | ska-telescope/cdm-shared-library | 0 | 6613873 | """
Unit tests for the ska_tmc_cdm.schemas.codec module.
"""
import copy
import json
import tempfile
import functools
import pytest
import ska_tmc_cdm
from ska_tmc_cdm.exceptions import JsonValidationError, SchemaNotFound
from ska_tmc_cdm.messages.central_node.assign_resources import AssignResourcesRequest
from ska_tmc_cdm.messages.subarray_node.configure import ConfigureRequest
from ska_tmc_cdm.schemas import CODEC
from ska_tmc_cdm.utils import assert_json_is_equal
from tests.unit.ska_tmc_cdm.schemas.central_node.test_assign_resources import (
VALID_MID_ASSIGNRESOURCESREQUEST_JSON,
VALID_MID_ASSIGNRESOURCESREQUEST_OBJECT,
VALID_LOW_ASSIGNRESOURCESREQUEST_JSON,
VALID_LOW_ASSIGNRESOURCESREQUEST_OBJECT
)
from tests.unit.ska_tmc_cdm.schemas.subarray_node.test_configure import (
VALID_MID_CONFIGURE_JSON,
VALID_MID_CONFIGURE_OBJECT,
VALID_LOW_CONFIGURE_JSON,
VALID_LOW_CONFIGURE_OBJECT,
INVALID_LOW_CONFIGURE_JSON
)
TEST_PARAMETERS = [
(AssignResourcesRequest,
VALID_MID_ASSIGNRESOURCESREQUEST_JSON,
VALID_MID_ASSIGNRESOURCESREQUEST_OBJECT),
(AssignResourcesRequest,
VALID_LOW_ASSIGNRESOURCESREQUEST_JSON,
VALID_LOW_ASSIGNRESOURCESREQUEST_OBJECT),
(ConfigureRequest,
VALID_MID_CONFIGURE_JSON,
VALID_MID_CONFIGURE_OBJECT),
(ConfigureRequest,
VALID_LOW_CONFIGURE_JSON,
VALID_LOW_CONFIGURE_OBJECT),
]
@pytest.mark.parametrize(
"msg_cls,json_str,expected",
TEST_PARAMETERS
)
def test_codec_loads(msg_cls, json_str, expected):
"""
Verify that the codec unmarshalls objects correctly.
"""
unmarshalled = CODEC.loads(msg_cls, json_str)
assert unmarshalled == expected
@pytest.mark.parametrize(
"msg_cls,expected,instance",
TEST_PARAMETERS
)
def test_codec_dumps(msg_cls, expected, instance):
"""
Verify that the codec unmarshalls objects correctly.
"""
marshalled = CODEC.dumps(instance)
assert_json_is_equal(marshalled, expected)
@pytest.mark.parametrize(
"msg_cls,json_str,expected",
TEST_PARAMETERS
)
def test_codec_load_from_file(msg_cls, json_str, expected):
"""
Verify that the codec loads JSON from file for all key objects.
"""
# mode='w' is required otherwise tempfile expects bytes
with tempfile.NamedTemporaryFile(mode='w') as f:
f.write(json_str)
f.flush()
unmarshalled = CODEC.load_from_file(msg_cls, f.name)
assert unmarshalled == expected
def test_codec_loads_raises_exception_on_invalid_schema():
"""
Verify that loading data that references an invalid schema raises
SchemaNotFound when strictness=2.
"""
# create some test JSON that references an invalid schema
invalid_data = json.loads(VALID_LOW_CONFIGURE_JSON)
invalid_data['interface'] = 'https://foo.com/badschema/2.0'
invalid_data = json.dumps(invalid_data)
with pytest.raises(SchemaNotFound):
CODEC.loads(ConfigureRequest, invalid_data, strictness=2)
def test_codec_dumps_raises_exception_on_invalid_schema():
"""
Verify that dumping data that references an invalid schema raises
SchemaNotFound when strictness=2.
"""
# create a test object that references an invalid schema
invalid_data = copy.deepcopy(VALID_LOW_CONFIGURE_OBJECT)
invalid_data.interface = 'https://foo.com/badschema/2.0'
# validation should occur regardless of strictness, but exceptions are
# only raised when strictness=2
CODEC.dumps(invalid_data, strictness=0)
CODEC.dumps(invalid_data, strictness=1)
with pytest.raises(SchemaNotFound):
CODEC.dumps(invalid_data, strictness=2)
def test_loads_invalid_json_with_validation_enabled():
"""
Verify that the strictness argument is respected when loading invalid
JSON, resulting in a JsonValidationError with strictness=2.
"""
test_call = functools.partial(
CODEC.loads, ConfigureRequest, INVALID_LOW_CONFIGURE_JSON, validate=True
)
# no exception should be raised unless strictness is 0 or 1
for strictness in [0, 1]:
unmarshalled = test_call(strictness=strictness)
marshalled = CODEC.dumps(unmarshalled, validate=False)
assert_json_is_equal(INVALID_LOW_CONFIGURE_JSON, marshalled)
# strictness=2 should result in an error
with pytest.raises(JsonValidationError):
test_call(strictness=2)
@pytest.mark.parametrize("strictness", [0, 1, 2])
def test_loads_invalid_json_with_validation_disabled(strictness):
"""
Verify that the invalid JSON can be loaded when validation is disabled.
"""
unmarshalled = CODEC.loads(
ConfigureRequest,
INVALID_LOW_CONFIGURE_JSON,
validate=False,
strictness=strictness
)
marshalled = CODEC.dumps(unmarshalled, validate=False)
assert_json_is_equal(INVALID_LOW_CONFIGURE_JSON, marshalled)
@pytest.mark.parametrize('message_cls', [
ska_tmc_cdm.messages.central_node.assign_resources.AssignResourcesRequest,
ska_tmc_cdm.messages.central_node.assign_resources.AssignResourcesResponse,
ska_tmc_cdm.messages.central_node.release_resources.ReleaseResourcesRequest,
ska_tmc_cdm.messages.subarray_node.configure.ConfigureRequest,
ska_tmc_cdm.messages.subarray_node.scan.ScanRequest,
ska_tmc_cdm.messages.subarray_node.assigned_resources.AssignedResources,
ska_tmc_cdm.messages.mccscontroller.allocate.AllocateRequest,
ska_tmc_cdm.messages.mccscontroller.releaseresources.ReleaseResourcesRequest,
ska_tmc_cdm.messages.mccssubarray.configure.ConfigureRequest,
ska_tmc_cdm.messages.mccssubarray.scan.ScanRequest,
ska_tmc_cdm.messages.mccssubarray.assigned_resources.AssignedResources
])
def test_schema_registration(message_cls):
"""
Verify that a schema is registered with the MarshmallowCodec.
"""
assert message_cls in CODEC._schema
| """
Unit tests for the ska_tmc_cdm.schemas.codec module.
"""
import copy
import json
import tempfile
import functools
import pytest
import ska_tmc_cdm
from ska_tmc_cdm.exceptions import JsonValidationError, SchemaNotFound
from ska_tmc_cdm.messages.central_node.assign_resources import AssignResourcesRequest
from ska_tmc_cdm.messages.subarray_node.configure import ConfigureRequest
from ska_tmc_cdm.schemas import CODEC
from ska_tmc_cdm.utils import assert_json_is_equal
from tests.unit.ska_tmc_cdm.schemas.central_node.test_assign_resources import (
VALID_MID_ASSIGNRESOURCESREQUEST_JSON,
VALID_MID_ASSIGNRESOURCESREQUEST_OBJECT,
VALID_LOW_ASSIGNRESOURCESREQUEST_JSON,
VALID_LOW_ASSIGNRESOURCESREQUEST_OBJECT
)
from tests.unit.ska_tmc_cdm.schemas.subarray_node.test_configure import (
VALID_MID_CONFIGURE_JSON,
VALID_MID_CONFIGURE_OBJECT,
VALID_LOW_CONFIGURE_JSON,
VALID_LOW_CONFIGURE_OBJECT,
INVALID_LOW_CONFIGURE_JSON
)
TEST_PARAMETERS = [
(AssignResourcesRequest,
VALID_MID_ASSIGNRESOURCESREQUEST_JSON,
VALID_MID_ASSIGNRESOURCESREQUEST_OBJECT),
(AssignResourcesRequest,
VALID_LOW_ASSIGNRESOURCESREQUEST_JSON,
VALID_LOW_ASSIGNRESOURCESREQUEST_OBJECT),
(ConfigureRequest,
VALID_MID_CONFIGURE_JSON,
VALID_MID_CONFIGURE_OBJECT),
(ConfigureRequest,
VALID_LOW_CONFIGURE_JSON,
VALID_LOW_CONFIGURE_OBJECT),
]
@pytest.mark.parametrize(
"msg_cls,json_str,expected",
TEST_PARAMETERS
)
def test_codec_loads(msg_cls, json_str, expected):
"""
Verify that the codec unmarshalls objects correctly.
"""
unmarshalled = CODEC.loads(msg_cls, json_str)
assert unmarshalled == expected
@pytest.mark.parametrize(
"msg_cls,expected,instance",
TEST_PARAMETERS
)
def test_codec_dumps(msg_cls, expected, instance):
"""
Verify that the codec unmarshalls objects correctly.
"""
marshalled = CODEC.dumps(instance)
assert_json_is_equal(marshalled, expected)
@pytest.mark.parametrize(
"msg_cls,json_str,expected",
TEST_PARAMETERS
)
def test_codec_load_from_file(msg_cls, json_str, expected):
"""
Verify that the codec loads JSON from file for all key objects.
"""
# mode='w' is required otherwise tempfile expects bytes
with tempfile.NamedTemporaryFile(mode='w') as f:
f.write(json_str)
f.flush()
unmarshalled = CODEC.load_from_file(msg_cls, f.name)
assert unmarshalled == expected
def test_codec_loads_raises_exception_on_invalid_schema():
"""
Verify that loading data that references an invalid schema raises
SchemaNotFound when strictness=2.
"""
# create some test JSON that references an invalid schema
invalid_data = json.loads(VALID_LOW_CONFIGURE_JSON)
invalid_data['interface'] = 'https://foo.com/badschema/2.0'
invalid_data = json.dumps(invalid_data)
with pytest.raises(SchemaNotFound):
CODEC.loads(ConfigureRequest, invalid_data, strictness=2)
def test_codec_dumps_raises_exception_on_invalid_schema():
"""
Verify that dumping data that references an invalid schema raises
SchemaNotFound when strictness=2.
"""
# create a test object that references an invalid schema
invalid_data = copy.deepcopy(VALID_LOW_CONFIGURE_OBJECT)
invalid_data.interface = 'https://foo.com/badschema/2.0'
# validation should occur regardless of strictness, but exceptions are
# only raised when strictness=2
CODEC.dumps(invalid_data, strictness=0)
CODEC.dumps(invalid_data, strictness=1)
with pytest.raises(SchemaNotFound):
CODEC.dumps(invalid_data, strictness=2)
def test_loads_invalid_json_with_validation_enabled():
"""
Verify that the strictness argument is respected when loading invalid
JSON, resulting in a JsonValidationError with strictness=2.
"""
test_call = functools.partial(
CODEC.loads, ConfigureRequest, INVALID_LOW_CONFIGURE_JSON, validate=True
)
# no exception should be raised unless strictness is 0 or 1
for strictness in [0, 1]:
unmarshalled = test_call(strictness=strictness)
marshalled = CODEC.dumps(unmarshalled, validate=False)
assert_json_is_equal(INVALID_LOW_CONFIGURE_JSON, marshalled)
# strictness=2 should result in an error
with pytest.raises(JsonValidationError):
test_call(strictness=2)
@pytest.mark.parametrize("strictness", [0, 1, 2])
def test_loads_invalid_json_with_validation_disabled(strictness):
"""
Verify that the invalid JSON can be loaded when validation is disabled.
"""
unmarshalled = CODEC.loads(
ConfigureRequest,
INVALID_LOW_CONFIGURE_JSON,
validate=False,
strictness=strictness
)
marshalled = CODEC.dumps(unmarshalled, validate=False)
assert_json_is_equal(INVALID_LOW_CONFIGURE_JSON, marshalled)
@pytest.mark.parametrize('message_cls', [
ska_tmc_cdm.messages.central_node.assign_resources.AssignResourcesRequest,
ska_tmc_cdm.messages.central_node.assign_resources.AssignResourcesResponse,
ska_tmc_cdm.messages.central_node.release_resources.ReleaseResourcesRequest,
ska_tmc_cdm.messages.subarray_node.configure.ConfigureRequest,
ska_tmc_cdm.messages.subarray_node.scan.ScanRequest,
ska_tmc_cdm.messages.subarray_node.assigned_resources.AssignedResources,
ska_tmc_cdm.messages.mccscontroller.allocate.AllocateRequest,
ska_tmc_cdm.messages.mccscontroller.releaseresources.ReleaseResourcesRequest,
ska_tmc_cdm.messages.mccssubarray.configure.ConfigureRequest,
ska_tmc_cdm.messages.mccssubarray.scan.ScanRequest,
ska_tmc_cdm.messages.mccssubarray.assigned_resources.AssignedResources
])
def test_schema_registration(message_cls):
"""
Verify that a schema is registered with the MarshmallowCodec.
"""
assert message_cls in CODEC._schema
| en | 0.76378 | Unit tests for the ska_tmc_cdm.schemas.codec module. Verify that the codec unmarshalls objects correctly. Verify that the codec unmarshalls objects correctly. Verify that the codec loads JSON from file for all key objects. # mode='w' is required otherwise tempfile expects bytes Verify that loading data that references an invalid schema raises SchemaNotFound when strictness=2. # create some test JSON that references an invalid schema Verify that dumping data that references an invalid schema raises SchemaNotFound when strictness=2. # create a test object that references an invalid schema # validation should occur regardless of strictness, but exceptions are # only raised when strictness=2 Verify that the strictness argument is respected when loading invalid JSON, resulting in a JsonValidationError with strictness=2. # no exception should be raised unless strictness is 0 or 1 # strictness=2 should result in an error Verify that the invalid JSON can be loaded when validation is disabled. Verify that a schema is registered with the MarshmallowCodec. | 2.078294 | 2 |
suppress_tutorial.py | NightmareQAQ/python-notes | 106 | 6613874 | <filename>suppress_tutorial.py
# ref.
# https://docs.python.org/3/library/contextlib.html#contextlib.suppress
# ignore specific errors
from contextlib import suppress
def ex1():
with suppress(TypeError):
t = '1' / 0
# Equivalent
# try:
# t = '1' / 0
# except TypeError:
# pass
def ex2():
with suppress(TypeError, ZeroDivisionError):
t = 1 / 0
# Equivalent
# try:
# t = 1 / 0
# except (TypeError,ZeroDivisionError):
# pass
# try:
# t = 1 / 0
# except TypeError:
# pass
# except ZeroDivisionError:
# pass
if __name__ == "__main__":
ex1()
# ex2()
| <filename>suppress_tutorial.py
# ref.
# https://docs.python.org/3/library/contextlib.html#contextlib.suppress
# ignore specific errors
from contextlib import suppress
def ex1():
with suppress(TypeError):
t = '1' / 0
# Equivalent
# try:
# t = '1' / 0
# except TypeError:
# pass
def ex2():
with suppress(TypeError, ZeroDivisionError):
t = 1 / 0
# Equivalent
# try:
# t = 1 / 0
# except (TypeError,ZeroDivisionError):
# pass
# try:
# t = 1 / 0
# except TypeError:
# pass
# except ZeroDivisionError:
# pass
if __name__ == "__main__":
ex1()
# ex2()
| en | 0.600844 | # ref. # https://docs.python.org/3/library/contextlib.html#contextlib.suppress # ignore specific errors # Equivalent # try: # t = '1' / 0 # except TypeError: # pass # Equivalent # try: # t = 1 / 0 # except (TypeError,ZeroDivisionError): # pass # try: # t = 1 / 0 # except TypeError: # pass # except ZeroDivisionError: # pass # ex2() | 3.064308 | 3 |
report/insert/expressions/scripts/simulation.py | DunstanBecht/lpa-workspace | 0 | 6613875 | #!/usr/bin/env python
# coding: utf-8
import numpy as np
from lpa.input import models, notation
def smart_load(r, geo, siz, dismod, modprm, edgcon):
stm = f"../saved/{dismod.__name__}{notation.parameters(modprm, c='stm')}_FUN_{edgcon}.txt"
fM = stm.replace('FUN', 'MMMM')
fK = stm.replace('FUN', 'KKKK')
fg = stm.replace('FUN', 'gggg')
fG = stm.replace('FUN', 'GaGs')
MMMM = np.loadtxt(fM)
KKKK = np.loadtxt(fK)
gggg = np.loadtxt(fg)
GaGs = np.loadtxt(fG)
return MMMM, KKKK, gggg, GaGs
def RDD(r, geo, siz, edgcon, modprm):
return smart_load(r, geo, siz, models.RDD, modprm, edgcon)
def RRDD(r, geo, siz, edgcon, modprm):
return smart_load(r, geo, siz, models.RRDD, modprm, edgcon)
| #!/usr/bin/env python
# coding: utf-8
import numpy as np
from lpa.input import models, notation
def smart_load(r, geo, siz, dismod, modprm, edgcon):
stm = f"../saved/{dismod.__name__}{notation.parameters(modprm, c='stm')}_FUN_{edgcon}.txt"
fM = stm.replace('FUN', 'MMMM')
fK = stm.replace('FUN', 'KKKK')
fg = stm.replace('FUN', 'gggg')
fG = stm.replace('FUN', 'GaGs')
MMMM = np.loadtxt(fM)
KKKK = np.loadtxt(fK)
gggg = np.loadtxt(fg)
GaGs = np.loadtxt(fG)
return MMMM, KKKK, gggg, GaGs
def RDD(r, geo, siz, edgcon, modprm):
return smart_load(r, geo, siz, models.RDD, modprm, edgcon)
def RRDD(r, geo, siz, edgcon, modprm):
return smart_load(r, geo, siz, models.RRDD, modprm, edgcon)
| en | 0.325294 | #!/usr/bin/env python # coding: utf-8 | 2.297371 | 2 |
FIT/migrations/0001_initial.py | Scottchiang90/Procom | 0 | 6613876 | # Generated by Django 3.2.9 on 2021-11-15 05:26
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Facilitator',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='full name')),
('unit', models.CharField(choices=[('1-3', '1-3'), ('1-4', '1-4'), ('1-5', '1-5'), ('1-6', '1-6'), ('1-7', '1-7'), ('1-8', '1-8'), ('1-9', '1-9'), ('1-11', '1-11'), ('1LDR', '1st Div Leadership'), ('OTHER', 'Others')], max_length=5)),
('email', models.EmailField(max_length=254)),
('mobile_number', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region='SG')),
],
),
migrations.CreateModel(
name='Participant',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=50, verbose_name='full name')),
('unit', models.CharField(blank=True, choices=[('1-3', '1-3'), ('1-4', '1-4'), ('1-5', '1-5'), ('1-6', '1-6'), ('1-7', '1-7'), ('1-8', '1-8'), ('1-9', '1-9'), ('1-11', '1-11'), ('1LDR', '1st Div Leadership'), ('OTHER', 'Others')], max_length=5)),
('email', models.EmailField(blank=True, max_length=254)),
('mobile_number', phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128, region='SG')),
('uid', models.CharField(max_length=20, unique=True, verbose_name='Buddy FIT UID')),
('nric', models.CharField(blank=True, max_length=4, verbose_name='NRIC (Last 4 characters)')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Participation',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_datetime', models.DateTimeField(auto_now=True)),
('participant', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='FIT.participant')),
],
),
migrations.CreateModel(
name='Session',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('capacity', models.IntegerField(default=40)),
('date_time', models.DateTimeField()),
('duration', models.IntegerField(default=60, verbose_name='duration (in mins)')),
('call_link', models.URLField(blank=True)),
('instructions', models.CharField(blank=True, max_length=50)),
('conducting', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='conducting', to='FIT.facilitator')),
('participants', models.ManyToManyField(blank=True, through='FIT.Participation', to='FIT.Participant')),
('safety', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='safety', to='FIT.facilitator')),
],
),
migrations.AddField(
model_name='participation',
name='session',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='FIT.session'),
),
migrations.AlterUniqueTogether(
name='participation',
unique_together={('session', 'participant')},
),
]
| # Generated by Django 3.2.9 on 2021-11-15 05:26
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Facilitator',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='full name')),
('unit', models.CharField(choices=[('1-3', '1-3'), ('1-4', '1-4'), ('1-5', '1-5'), ('1-6', '1-6'), ('1-7', '1-7'), ('1-8', '1-8'), ('1-9', '1-9'), ('1-11', '1-11'), ('1LDR', '1st Div Leadership'), ('OTHER', 'Others')], max_length=5)),
('email', models.EmailField(max_length=254)),
('mobile_number', phonenumber_field.modelfields.PhoneNumberField(max_length=128, region='SG')),
],
),
migrations.CreateModel(
name='Participant',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=50, verbose_name='full name')),
('unit', models.CharField(blank=True, choices=[('1-3', '1-3'), ('1-4', '1-4'), ('1-5', '1-5'), ('1-6', '1-6'), ('1-7', '1-7'), ('1-8', '1-8'), ('1-9', '1-9'), ('1-11', '1-11'), ('1LDR', '1st Div Leadership'), ('OTHER', 'Others')], max_length=5)),
('email', models.EmailField(blank=True, max_length=254)),
('mobile_number', phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128, region='SG')),
('uid', models.CharField(max_length=20, unique=True, verbose_name='Buddy FIT UID')),
('nric', models.CharField(blank=True, max_length=4, verbose_name='NRIC (Last 4 characters)')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Participation',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_datetime', models.DateTimeField(auto_now=True)),
('participant', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='FIT.participant')),
],
),
migrations.CreateModel(
name='Session',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('capacity', models.IntegerField(default=40)),
('date_time', models.DateTimeField()),
('duration', models.IntegerField(default=60, verbose_name='duration (in mins)')),
('call_link', models.URLField(blank=True)),
('instructions', models.CharField(blank=True, max_length=50)),
('conducting', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='conducting', to='FIT.facilitator')),
('participants', models.ManyToManyField(blank=True, through='FIT.Participation', to='FIT.Participant')),
('safety', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='safety', to='FIT.facilitator')),
],
),
migrations.AddField(
model_name='participation',
name='session',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='FIT.session'),
),
migrations.AlterUniqueTogether(
name='participation',
unique_together={('session', 'participant')},
),
]
| en | 0.896718 | # Generated by Django 3.2.9 on 2021-11-15 05:26 | 1.772335 | 2 |
neuronit/play/migrations/0015_auto_20170331_0856.py | neuronit/pfa | 0 | 6613877 | <reponame>neuronit/pfa
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-03-31 08:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('play', '0014_auto_20170330_0052'),
]
operations = [
migrations.AlterField(
model_name='reseau',
name='type',
field=models.CharField(choices=[(b'select_type', b'select_type'), (b'MLP', b'MLP'), (b'Elman', b'Elman'), (b'Jordan', b'Jordan')], default=b'select', max_length=100),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-03-31 08:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('play', '0014_auto_20170330_0052'),
]
operations = [
migrations.AlterField(
model_name='reseau',
name='type',
field=models.CharField(choices=[(b'select_type', b'select_type'), (b'MLP', b'MLP'), (b'Elman', b'Elman'), (b'Jordan', b'Jordan')], default=b'select', max_length=100),
),
] | en | 0.828672 | # -*- coding: utf-8 -*- # Generated by Django 1.9.8 on 2017-03-31 08:56 | 1.774436 | 2 |
helper/sidebar.py | hanya/SidebarByMacros | 0 | 6613878 |
import uno
import unohelper
from com.sun.star.beans import PropertyValue
from com.sun.star.beans.PropertyState import DIRECT_VALUE
from com.sun.star.container import XNameContainer, NoSuchElementException, ElementExistException
from com.sun.star.lang import XServiceInfo, \
IllegalArgumentException
from com.sun.star.ui import XUIElementFactory, XUIElement, XToolPanel, XSidebarPanel, \
LayoutSize
from com.sun.star.ui.UIElementType import TOOLPANEL as UET_TOOLPANEL
from com.sun.star.uno import RuntimeException
class SidebarHelperForMacros(unohelper.Base, XServiceInfo, XUIElementFactory):
""" Helps to someone implements sidebar components in Macros.
The factory for UI element have to be registered under
/org.openoffice.Office.UI.Factories/Registered/UIElementFactories.
And the components have to be implemented acoording to
css.ui.UIElementFactory service.
"""
IMPLE_NAME = "mytools.ui.SidebarHelperForMacros"
SERVICE_NAMES = IMPLE_NAME,
CONFIG = "/mytools.UI.SidebarsByMacros/Content/Imples"
@staticmethod
def get():
klass = SidebarHelperForMacros
return klass, klass.IMPLE_NAME, klass.SERVICE_NAMES
def __init__(self, ctx, *args):
self.ctx = ctx
# XServiceInfo
def getImplementationName(self):
return self.IMPLE_NAME
def supportsService(self, name):
return name in self.SERVICE_NAMES
def getSupportedServiceNames(self):
return self.SERVICE_NAMES
# XUIElementFactory
def createUIElement(self, res_url, args):
# see css.ui.XUIElementFactory
# check the res_url is in the configuration
settings = self._get_settings(res_url)
if not settings:
# no UI element found
raise NoSuchElementException()
frame = parent = None
for arg in args:
if arg.Name == "Frame":
frame = arg.Value
elif arg.Name == "ParentWindow":
parent = arg.Value
#elif arg.Name == "Sidebar":
# If you need css.ui.XSidebar interface to request to
# re-layout, keep it.
#elif arg.Name == "SfxBindings":
# This is just pointer address, not useful for extensions.
if not frame:
raise IllegalArgumentException()
if not parent:
raise IllegalArgumentException()
try:
# new instance of requested UI element
return SidebarUIElement(self.ctx, frame, parent, res_url, settings)
except Exception as e:
print("Error in SidebarUIElement.ctor: " + str(e))
def _create(self, name):
return self.ctx.getServiceManager().createInstanceWithContext(name, self.ctx)
def _create_configuration_reader(self, nodepath, res_url):
cp = self._create("com.sun.star.configuration.ConfigurationProvider")
try:
return cp.createInstanceWithArguments(
"com.sun.star.configuration.ConfigurationAccess",
(PropertyValue("nodepath", -1, nodepath, DIRECT_VALUE),))
except:
pass
return None
def _get_settings(self, res_url):
reader = self._create_configuration_reader(self.CONFIG, res_url)
if reader and reader.hasByName(res_url):
try:
return reader.getByName(res_url)
except:
pass
return None
g_ImplementationHelper = unohelper.ImplementationHelper()
g_ImplementationHelper.addImplementation(*SidebarHelperForMacros.get())
class SidebarUIElement(unohelper.Base, XUIElement, XToolPanel, XSidebarPanel, XNameContainer):
""" Individual panel element in deck of sidebar.
Should be implemented according to css.ui.UIElement service.
In the case of toolpanel element, you need additional interfaces:
- css.ui.XToolPanel: describes panel
- css.ui.XSidebarPanel: panel (optional, but if not, unusable)
"""
def __init__(self, ctx, frame, parent, res_url, settings):
self.res_url = res_url
self.ctx = ctx
self.frame = frame
self.parent = parent
self._values = {}
try:
self.window = self._call_macro(settings.Initialize, (self, self.parent))
except Exception as e:
print(e)
raise RuntimeException("Error while calling Initialize for " + self.res_url, None)
# XUIElement
@property
def Frame(self):
return self.frame
@property
def ResourceURL(self):
return self.res_url
@property
def Type(self):
return UET_TOOLPANEL
def getRealInterface(self):
return self # ToDo weakref?
# XToolPanel
def createAccessible(self, parent):
return None
@property
def Window(self):
return self.window
# XSidebarPanel
def getHeightForWidth(self, width):
v = self._values.get("XSidebarPanel", None)
if v:
try:
return v.getHeightForWidth(width)
except:
pass
return LayoutSize(0, -1, 0)
# LO5.1-
def getMinimalWidth(self):
return 50
#
def _call_macro(self, uri, args=()):
script = self._create_script_provider().getScript(uri)
if script:
try:
r ,_ ,_ = script.invoke(args, (), ())
return r
except Exception as e:
print(e)
return None
def _create_script_provider(self):
mspf = self.ctx.getValueByName(
"/singletons/com.sun.star.script.provider.theMasterScriptProviderFactory")
return mspf.createScriptProvider("")
# ToDo language specific script provider
# XNameContainer
# this interface is not required by the panel, just for helper
def insertByName(self, name, value):
if name in self._values:
raise ElementExistException(name, self)
else:
self._values[name] = value
def removeByName(self, name):
if name in self._values:
self._values.pop(name)
else:
raise NoSuchElementException(name, self)
def replaceByName(self, name, value):
if name in self._values:
self._values[name] = value
else:
raise NoSuchElementException(name, self)
def getByName(self, name):
try:
return self._values[name]
except:
raise NoSuchElementException(name, self)
def getElementNames(self):
return tuple(self._values.names())
def hasByName(self, name):
return name in self._values
def getElementType(self):
return uno.getTypeByName("any")
def hasElements(self):
return len(self._values) != 0
|
import uno
import unohelper
from com.sun.star.beans import PropertyValue
from com.sun.star.beans.PropertyState import DIRECT_VALUE
from com.sun.star.container import XNameContainer, NoSuchElementException, ElementExistException
from com.sun.star.lang import XServiceInfo, \
IllegalArgumentException
from com.sun.star.ui import XUIElementFactory, XUIElement, XToolPanel, XSidebarPanel, \
LayoutSize
from com.sun.star.ui.UIElementType import TOOLPANEL as UET_TOOLPANEL
from com.sun.star.uno import RuntimeException
class SidebarHelperForMacros(unohelper.Base, XServiceInfo, XUIElementFactory):
""" Helps to someone implements sidebar components in Macros.
The factory for UI element have to be registered under
/org.openoffice.Office.UI.Factories/Registered/UIElementFactories.
And the components have to be implemented acoording to
css.ui.UIElementFactory service.
"""
IMPLE_NAME = "mytools.ui.SidebarHelperForMacros"
SERVICE_NAMES = IMPLE_NAME,
CONFIG = "/mytools.UI.SidebarsByMacros/Content/Imples"
@staticmethod
def get():
klass = SidebarHelperForMacros
return klass, klass.IMPLE_NAME, klass.SERVICE_NAMES
def __init__(self, ctx, *args):
self.ctx = ctx
# XServiceInfo
def getImplementationName(self):
return self.IMPLE_NAME
def supportsService(self, name):
return name in self.SERVICE_NAMES
def getSupportedServiceNames(self):
return self.SERVICE_NAMES
# XUIElementFactory
def createUIElement(self, res_url, args):
# see css.ui.XUIElementFactory
# check the res_url is in the configuration
settings = self._get_settings(res_url)
if not settings:
# no UI element found
raise NoSuchElementException()
frame = parent = None
for arg in args:
if arg.Name == "Frame":
frame = arg.Value
elif arg.Name == "ParentWindow":
parent = arg.Value
#elif arg.Name == "Sidebar":
# If you need css.ui.XSidebar interface to request to
# re-layout, keep it.
#elif arg.Name == "SfxBindings":
# This is just pointer address, not useful for extensions.
if not frame:
raise IllegalArgumentException()
if not parent:
raise IllegalArgumentException()
try:
# new instance of requested UI element
return SidebarUIElement(self.ctx, frame, parent, res_url, settings)
except Exception as e:
print("Error in SidebarUIElement.ctor: " + str(e))
def _create(self, name):
return self.ctx.getServiceManager().createInstanceWithContext(name, self.ctx)
def _create_configuration_reader(self, nodepath, res_url):
cp = self._create("com.sun.star.configuration.ConfigurationProvider")
try:
return cp.createInstanceWithArguments(
"com.sun.star.configuration.ConfigurationAccess",
(PropertyValue("nodepath", -1, nodepath, DIRECT_VALUE),))
except:
pass
return None
def _get_settings(self, res_url):
reader = self._create_configuration_reader(self.CONFIG, res_url)
if reader and reader.hasByName(res_url):
try:
return reader.getByName(res_url)
except:
pass
return None
g_ImplementationHelper = unohelper.ImplementationHelper()
g_ImplementationHelper.addImplementation(*SidebarHelperForMacros.get())
class SidebarUIElement(unohelper.Base, XUIElement, XToolPanel, XSidebarPanel, XNameContainer):
""" Individual panel element in deck of sidebar.
Should be implemented according to css.ui.UIElement service.
In the case of toolpanel element, you need additional interfaces:
- css.ui.XToolPanel: describes panel
- css.ui.XSidebarPanel: panel (optional, but if not, unusable)
"""
def __init__(self, ctx, frame, parent, res_url, settings):
self.res_url = res_url
self.ctx = ctx
self.frame = frame
self.parent = parent
self._values = {}
try:
self.window = self._call_macro(settings.Initialize, (self, self.parent))
except Exception as e:
print(e)
raise RuntimeException("Error while calling Initialize for " + self.res_url, None)
# XUIElement
@property
def Frame(self):
return self.frame
@property
def ResourceURL(self):
return self.res_url
@property
def Type(self):
return UET_TOOLPANEL
def getRealInterface(self):
return self # ToDo weakref?
# XToolPanel
def createAccessible(self, parent):
return None
@property
def Window(self):
return self.window
# XSidebarPanel
def getHeightForWidth(self, width):
v = self._values.get("XSidebarPanel", None)
if v:
try:
return v.getHeightForWidth(width)
except:
pass
return LayoutSize(0, -1, 0)
# LO5.1-
def getMinimalWidth(self):
return 50
#
def _call_macro(self, uri, args=()):
script = self._create_script_provider().getScript(uri)
if script:
try:
r ,_ ,_ = script.invoke(args, (), ())
return r
except Exception as e:
print(e)
return None
def _create_script_provider(self):
mspf = self.ctx.getValueByName(
"/singletons/com.sun.star.script.provider.theMasterScriptProviderFactory")
return mspf.createScriptProvider("")
# ToDo language specific script provider
# XNameContainer
# this interface is not required by the panel, just for helper
def insertByName(self, name, value):
if name in self._values:
raise ElementExistException(name, self)
else:
self._values[name] = value
def removeByName(self, name):
if name in self._values:
self._values.pop(name)
else:
raise NoSuchElementException(name, self)
def replaceByName(self, name, value):
if name in self._values:
self._values[name] = value
else:
raise NoSuchElementException(name, self)
def getByName(self, name):
try:
return self._values[name]
except:
raise NoSuchElementException(name, self)
def getElementNames(self):
return tuple(self._values.names())
def hasByName(self, name):
return name in self._values
def getElementType(self):
return uno.getTypeByName("any")
def hasElements(self):
return len(self._values) != 0
| en | 0.706951 | Helps to someone implements sidebar components in Macros. The factory for UI element have to be registered under /org.openoffice.Office.UI.Factories/Registered/UIElementFactories. And the components have to be implemented acoording to css.ui.UIElementFactory service. # XServiceInfo # XUIElementFactory # see css.ui.XUIElementFactory # check the res_url is in the configuration # no UI element found #elif arg.Name == "Sidebar": # If you need css.ui.XSidebar interface to request to # re-layout, keep it. #elif arg.Name == "SfxBindings": # This is just pointer address, not useful for extensions. # new instance of requested UI element Individual panel element in deck of sidebar. Should be implemented according to css.ui.UIElement service. In the case of toolpanel element, you need additional interfaces: - css.ui.XToolPanel: describes panel - css.ui.XSidebarPanel: panel (optional, but if not, unusable) # XUIElement # ToDo weakref? # XToolPanel # XSidebarPanel # LO5.1- # # ToDo language specific script provider # XNameContainer # this interface is not required by the panel, just for helper | 1.802541 | 2 |
mahiru/registry/__init__.py | SecConNet/proof_of_concept | 4 | 6613879 | <reponame>SecConNet/proof_of_concept
"""The central registry and its replication mechanism."""
| """The central registry and its replication mechanism.""" | en | 0.953414 | The central registry and its replication mechanism. | 0.998616 | 1 |
encode.py | undercoverGod/lossy-image-compression | 16 | 6613880 | <reponame>undercoverGod/lossy-image-compression
import argparse
from model import Encoder
import os
from tqdm import tqdm
import warnings
warnings.filterwarnings('ignore')
parser = argparse.ArgumentParser()
parser.add_argument('--model', nargs='?', default='./out/main.tar', help='Path for model checkpoint file [default: ./out/main.tar]')
parser.add_argument('--image', nargs='?', default='./dataset/', help='Directory which holds the images to be compressed [default: ./dataset/]')
parser.add_argument('--out', nargs='?', default='./out/compressed/', help='Directory which will hold the compressed images [default: ./out/compressed/]')
args = parser.parse_args()
f = os.listdir(args.image)
inputs = []
for i in f:
if '.png' in i:
inputs.append(i)
encoder = Encoder(args.model)
for i in inputs:
print('converting %s...'%i)
encoder.encode_and_save(os.path.join(args.image, i), os.path.join(args.out, '%scomp.xfr'%i[:-4])) | import argparse
from model import Encoder
import os
from tqdm import tqdm
import warnings
warnings.filterwarnings('ignore')
parser = argparse.ArgumentParser()
parser.add_argument('--model', nargs='?', default='./out/main.tar', help='Path for model checkpoint file [default: ./out/main.tar]')
parser.add_argument('--image', nargs='?', default='./dataset/', help='Directory which holds the images to be compressed [default: ./dataset/]')
parser.add_argument('--out', nargs='?', default='./out/compressed/', help='Directory which will hold the compressed images [default: ./out/compressed/]')
args = parser.parse_args()
f = os.listdir(args.image)
inputs = []
for i in f:
if '.png' in i:
inputs.append(i)
encoder = Encoder(args.model)
for i in inputs:
print('converting %s...'%i)
encoder.encode_and_save(os.path.join(args.image, i), os.path.join(args.out, '%scomp.xfr'%i[:-4])) | none | 1 | 2.56135 | 3 | |
alfred/modules/api/view_components/a_action_icon.py | Sefrwahed/Alfred | 5 | 6613881 | from alfred.modules.api.view_components.a_icon import AIcon
class AActionIcon(AIcon):
def __init__(self, icon, color, data_action, data_id, size='small'):
super().__init__(icon, color, "right", size=size, **{"data-id": data_id, "data-action": data_action}) | from alfred.modules.api.view_components.a_icon import AIcon
class AActionIcon(AIcon):
def __init__(self, icon, color, data_action, data_id, size='small'):
super().__init__(icon, color, "right", size=size, **{"data-id": data_id, "data-action": data_action}) | none | 1 | 2.360316 | 2 | |
py_config/config.py | aki-nasu/jupyter_personium | 0 | 6613882 | import json
PATH_PROXY = 'config/proxy.txt'
PATH_UNIT = 'config/personium_unit.txt'
PATH_CELL_LIST = 'config/personium_cell_list.txt'
def update_config_proxy(data):
with open(PATH_PROXY, 'r') as f:
json_contents = json.load(f)
json_contents['http']['host']['host'] = data['http_host']
json_contents['http']['host']['port'] = data['http_port']
json_contents['http']['user']['user'] = data['http_user']
json_contents['http']['user']['password'] = data['http_password']
json_contents['https']['host']['host'] = data['https_host']
json_contents['https']['host']['port'] = data['https_port']
json_contents['https']['user']['user'] = data['https_user']
json_contents['https']['user']['password'] = data['https_password']
with open(PATH_PROXY, 'w') as f:
json.dump(json_contents, f, ensure_ascii=False, indent=4, sort_keys=True, separators=(',', ' : '))
return json_contents
def update_config_unit(data):
with open(PATH_UNIT, 'r') as f:
json_contents = json.load(f)
json_contents['unit_name'] = data
with open(PATH_UNIT, 'w') as f:
json.dump(json_contents, f, ensure_ascii=False, indent=4, sort_keys=True, separators=(',', ' : '))
return json_contents
def get_config_cell_list():
return
def add_config_cell_list():
return
def update_config_cell_list():
return
def get_config_cell_account(cell_name):
with open(PATH_CELL_LIST, 'r') as f:
json_contents = json.load(f)
return json_contents[cell_name]
def get_config_unit():
with open(PATH_UNIT, 'r') as f:
json_contents = json.load(f)
return json_contents["unit_name"]
| import json
PATH_PROXY = 'config/proxy.txt'
PATH_UNIT = 'config/personium_unit.txt'
PATH_CELL_LIST = 'config/personium_cell_list.txt'
def update_config_proxy(data):
with open(PATH_PROXY, 'r') as f:
json_contents = json.load(f)
json_contents['http']['host']['host'] = data['http_host']
json_contents['http']['host']['port'] = data['http_port']
json_contents['http']['user']['user'] = data['http_user']
json_contents['http']['user']['password'] = data['http_password']
json_contents['https']['host']['host'] = data['https_host']
json_contents['https']['host']['port'] = data['https_port']
json_contents['https']['user']['user'] = data['https_user']
json_contents['https']['user']['password'] = data['https_password']
with open(PATH_PROXY, 'w') as f:
json.dump(json_contents, f, ensure_ascii=False, indent=4, sort_keys=True, separators=(',', ' : '))
return json_contents
def update_config_unit(data):
with open(PATH_UNIT, 'r') as f:
json_contents = json.load(f)
json_contents['unit_name'] = data
with open(PATH_UNIT, 'w') as f:
json.dump(json_contents, f, ensure_ascii=False, indent=4, sort_keys=True, separators=(',', ' : '))
return json_contents
def get_config_cell_list():
return
def add_config_cell_list():
return
def update_config_cell_list():
return
def get_config_cell_account(cell_name):
with open(PATH_CELL_LIST, 'r') as f:
json_contents = json.load(f)
return json_contents[cell_name]
def get_config_unit():
with open(PATH_UNIT, 'r') as f:
json_contents = json.load(f)
return json_contents["unit_name"]
| none | 1 | 2.449783 | 2 | |
test/test_msdnet.py | a-luengen/anytimeDnn | 1 | 6613883 | import unittest
import torch
import os
import shutil
from .context import msdnet
from .context import utils
from .context import train_acc
from .context import data_loader
class Object(object):
    """Bare attribute container; arbitrary fields are attached via setattr."""
class TestMSDNet(unittest.TestCase):
    """Smoke tests for the MSDNet model: construction, forward pass,
    accuracy computation and checkpoint save/resume."""
    # Shared argparse-style namespace, rebuilt per test in setUp().
    imagenet_args = Object()
    # Checkpoints written by test040 land here; removed in tearDown().
    test_checkpoint_dir = "test/checkpoint"
    def setUp(self):
        """Build an ImageNet-flavoured MSDNet argument namespace."""
        args = Object()
        # Growth/bottleneck factors per scale, encoded as a dash-separated string.
        test_growFact = "1-2-4-4".split("-")
        test_bnFact = "1-2-4-4".split("-")
        args_dict = {
            'grFactor': list(map(int, test_growFact)),
            'bnFactor': list(map(int, test_bnFact)),
            'nBlocks': 5,
            'nChannels': 32,
            'base': 4,
            'stepmode': 'even',
            'step': 4,
            'growthRate': 16,
            'prune': 'max',
            'bottleneck': True,
            'data': 'ImageNet',
            'nScales': len(test_growFact),
            'reduction': 0.5
        }
        # Copy the dict entries onto the namespace object attribute-by-attribute.
        for key in args_dict:
            setattr(args, key, args_dict[key])
        self.imagenet_args = args
    def tearDown(self):
        """Delete any checkpoint directory created by a test."""
        if os.path.isdir(self.test_checkpoint_dir):
            shutil.rmtree(self.test_checkpoint_dir)
    def test000_getMSDNetFromUtils_noException(self):
        """utils helper constructs a model without raising."""
        net = utils.get_msd_net_model()
    def test010_createMSDNetForImageNet_noException(self):
        """Direct construction from the ImageNet args succeeds."""
        self.assertTrue(len(self.imagenet_args.grFactor) == 4)
        self.assertTrue(len(self.imagenet_args.bnFactor) == 4)
        net = msdnet.models.msdnet(self.imagenet_args)
        self.assertIsNotNone(net)
    def test020_forwardingOfMSDNet_forImagenet_noException(self):
        """Forward pass on a random image yields one output per block."""
        net = msdnet.models.msdnet(self.imagenet_args)
        test_img = torch.rand(1, 3, 224, 224)
        res = net(test_img)
        self.assertIsNotNone(res)
        self.assertEqual(len(res), self.imagenet_args.nBlocks)
    def test030_calculateAccuracyOfMsdNetOutput_noException(self):
        """train_acc accepts each classifier head's output without raising."""
        net = msdnet.models.msdnet(self.imagenet_args)
        loader, _, _ = data_loader.get_zipped_dataloaders(data_loader.REDUCED_SET_PATH, 1)
        (test_img, test_target) = next(iter(loader))
        output = net(test_img)
        # A single-exit model returns a tensor; normalize to a list of heads.
        if not isinstance(output, list):
            output = [output]
        for i in range(len(output)):
            train_acc(output[i].data, test_target, topk=(1,5))
    def test040_CreateAndResumeCheckpoint_NoException(self):
        """Save a checkpoint to disk and resume the model from it."""
        net = msdnet.models.msdnet(self.imagenet_args)
        test_epoch = 4
        test_arch = 'msdnet'
        test_best_acc = 4.2
        test_lr = 0.001
        test_mom = 0.9
        test_weight_decay = 0.1
        test_optim = torch.optim.SGD(net.parameters(), lr=test_lr, momentum=test_mom, weight_decay=test_weight_decay)
        stateDict = utils.getStateDict(net, test_epoch, test_arch, test_best_acc, test_optim)
        self.assertIsNotNone(stateDict)
        utils.save_checkpoint(stateDict, False, test_arch, self.test_checkpoint_dir)
        self.assertTrue(os.path.isdir(self.test_checkpoint_dir))
        # File name convention must match what save_checkpoint produced.
        test_checkpoint_file_path = os.path.join(self.test_checkpoint_dir, f'{test_arch}_{test_epoch}_checkpoint.pth.tar')
        self.assertTrue(os.path.isfile(test_checkpoint_file_path))
        net = utils.resumeFromPath(test_checkpoint_file_path, net, test_optim)
| import unittest
import torch
import os
import shutil
from .context import msdnet
from .context import utils
from .context import train_acc
from .context import data_loader
class Object(object):
pass
class TestMSDNet(unittest.TestCase):
imagenet_args = Object()
test_checkpoint_dir = "test/checkpoint"
def setUp(self):
args = Object()
test_growFact = "1-2-4-4".split("-")
test_bnFact = "1-2-4-4".split("-")
args_dict = {
'grFactor': list(map(int, test_growFact)),
'bnFactor': list(map(int, test_bnFact)),
'nBlocks': 5,
'nChannels': 32,
'base': 4,
'stepmode': 'even',
'step': 4,
'growthRate': 16,
'prune': 'max',
'bottleneck': True,
'data': 'ImageNet',
'nScales': len(test_growFact),
'reduction': 0.5
}
for key in args_dict:
setattr(args, key, args_dict[key])
self.imagenet_args = args
def tearDown(self):
if os.path.isdir(self.test_checkpoint_dir):
shutil.rmtree(self.test_checkpoint_dir)
def test000_getMSDNetFromUtils_noException(self):
net = utils.get_msd_net_model()
def test010_createMSDNetForImageNet_noException(self):
self.assertTrue(len(self.imagenet_args.grFactor) == 4)
self.assertTrue(len(self.imagenet_args.bnFactor) == 4)
net = msdnet.models.msdnet(self.imagenet_args)
self.assertIsNotNone(net)
def test020_forwardingOfMSDNet_forImagenet_noException(self):
net = msdnet.models.msdnet(self.imagenet_args)
test_img = torch.rand(1, 3, 224, 224)
res = net(test_img)
self.assertIsNotNone(res)
self.assertEqual(len(res), self.imagenet_args.nBlocks)
def test030_calculateAccuracyOfMsdNetOutput_noException(self):
net = msdnet.models.msdnet(self.imagenet_args)
loader, _, _ = data_loader.get_zipped_dataloaders(data_loader.REDUCED_SET_PATH, 1)
(test_img, test_target) = next(iter(loader))
output = net(test_img)
if not isinstance(output, list):
output = [output]
for i in range(len(output)):
train_acc(output[i].data, test_target, topk=(1,5))
def test040_CreateAndResumeCheckpoint_NoException(self):
net = msdnet.models.msdnet(self.imagenet_args)
test_epoch = 4
test_arch = 'msdnet'
test_best_acc = 4.2
test_lr = 0.001
test_mom = 0.9
test_weight_decay = 0.1
test_optim = torch.optim.SGD(net.parameters(), lr=test_lr, momentum=test_mom, weight_decay=test_weight_decay)
stateDict = utils.getStateDict(net, test_epoch, test_arch, test_best_acc, test_optim)
self.assertIsNotNone(stateDict)
utils.save_checkpoint(stateDict, False, test_arch, self.test_checkpoint_dir)
self.assertTrue(os.path.isdir(self.test_checkpoint_dir))
test_checkpoint_file_path = os.path.join(self.test_checkpoint_dir, f'{test_arch}_{test_epoch}_checkpoint.pth.tar')
self.assertTrue(os.path.isfile(test_checkpoint_file_path))
net = utils.resumeFromPath(test_checkpoint_file_path, net, test_optim)
| none | 1 | 2.299874 | 2 | |
main.py | NathanPhanX/Music-Generation-A.I | 0 | 6613884 | <gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
import progressbar
import time
import gc
from scipy.io.wavfile import read, write
from keras.models import Model, load_model
from keras.layers import LSTM, LeakyReLU, Input, RepeatVector, TimeDistributed, Dense
# from keras.callbacks import ModelCheckpoint, EarlyStopping
model_path = 'Model\\MusicAI.h5' # Path to save the AI model
sequence = 64 # Number of data AI look at to predict
sequence_loop = 3 # number of loop for each encoder and decoder
layer = 192 # Number of layers
validation_split = 0.8 # Ratio of data that is used for training
patience = 5 # Number of epoch to wait before terminate training the model
batch_size = 32 # Size of batch
epoch = 2 # number of epoch
training_step = 2 # Number of steps used for training before new epoch
default_rate = 44100 # Default rate for .wav file
sample_rate = 441 # The rate used for getting data from wav file (100 times lower than default rate)
precision = 'int16' # Type of precision of data
max_precision = 32767 # The maximum number in the sound data
min_precision = -32768 # The minimum number in the sound data
num_song = 20 # Number of song in the data
debug = True # Display all necessary information if true
def load_data(song_index):
    """Build one training/validation split of stereo sample windows.

    Each input is a `sequence`-long window of (left, right) int16 samples,
    normalized to [0, 1]; each label one-hot encodes the sample that follows
    the window, one row per channel.
    """
    x_train, y_train, x_test, y_test = [], [], [], []
    widgets = ['Loading Data: ', progressbar.Bar()]
    # Read the .wav file and eliminate the non-essential
    # NOTE(review): `rate` is read but never used; the module assumes
    # `default_rate` (44100) — confirm the file actually matches.
    rate, music = read('Data\\tobu.wav')
    music = music[int(len(music) * 0.1): int(len(music) * 0.9)]
    # NOTE(review): offset of this "song" slice within the single wav; for
    # larger song_index the offset grows toward len(music), so the window
    # read below can run past the 1/20 slice — confirm this is intended.
    index = int(len(music) / (20 - song_index))
    bar = progressbar.ProgressBar(maxval=len(music) - sequence - 1, widgets=widgets)
    bar.start()
    # Load data
    # Step by default_rate/sample_rate (=100) samples, i.e. decimate 100x.
    for i in range(0, int(len(music) / 20), int(default_rate / sample_rate)):
        bar.update(i)
        # Hot encode label
        # Row 0 = left channel, row 1 = right; index shifted so min_precision
        # maps to position -1+? (value v -> slot v - min_precision - 1).
        label = np.zeros((2, int(max_precision - min_precision)), dtype=precision)
        label[0][music[index + i + sequence][0] - min_precision - 1] = 1
        label[1][music[index + i + sequence][1] - min_precision - 1] = 1
        # print(str('[' + str(np.argmax(label[0]) + min_precision)) + ' ' + str(np.argmax(label[1]) + min_precision) + ']')
        # print(str('[' + str(np.argmin(label[0]) - max_precision)) + ' ' + str(np.argmin(label[1]) - max_precision) + ']')
        # First `validation_split` fraction of the slice goes to training.
        if i < len(music) / 20 * validation_split:
            x_train.append(music[index + i: index + i + sequence, :])
            y_train.append(label)
        else:
            x_test.append(music[index + i: index + i + sequence, :])
            y_test.append(label)
    bar.finish()
    # Convert data into single precision (FP32)
    x_train = np.asarray(x_train).astype('float32')
    x_test = np.asarray(x_test).astype('float32')
    y_train = np.asarray(y_train).astype('float16')
    y_test = np.asarray(y_test).astype('float16')
    # Normalize data
    x_train = (x_train - min_precision) / (max_precision - min_precision)
    x_test = (x_test - min_precision) / (max_precision - min_precision)
    if debug:
        print(np.shape(x_train))
        print(np.shape(y_train))
        print(np.shape(x_test))
        print(np.shape(y_test))
    return x_train, y_train, x_test, y_test
def encoder_decoder_lstm():
    """Build and compile the LSTM encoder-decoder model.

    Input: (sequence, 2) normalized stereo windows. Output: two softmax
    distributions (one per channel) over all int16 sample values.
    """
    # Input node of the neural network for 2 channels
    input_x = Input(shape=(sequence, 2))
    x = LSTM(layer, return_sequences=True)(input_x)
    # Encoder
    # Each stage halves the LSTM width (layer / 2^(i+1)).
    for i in range(sequence_loop):
        x = LSTM(int(layer / (2 ** (i + 1))), return_sequences=True)(x)
        x = LSTM(int(layer / (2 ** (i + 1))), return_sequences=True)(x)
        x = LeakyReLU()(x)
    # Hidden state
    # Collapse to a single vector, then tile it back to sequence length.
    x = LSTM(int(layer / (2 ** (sequence_loop + 1))))(x)
    x = RepeatVector(sequence)(x)
    # Decoder
    # Widths mirror the encoder in reverse (layer / 2^(3-i)).
    for i in range(sequence_loop):
        x = LSTM(int(layer / (2 ** (3 - i))), return_sequences=True)(x)
        x = LSTM(int(layer / (2 ** (3 - i))), return_sequences=True)(x)
        x = LeakyReLU()(x)
    x = LSTM(4)(x)
    # One timestep per audio channel.
    x = RepeatVector(2)(x)
    # output
    output_x = TimeDistributed(Dense(max_precision - min_precision, activation='softmax'))(x)
    model = Model(inputs=input_x, outputs=output_x)
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['acc'])
    if debug:
        print(model.summary())
    return model
def plot_history(loss, acc):
    """Plot the collected loss/accuracy curves and save them as a PNG
    next to the model file.

    Args:
        loss: sequence of loss values gathered during evaluation.
        acc: sequence of accuracy values gathered during evaluation.
    """
    fig = plt.figure()
    result = fig.add_subplot(1, 1, 1)
    result.plot(loss, label='loss')
    result.plot(acc, label='acc')
    # Bug fix: labels were passed above but never rendered without a legend.
    result.legend()
    # Bug fix: model_path.split('h5')[0] + '.png' kept the trailing dot and
    # produced "...MusicAI..png"; strip the extension cleanly instead.
    plt.savefig(model_path.rsplit('.', 1)[0] + '.png')
def train():
    """Train the encoder-decoder over all song slices for `epoch` epochs,
    saving the model and a loss/accuracy plot after each slice."""
    # Build model
    model = encoder_decoder_lstm()
    # Running histories across the whole training, fed to plot_history().
    loss, acc = [], []
    # Start training
    for i in range(epoch):
        print('Epoch: ' + str(i + 1))
        for j in range(num_song - 1):
            # Load data and train model
            x_train, y_train, x_test, y_test = load_data(j)
            widgets = ['Training: ', progressbar.Bar()]
            bar = progressbar.ProgressBar(maxval=(len(x_train) + len(x_test)) * training_step, widgets=widgets)
            bar.start()
            for steps in range(training_step):
                # Manual mini-batch loop over the training split.
                for k in range(0, len(x_train), batch_size):
                    model.train_on_batch(x=x_train[k: k + batch_size], y=y_train[k: k + batch_size], reset_metrics=False)
                    bar.update(k + (len(x_train) + len(x_test)) * steps)
                # Evaluation pass; metrics accumulate into the histories.
                for k in range(0, len(x_test), batch_size):
                    model_result = model.test_on_batch(x=x_test[k: k + batch_size], y=y_test[k: k + batch_size], reset_metrics=False, return_dict=True)
                    loss.append(model_result['loss'])
                    acc.append(model_result['acc'])
                    bar.update(k + len(x_train) + (len(x_train) + len(x_test)) * steps)
            # Persist progress after every song slice.
            model.save(model_path)
            plot_history(loss, acc)
            bar.finish()
            # model.fit(x=x_train, y=y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=training_step, callbacks=[ModelCheckpoint(filepath=model_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min'), EarlyStopping(monitor='val_loss', mode='min', patience=patience)])
            # Free the memory to prevent out of memory
            time.sleep(10)
            model.reset_metrics()
            gc.collect()
def compose_music(seed=np.random.randint(0, 99999), seconds=5):
    """Generate `seconds` seconds of audio with the trained model and save
    it as Generated_Music.wav.

    Args:
        seed: RNG seed; the same seed reproduces the same piece. NOTE: the
            default is evaluated once at import time, so repeated calls
            without an explicit seed reuse that one value.
        seconds: clip length in seconds at `sample_rate`.
    """
    # Set seed
    np.random.seed(seed)
    # Seed the model with a random stereo window; keep the raw (un-normalized)
    # copy as the beginning of the output track.
    data = np.random.randint(np.random.randint(min_precision, -1), np.random.randint(0, max_precision), (1, sequence, 2))
    music = np.copy(data)
    data = (data - min_precision) / (max_precision - min_precision)
    # Load model
    model = load_model(model_path)
    # This is used for visualization
    widgets = ['Generating Music: ', progressbar.Bar()]
    bar = progressbar.ProgressBar(maxval=sample_rate * seconds, widgets=widgets)
    bar.start()
    for i in range(sample_rate * seconds):
        bar.update(i)
        # Inference: predict the next stereo sample from the current window.
        # (Bug fix: removed leftover debug print()/input('wait1') calls that
        # blocked generation on every single iteration.)
        result = model.predict(data)
        # Decode each channel's softmax back to a signed 16-bit sample value
        # (argmax - max_precision == argmax + min_precision + 1, the inverse
        # of the one-hot offset used in load_data).
        final_result = [int(np.argmax(result[0][0]) - max_precision), int(np.argmax(result[0][1]) - max_precision)]
        final_result = np.asarray(final_result)
        final_result = np.expand_dims(final_result, axis=0)
        final_result = np.expand_dims(final_result, axis=0)
        model.reset_states()
        model.reset_metrics()
        # Add result to music
        music = np.concatenate((music, final_result), axis=1)
        # Slide the window: drop the oldest sample, append the new one
        # normalized the same way as the training data.
        next_data = (final_result - min_precision) / (max_precision - min_precision)
        data = data[:, len(next_data):, :]
        data = np.concatenate((data, next_data), axis=1)
    bar.finish()
    if debug:
        print(np.shape(music[0]))
    music = music.astype(precision)
    # NOTE(review): written at `sample_rate` (441 Hz), the decimated rate the
    # model was trained on — confirm this is intended rather than default_rate.
    write('Generated_Music.wav', sample_rate, music[0])
def main():
    """Interactive entry point: choose to train the network or generate music."""
    print("Enter 1 to train the neural network, 2 to compose music")
    while True:
        user = input('Enter: ')
        flag = True
        if user == '1':
            train()
        elif user == '2':
            user_seed = input('Enter random number to compose different music or leave blank for random: ')
            if user_seed == '' or user_seed.isdigit():
                # Bug fix: seed and length were previously passed along as
                # strings (np.random.seed(str) and range(str) both raise), and
                # .isdigit() was called on the int default 10 (AttributeError).
                # Convert once here and keep everything numeric.
                seed = np.random.randint(0, 99999) if user_seed == '' else int(user_seed)
                user_time = input('Enter the number of seconds of music to be generated or leave blank for default 10 seconds: ')
                if user_time == '':
                    compose_music(seed, 10)
                elif user_time.isdigit():
                    compose_music(seed, int(user_time))
                else:
                    print('Invalid choice')
                    flag = False
            else:
                print('Invalid choice')
                flag = False
        else:
            # Bug fix: an unknown menu entry previously fell through silently
            # and exited the loop; report it and re-prompt instead.
            print('Invalid choice')
            flag = False
        if flag:
            break
if __name__ == '__main__':
    # NOTE(review): debug driver left in place — generates a clip, waits for
    # keypresses, dumps every sample to stdout, then generates again; the
    # interactive main() above is never called. Confirm before shipping.
    compose_music()
    input('wait')
    a, b = read('Generated_Music.wav')
    for i in range(len(b)):
        print(b[i])
    input('wait')
    compose_music()
| import numpy as np
import matplotlib.pyplot as plt
import progressbar
import time
import gc
from scipy.io.wavfile import read, write
from keras.models import Model, load_model
from keras.layers import LSTM, LeakyReLU, Input, RepeatVector, TimeDistributed, Dense
# from keras.callbacks import ModelCheckpoint, EarlyStopping
model_path = 'Model\\MusicAI.h5' # Path to save the AI model
sequence = 64 # Number of data AI look at to predict
sequence_loop = 3 # number of loop for each encoder and decoder
layer = 192 # Number of layers
validation_split = 0.8 # Ratio of data that is used for training
patience = 5 # Number of epoch to wait before terminate training the model
batch_size = 32 # Size of batch
epoch = 2 # number of epoch
training_step = 2 # Number of steps used for training before new epoch
default_rate = 44100 # Default rate for .wav file
sample_rate = 441 # The rate used for getting data from wav file (100 times lower than default rate)
precision = 'int16' # Type of precision of data
max_precision = 32767 # The maximum number in the sound data
min_precision = -32768 # The minimum number in the sound data
num_song = 20 # Number of song in the data
debug = True # Display all necessary information if true
def load_data(song_index):
x_train, y_train, x_test, y_test = [], [], [], []
widgets = ['Loading Data: ', progressbar.Bar()]
# Read the .wav file and eliminate the non-essential
rate, music = read('Data\\tobu.wav')
music = music[int(len(music) * 0.1): int(len(music) * 0.9)]
index = int(len(music) / (20 - song_index))
bar = progressbar.ProgressBar(maxval=len(music) - sequence - 1, widgets=widgets)
bar.start()
# Load data
for i in range(0, int(len(music) / 20), int(default_rate / sample_rate)):
bar.update(i)
# Hot encode label
label = np.zeros((2, int(max_precision - min_precision)), dtype=precision)
label[0][music[index + i + sequence][0] - min_precision - 1] = 1
label[1][music[index + i + sequence][1] - min_precision - 1] = 1
# print(str('[' + str(np.argmax(label[0]) + min_precision)) + ' ' + str(np.argmax(label[1]) + min_precision) + ']')
# print(str('[' + str(np.argmin(label[0]) - max_precision)) + ' ' + str(np.argmin(label[1]) - max_precision) + ']')
if i < len(music) / 20 * validation_split:
x_train.append(music[index + i: index + i + sequence, :])
y_train.append(label)
else:
x_test.append(music[index + i: index + i + sequence, :])
y_test.append(label)
bar.finish()
# Convert data into single precision (FP32)
x_train = np.asarray(x_train).astype('float32')
x_test = np.asarray(x_test).astype('float32')
y_train = np.asarray(y_train).astype('float16')
y_test = np.asarray(y_test).astype('float16')
# Normalize data
x_train = (x_train - min_precision) / (max_precision - min_precision)
x_test = (x_test - min_precision) / (max_precision - min_precision)
if debug:
print(np.shape(x_train))
print(np.shape(y_train))
print(np.shape(x_test))
print(np.shape(y_test))
return x_train, y_train, x_test, y_test
def encoder_decoder_lstm():
# Input node of the neural network for 2 channels
input_x = Input(shape=(sequence, 2))
x = LSTM(layer, return_sequences=True)(input_x)
# Encoder
for i in range(sequence_loop):
x = LSTM(int(layer / (2 ** (i + 1))), return_sequences=True)(x)
x = LSTM(int(layer / (2 ** (i + 1))), return_sequences=True)(x)
x = LeakyReLU()(x)
# Hidden state
x = LSTM(int(layer / (2 ** (sequence_loop + 1))))(x)
x = RepeatVector(sequence)(x)
# Decoder
for i in range(sequence_loop):
x = LSTM(int(layer / (2 ** (3 - i))), return_sequences=True)(x)
x = LSTM(int(layer / (2 ** (3 - i))), return_sequences=True)(x)
x = LeakyReLU()(x)
x = LSTM(4)(x)
x = RepeatVector(2)(x)
# output
output_x = TimeDistributed(Dense(max_precision - min_precision, activation='softmax'))(x)
model = Model(inputs=input_x, outputs=output_x)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['acc'])
if debug:
print(model.summary())
return model
def plot_history(loss, acc):
fig = plt.figure()
result = fig.add_subplot(1, 1, 1)
result.plot(loss, label='loss')
result.plot(acc, label='acc')
plt.savefig(model_path.split('h5')[0] + '.png')
def train():
# Build model
model = encoder_decoder_lstm()
loss, acc = [], []
# Start training
for i in range(epoch):
print('Epoch: ' + str(i + 1))
for j in range(num_song - 1):
# Load data and train model
x_train, y_train, x_test, y_test = load_data(j)
widgets = ['Training: ', progressbar.Bar()]
bar = progressbar.ProgressBar(maxval=(len(x_train) + len(x_test)) * training_step, widgets=widgets)
bar.start()
for steps in range(training_step):
for k in range(0, len(x_train), batch_size):
model.train_on_batch(x=x_train[k: k + batch_size], y=y_train[k: k + batch_size], reset_metrics=False)
bar.update(k + (len(x_train) + len(x_test)) * steps)
for k in range(0, len(x_test), batch_size):
model_result = model.test_on_batch(x=x_test[k: k + batch_size], y=y_test[k: k + batch_size], reset_metrics=False, return_dict=True)
loss.append(model_result['loss'])
acc.append(model_result['acc'])
bar.update(k + len(x_train) + (len(x_train) + len(x_test)) * steps)
model.save(model_path)
plot_history(loss, acc)
bar.finish()
# model.fit(x=x_train, y=y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=training_step, callbacks=[ModelCheckpoint(filepath=model_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min'), EarlyStopping(monitor='val_loss', mode='min', patience=patience)])
# Free the memory to prevent out of memory
time.sleep(10)
model.reset_metrics()
gc.collect()
def compose_music(seed=np.random.randint(0, 99999), seconds=5):
# Set seed
np.random.seed(seed)
# Generate data and music
data = np.random.randint(np.random.randint(min_precision, -1), np.random.randint(0, max_precision), (1, sequence, 2))
music = np.copy(data)
data = (data - min_precision) / (max_precision - min_precision)
# Load model
model = load_model(model_path)
# This is used for visualization
widgets = ['Generating Music: ', progressbar.Bar()]
bar = progressbar.ProgressBar(maxval=sample_rate * seconds, widgets=widgets)
bar.start()
for i in range(sample_rate * seconds):
bar.update(i)
# Interference
result = model.predict(data)
print(np.shape(result[0]))
print(np.argmax(result[0][0]))
print(np.argmax(result[0][1]))
print(result[0][0])
input('wait1')
final_result = [int(np.argmax(result[0][0]) - max_precision), int(np.argmax(result[0][1]) - max_precision)]
final_result = np.asarray(final_result)
final_result = np.expand_dims(final_result, axis=0)
final_result = np.expand_dims(final_result, axis=0)
model.reset_states()
model.reset_metrics()
# Add result to music
music = np.concatenate((music, final_result), axis=1)
# Process data for the next interference
next_data = (final_result - min_precision) / (max_precision - min_precision)
data = data[:, len(next_data):, :]
data = np.concatenate((data, next_data), axis=1)
bar.finish()
if debug:
print(np.shape(music[0]))
music = music.astype(precision)
write('Generated_Music.wav', sample_rate, music[0])
def main():
print("Enter 1 to train the neural network, 2 to compose music")
while True:
user = input('Enter: ')
flag = True
if user == '1':
train()
elif user == '2':
user_seed = input('Enter random number to compose different music or leave blank for random: ')
if user_seed == '' or user_seed.isdigit():
if user_seed == '':
user_seed = np.random.randint(0, 99999)
user_time = input('Enter the number of seconds of music to be generated or leave blank for default 10 seconds: ')
if user_time == '':
user_time = 10
if user_time.isdigit():
compose_music(user_seed, user_time)
else:
print('Invalid choice')
flag = False
else:
print('Invalid choice')
flag = False
if flag:
break
if __name__ == '__main__':
compose_music()
input('wait')
a, b = read('Generated_Music.wav')
for i in range(len(b)):
print(b[i])
input('wait')
compose_music() | en | 0.611028 | # from keras.callbacks import ModelCheckpoint, EarlyStopping # Path to save the AI model # Number of data AI look at to predict # number of loop for each encoder and decoder # Number of layers # Ratio of data that is used for training # Number of epoch to wait before terminate training the model # Size of batch # number of epoch # Number of steps used for training before new epoch # Default rate for .wav file # The rate used for getting data from wav file (100 times lower than default rate) # Type of precision of data # The maximum number in the sound data # The minimum number in the sound data # Number of song in the data # Display all necessary information if true # Read the .wav file and eliminate the non-essential # Load data # Hot encode label # print(str('[' + str(np.argmax(label[0]) + min_precision)) + ' ' + str(np.argmax(label[1]) + min_precision) + ']') # print(str('[' + str(np.argmin(label[0]) - max_precision)) + ' ' + str(np.argmin(label[1]) - max_precision) + ']') # Convert data into single precision (FP32) # Normalize data # Input node of the neural network for 2 channels # Encoder # Hidden state # Decoder # output # Build model # Start training # Load data and train model # model.fit(x=x_train, y=y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=training_step, callbacks=[ModelCheckpoint(filepath=model_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min'), EarlyStopping(monitor='val_loss', mode='min', patience=patience)]) # Free the memory to prevent out of memory # Set seed # Generate data and music # Load model # This is used for visualization # Interference # Add result to music # Process data for the next interference | 2.656943 | 3 |
LRE/test.py | MashowJ/Latte | 1 | 6613885 | <filename>LRE/test.py<gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
from lre import main
if __name__ == '__main__':
    # Script entry point: announce and delegate to the package runner.
    print('run main')
    main.run()
| <filename>LRE/test.py<gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
from lre import main
if __name__ == '__main__':
print('run main')
main.run()
| en | 0.44423 | #!/usr/bin/python # -*- coding: utf-8 -*- | 1.259677 | 1 |
src/pyscaffoldext/pyproject/templates/__init__.py | abravalheri/pyscaffoldext-pyproject | 0 | 6613886 | # -*- coding: utf-8 -*-
"""
Templates for all files this extension provides
"""
import os.path
import string
from pkgutil import get_data
def get_template(name):
    """Load a packaged template file as a :obj:`string.Template`.

    Args:
        name: template name without the ``.template`` suffix

    Returns:
        :obj:`string.Template`: the parsed template
    """
    namespace, pkg_name = __name__.split(".")[:2]
    template_path = os.path.join(pkg_name, "templates", name + ".template")
    raw = get_data(namespace, template_path)
    return string.Template(raw.decode(encoding='utf8'))
def pyproject_toml(opts):
    """Render the content of ``pyproject.toml``.

    Args:
        opts: mapping with the substitution parameters

    Returns:
        str: rendered file content
    """
    return get_template("pyproject_toml").substitute(opts)
| # -*- coding: utf-8 -*-
"""
Templates for all files this extension provides
"""
import os.path
import string
from pkgutil import get_data
def get_template(name):
"""Retrieve the template by name
Args:
name: name of template without suffix
Returns:
:obj:`string.Template`: template
"""
namespace, pkg_name = __name__.split(".")[:2]
file_name = "{name}.template".format(name=name)
data = get_data(namespace, os.path.join(pkg_name, "templates", file_name))
return string.Template(data.decode(encoding='utf8'))
def pyproject_toml(opts):
"""Template of pyproject.toml
Args:
opts: mapping parameters as dictionary
Returns:
str: file content as string
"""
template = get_template("pyproject_toml")
return template.substitute(opts)
| en | 0.513847 | # -*- coding: utf-8 -*- Templates for all files this extension provides Retrieve the template by name Args: name: name of template without suffix Returns: :obj:`string.Template`: template Template of pyproject.toml Args: opts: mapping parameters as dictionary Returns: str: file content as string | 2.879262 | 3 |
sample/my_module.py | seawolf42/cloud-functions-dispatch | 0 | 6613887 | <gh_stars>0
from functions import my_func
def call_dispatched_function():
    # Demo: invoke `my_func` with two fixed sample arguments.
    # NOTE(review): `my_func` is presumably wrapped for remote dispatch by the
    # cloud-functions-dispatch package — confirm against functions.py.
    a = 1
    b = 2
    my_func(a, b)
| from functions import my_func
def call_dispatched_function():
a = 1
b = 2
my_func(a, b) | none | 1 | 1.980759 | 2 | |
src/VioNet/models/v_d_config.py | davidGCR/VioDenseDuplication | 3 | 6613888 | <gh_stars>1-10
# '/Users/davidchoqueluqueroman/Documents/CODIGOS_SOURCES/pytorch-i3d/models/rgb_imagenet.pt'
# '/media/david/datos/Violence DATA/VioNet_weights/pytorch_i3d/rgb_imagenet.pt
#'/content/drive/My Drive/VIOLENCE DATA/Pretrained_Models/pytorch_i3d/rgb_imagenet.pt'
# '/content/DATASETS/Pretrained_Models/rgb_imagenet.pt'
# Model/ROI settings for the violence-detection backbone.
# NOTE(review): backbone_name is '3dresnet' but fc_input_dim is 528, which the
# inline comment below says corresponds to I3D Mixed_4e (3dresnet -> 1024) —
# confirm these two keys are consistent before training.
VD_CONFIG = {
    'backbone_name':'3dresnet', #i3d, 3dresnet
    'final_endpoint':'Mixed_4e', #Mixed_4e, so far, only for i3d
    'roi_layer_output':8,  # spatial output size of the ROI layer
    'roi_with_temporal_pool':True,
    'roi_spatial_scale':16,
    'fc_input_dim':528, #I3D-->528 for Mixed_4e, 832 for Mixed_4f, 3dResNet-->1024
    'roi_layer_type':'RoIAlign',
    'pretrained_backbone_model': '/Users/davidchoqueluqueroman/Documents/CODIGOS_SOURCES/pytorch-i3d/models/rgb_imagenet.pt'
}
TWO_STREAM_CONFIG = {
'backbone_name':'i3d', #i3d, 3dresnet
'final_endpoint':'Mixed_4e', #Mixed_4e, so far, only for i3d
'roi_layer_output':8,
'roi_with_temporal_pool':True,
'roi_spatial_scale':16,
# 'fc_input_dim':528, #I3D-->528 for Mixed_4e, 832 for Mixed_4f, 3dResNet-->1024
'roi_layer_type':'RoIAlign',
'pretrained_backbone_model': '/Users/davidchoqueluqueroman/Documents/CODIGOS_SOURCES/pytorch-i3d/models/rgb_imagenet.pt',
'2d_backbone': 'resnet50',
'base_out_layer': 'layer3',
'num_trainable_layers': 3,
'fc_input_dim': 1552
}
TWO_STREAM_CFAM_CONFIG_old = {
'backbone_name':'i3d', #i3d, 3dresnet
'final_endpoint':'Mixed_4e',#'Mixed_5b', #Mixed_4e, so far, only for i3d
'with_roipool': True,
'roi_layer_output':8,
'roi_with_temporal_pool':True,
'roi_spatial_scale':16,
'roi_layer_type':'RoIAlign',
'pretrained_backbone_model': '/media/david/datos/Violence DATA/VioNet_weights/pytorch_i3d/rgb_imagenet.pt',
'freeze_3d': False,
'2d_backbone': 'resnet50',
'base_out_layer': 'layer3',
'num_trainable_layers': 3,
'CFAMBlock_in_channels': 528+1024,#528+1024,#528+1024, #832+2048
'CFAMBlock_out_channels': 145, #1024
'fc_input_dim': 8*8*145,#512#7105#145,#9280,
}
# Two-stream (3D backbone + 2D ResNet) model with a CFAM fusion block and a
# binary classification head, using tube ROI pooling.
# NOTE(review): paths below are machine-specific absolute paths — adjust per host.
TWO_STREAM_CFAM_CONFIG = {
    'name': 'TWO_STREAM_CFAM_CONFIG',
    'backbone_name':'i3d', #i3d, 3dresnet
    'final_endpoint':'Mixed_4e',#'Mixed_5b', #Mixed_4e, so far, only for i3d
    'with_roipool': True,
    'head': 'binary',  # binary classification head (vs. 'regression' in the MIL variant)
    'roi_layer_output':8,
    'roi_with_temporal_pool':True,
    'roi_spatial_scale':16,
    'roi_layer_type':'RoIAlign',
    'pretrained_backbone_model': '/media/david/datos/Violence DATA/VioNet_weights/pytorch_i3d/rgb_imagenet.pt',
    'freeze_3d': True,  # keep the 3D backbone frozen during training
    '2d_backbone': 'resnet50',
    'base_out_layer': 'layer3',
    'num_trainable_layers': 3,
    'CFAMBlock_in_channels': 528+1024,#528+1024,#528+1024, #832+2048
    'CFAMBlock_out_channels': 512, #1024
    'fc_input_dim': 8*8*512,#512#7105#145,#9280,
    'load_weigths': None#'/media/david/datos/Violence DATA/VioNet_pth/rwf-2000_model(TwoStreamVD_Binary_CFam)_head(binary)_stream(rgb)_cv(1)_epochs(100)_num_tubes(4)_framesXtube(16)_tub_sampl_rand(True)_optimizer(Adadelta)_lr(0.001)_note(TWO_STREAM_CFAM_CONFIG+RWF-2000-150frames-motion-maps2-centralframe-corrected)/save_at_epoch-49.chk'#'/media/david/datos/Violence DATA/VioNet_pth/rwf-2000_model(TwoStreamVD_Binary_CFam)_head(binary)_stream(rgb)_cv(1)_epochs(100)_num_tubes(4)_framesXtube(16)_tub_sampl_rand(True)_optimizer(Adadelta)_lr(0.001)_note(TWO_STREAM_CFAM_CONFIG+otherTrack)/save_at_epoch-39.chk'
}
MIL_TWO_STREAM_CFAM_CONFIG = {
'name': 'MIL_TWO_STREAM_CFAM_CONFIG',
'backbone_name':'i3d', #i3d, 3dresnet
'final_endpoint':'Mixed_4e',#'Mixed_5b', #Mixed_4e, so far, only for i3d
'with_roipool': True,
'head': 'regression',
'roi_layer_output':8,
'roi_with_temporal_pool':True,
'roi_spatial_scale':16,
'roi_layer_type':'RoIAlign',
'pretrained_backbone_model': '/media/david/datos/Violence DATA/VioNet_weights/pytorch_i3d/rgb_imagenet.pt',
'freeze_3d': True,
'2d_backbone': 'resnet50',
'base_out_layer': 'layer3',
'num_trainable_layers': 3,
'CFAMBlock_in_channels': 528+1024,#528+1024,#528+1024, #832+2048
'CFAMBlock_out_channels': 512, #1024
'fc_input_dim': 8*8*512,#512#7105#145,#9280,
'load_weigths': None#'/media/david/datos/Violence DATA/VioNet_pth/rwf-2000_model(TwoStreamVD_Binary_CFam)_head(binary)_stream(rgb)_cv(1)_epochs(100)_num_tubes(4)_framesXtube(16)_tub_sampl_rand(True)_optimizer(Adadelta)_lr(0.001)_note(TWO_STREAM_CFAM_CONFIG+RWF-2000-150frames-motion-maps2-centralframe-corrected)/save_at_epoch-49.chk'#'/media/david/datos/Violence DATA/VioNet_pth/rwf-2000_model(TwoStreamVD_Binary_CFam)_head(binary)_stream(rgb)_cv(1)_epochs(100)_num_tubes(4)_framesXtube(16)_tub_sampl_rand(True)_optimizer(Adadelta)_lr(0.001)_note(TWO_STREAM_CFAM_CONFIG+otherTrack)/save_at_epoch-39.chk'
}
TWO_STREAM_CFAM_NO_TUBE_CONFIG = {
'backbone_name':'i3d', #i3d, 3dresnet
'final_endpoint':'Mixed_5b',#'Mixed_5b', #Mixed_4e, so far, only for i3d
'with_roipool': False,
'roi_layer_output':8,
'roi_with_temporal_pool':True,
'roi_spatial_scale':16,
'roi_layer_type':'RoIAlign',
'pretrained_backbone_model': '/media/david/datos/Violence DATA/VioNet_weights/pytorch_i3d/rgb_imagenet.pt',
'freeze_3d': False,
'2d_backbone': 'resnet50',
'base_out_layer': 'layer4',
'num_trainable_layers': 4,
'CFAMBlock_in_channels': 2048+832,#528+1024,#528+1024, #832+2048
'CFAMBlock_out_channels': 1024, #1024
'fc_input_dim': 7*7*1024,#512#7105#145,#9280,
}
TWO_STREAM_CFAM_SLOWRESNET_CONFIG = {
'backbone_name':'3dresnet', #i3d, 3dresnet
# 'final_endpoint':'Mixed_4e', #Mixed_4e, so far, only for i3d
'with_roipool': True,
'roi_layer_output':8,
'roi_with_temporal_pool':True,
'roi_spatial_scale':16,
'roi_layer_type':'RoIAlign',
# 'pretrained_backbone_model': '/media/david/datos/Violence DATA/VioNet_weights/pytorch_i3d/rgb_imagenet.pt',
'freeze_3d': False,
'2d_backbone': 'resnet50',
'base_out_layer': 'layer3',
'num_trainable_layers': 3,
'CFAMBlock_in_channels': 1024+1024, #1024
'CFAMBlock_out_channels': 145, #1024
'fc_input_dim': 9280,#512#7105#145,#9280,
}
TWO_STREAM_REGRESSION_CONFIG = {
'backbone_name':'i3d', #i3d, 3dresnet
'final_endpoint':'Mixed_4e', #Mixed_4e, so far, only for i3d
'roi_layer_output':8,
'roi_with_temporal_pool':True,
'roi_spatial_scale':16,
'roi_layer_type':'RoIAlign',
'pretrained_backbone_model': '/media/david/datos/Violence DATA/VioNet_weights/pytorch_i3d/rgb_imagenet.pt',
'2d_backbone': 'resnet50',
'base_out_layer': 'layer3',
'num_trainable_layers': 0,
'fc_input_dim': 1552
} | # '/Users/davidchoqueluqueroman/Documents/CODIGOS_SOURCES/pytorch-i3d/models/rgb_imagenet.pt'
# '/media/david/datos/Violence DATA/VioNet_weights/pytorch_i3d/rgb_imagenet.pt
#'/content/drive/My Drive/VIOLENCE DATA/Pretrained_Models/pytorch_i3d/rgb_imagenet.pt'
# '/content/DATASETS/Pretrained_Models/rgb_imagenet.pt'
VD_CONFIG = {
'backbone_name':'3dresnet', #i3d, 3dresnet
'final_endpoint':'Mixed_4e', #Mixed_4e, so far, only for i3d
'roi_layer_output':8,
'roi_with_temporal_pool':True,
'roi_spatial_scale':16,
'fc_input_dim':528, #I3D-->528 for Mixed_4e, 832 for Mixed_4f, 3dResNet-->1024
'roi_layer_type':'RoIAlign',
'pretrained_backbone_model': '/Users/davidchoqueluqueroman/Documents/CODIGOS_SOURCES/pytorch-i3d/models/rgb_imagenet.pt'
}
TWO_STREAM_CONFIG = {
'backbone_name':'i3d', #i3d, 3dresnet
'final_endpoint':'Mixed_4e', #Mixed_4e, so far, only for i3d
'roi_layer_output':8,
'roi_with_temporal_pool':True,
'roi_spatial_scale':16,
# 'fc_input_dim':528, #I3D-->528 for Mixed_4e, 832 for Mixed_4f, 3dResNet-->1024
'roi_layer_type':'RoIAlign',
'pretrained_backbone_model': '/Users/davidchoqueluqueroman/Documents/CODIGOS_SOURCES/pytorch-i3d/models/rgb_imagenet.pt',
'2d_backbone': 'resnet50',
'base_out_layer': 'layer3',
'num_trainable_layers': 3,
'fc_input_dim': 1552
}
TWO_STREAM_CFAM_CONFIG_old = {
'backbone_name':'i3d', #i3d, 3dresnet
'final_endpoint':'Mixed_4e',#'Mixed_5b', #Mixed_4e, so far, only for i3d
'with_roipool': True,
'roi_layer_output':8,
'roi_with_temporal_pool':True,
'roi_spatial_scale':16,
'roi_layer_type':'RoIAlign',
'pretrained_backbone_model': '/media/david/datos/Violence DATA/VioNet_weights/pytorch_i3d/rgb_imagenet.pt',
'freeze_3d': False,
'2d_backbone': 'resnet50',
'base_out_layer': 'layer3',
'num_trainable_layers': 3,
'CFAMBlock_in_channels': 528+1024,#528+1024,#528+1024, #832+2048
'CFAMBlock_out_channels': 145, #1024
'fc_input_dim': 8*8*145,#512#7105#145,#9280,
}
TWO_STREAM_CFAM_CONFIG = {
'name': 'TWO_STREAM_CFAM_CONFIG',
'backbone_name':'i3d', #i3d, 3dresnet
'final_endpoint':'Mixed_4e',#'Mixed_5b', #Mixed_4e, so far, only for i3d
'with_roipool': True,
'head': 'binary',
'roi_layer_output':8,
'roi_with_temporal_pool':True,
'roi_spatial_scale':16,
'roi_layer_type':'RoIAlign',
'pretrained_backbone_model': '/media/david/datos/Violence DATA/VioNet_weights/pytorch_i3d/rgb_imagenet.pt',
'freeze_3d': True,
'2d_backbone': 'resnet50',
'base_out_layer': 'layer3',
'num_trainable_layers': 3,
'CFAMBlock_in_channels': 528+1024,#528+1024,#528+1024, #832+2048
'CFAMBlock_out_channels': 512, #1024
'fc_input_dim': 8*8*512,#512#7105#145,#9280,
'load_weigths': None#'/media/david/datos/Violence DATA/VioNet_pth/rwf-2000_model(TwoStreamVD_Binary_CFam)_head(binary)_stream(rgb)_cv(1)_epochs(100)_num_tubes(4)_framesXtube(16)_tub_sampl_rand(True)_optimizer(Adadelta)_lr(0.001)_note(TWO_STREAM_CFAM_CONFIG+RWF-2000-150frames-motion-maps2-centralframe-corrected)/save_at_epoch-49.chk'#'/media/david/datos/Violence DATA/VioNet_pth/rwf-2000_model(TwoStreamVD_Binary_CFam)_head(binary)_stream(rgb)_cv(1)_epochs(100)_num_tubes(4)_framesXtube(16)_tub_sampl_rand(True)_optimizer(Adadelta)_lr(0.001)_note(TWO_STREAM_CFAM_CONFIG+otherTrack)/save_at_epoch-39.chk'
}
MIL_TWO_STREAM_CFAM_CONFIG = {
'name': 'MIL_TWO_STREAM_CFAM_CONFIG',
'backbone_name':'i3d', #i3d, 3dresnet
'final_endpoint':'Mixed_4e',#'Mixed_5b', #Mixed_4e, so far, only for i3d
'with_roipool': True,
'head': 'regression',
'roi_layer_output':8,
'roi_with_temporal_pool':True,
'roi_spatial_scale':16,
'roi_layer_type':'RoIAlign',
'pretrained_backbone_model': '/media/david/datos/Violence DATA/VioNet_weights/pytorch_i3d/rgb_imagenet.pt',
'freeze_3d': True,
'2d_backbone': 'resnet50',
'base_out_layer': 'layer3',
'num_trainable_layers': 3,
'CFAMBlock_in_channels': 528+1024,#528+1024,#528+1024, #832+2048
'CFAMBlock_out_channels': 512, #1024
'fc_input_dim': 8*8*512,#512#7105#145,#9280,
'load_weigths': None#'/media/david/datos/Violence DATA/VioNet_pth/rwf-2000_model(TwoStreamVD_Binary_CFam)_head(binary)_stream(rgb)_cv(1)_epochs(100)_num_tubes(4)_framesXtube(16)_tub_sampl_rand(True)_optimizer(Adadelta)_lr(0.001)_note(TWO_STREAM_CFAM_CONFIG+RWF-2000-150frames-motion-maps2-centralframe-corrected)/save_at_epoch-49.chk'#'/media/david/datos/Violence DATA/VioNet_pth/rwf-2000_model(TwoStreamVD_Binary_CFam)_head(binary)_stream(rgb)_cv(1)_epochs(100)_num_tubes(4)_framesXtube(16)_tub_sampl_rand(True)_optimizer(Adadelta)_lr(0.001)_note(TWO_STREAM_CFAM_CONFIG+otherTrack)/save_at_epoch-39.chk'
}
TWO_STREAM_CFAM_NO_TUBE_CONFIG = {
'backbone_name':'i3d', #i3d, 3dresnet
'final_endpoint':'Mixed_5b',#'Mixed_5b', #Mixed_4e, so far, only for i3d
'with_roipool': False,
'roi_layer_output':8,
'roi_with_temporal_pool':True,
'roi_spatial_scale':16,
'roi_layer_type':'RoIAlign',
'pretrained_backbone_model': '/media/david/datos/Violence DATA/VioNet_weights/pytorch_i3d/rgb_imagenet.pt',
'freeze_3d': False,
'2d_backbone': 'resnet50',
'base_out_layer': 'layer4',
'num_trainable_layers': 4,
'CFAMBlock_in_channels': 2048+832,#528+1024,#528+1024, #832+2048
'CFAMBlock_out_channels': 1024, #1024
'fc_input_dim': 7*7*1024,#512#7105#145,#9280,
}
TWO_STREAM_CFAM_SLOWRESNET_CONFIG = {
'backbone_name':'3dresnet', #i3d, 3dresnet
# 'final_endpoint':'Mixed_4e', #Mixed_4e, so far, only for i3d
'with_roipool': True,
'roi_layer_output':8,
'roi_with_temporal_pool':True,
'roi_spatial_scale':16,
'roi_layer_type':'RoIAlign',
# 'pretrained_backbone_model': '/media/david/datos/Violence DATA/VioNet_weights/pytorch_i3d/rgb_imagenet.pt',
'freeze_3d': False,
'2d_backbone': 'resnet50',
'base_out_layer': 'layer3',
'num_trainable_layers': 3,
'CFAMBlock_in_channels': 1024+1024, #1024
'CFAMBlock_out_channels': 145, #1024
'fc_input_dim': 9280,#512#7105#145,#9280,
}
TWO_STREAM_REGRESSION_CONFIG = {
'backbone_name':'i3d', #i3d, 3dresnet
'final_endpoint':'Mixed_4e', #Mixed_4e, so far, only for i3d
'roi_layer_output':8,
'roi_with_temporal_pool':True,
'roi_spatial_scale':16,
'roi_layer_type':'RoIAlign',
'pretrained_backbone_model': '/media/david/datos/Violence DATA/VioNet_weights/pytorch_i3d/rgb_imagenet.pt',
'2d_backbone': 'resnet50',
'base_out_layer': 'layer3',
'num_trainable_layers': 0,
'fc_input_dim': 1552
} | en | 0.419505 | # '/Users/davidchoqueluqueroman/Documents/CODIGOS_SOURCES/pytorch-i3d/models/rgb_imagenet.pt' # '/media/david/datos/Violence DATA/VioNet_weights/pytorch_i3d/rgb_imagenet.pt #'/content/drive/My Drive/VIOLENCE DATA/Pretrained_Models/pytorch_i3d/rgb_imagenet.pt' # '/content/DATASETS/Pretrained_Models/rgb_imagenet.pt' #i3d, 3dresnet #Mixed_4e, so far, only for i3d #I3D-->528 for Mixed_4e, 832 for Mixed_4f, 3dResNet-->1024 #i3d, 3dresnet #Mixed_4e, so far, only for i3d # 'fc_input_dim':528, #I3D-->528 for Mixed_4e, 832 for Mixed_4f, 3dResNet-->1024 #i3d, 3dresnet #'Mixed_5b', #Mixed_4e, so far, only for i3d #528+1024,#528+1024, #832+2048 #1024 #512#7105#145,#9280, #i3d, 3dresnet #'Mixed_5b', #Mixed_4e, so far, only for i3d #528+1024,#528+1024, #832+2048 #1024 #512#7105#145,#9280, #'/media/david/datos/Violence DATA/VioNet_pth/rwf-2000_model(TwoStreamVD_Binary_CFam)_head(binary)_stream(rgb)_cv(1)_epochs(100)_num_tubes(4)_framesXtube(16)_tub_sampl_rand(True)_optimizer(Adadelta)_lr(0.001)_note(TWO_STREAM_CFAM_CONFIG+RWF-2000-150frames-motion-maps2-centralframe-corrected)/save_at_epoch-49.chk'#'/media/david/datos/Violence DATA/VioNet_pth/rwf-2000_model(TwoStreamVD_Binary_CFam)_head(binary)_stream(rgb)_cv(1)_epochs(100)_num_tubes(4)_framesXtube(16)_tub_sampl_rand(True)_optimizer(Adadelta)_lr(0.001)_note(TWO_STREAM_CFAM_CONFIG+otherTrack)/save_at_epoch-39.chk' #i3d, 3dresnet #'Mixed_5b', #Mixed_4e, so far, only for i3d #528+1024,#528+1024, #832+2048 #1024 #512#7105#145,#9280, #'/media/david/datos/Violence DATA/VioNet_pth/rwf-2000_model(TwoStreamVD_Binary_CFam)_head(binary)_stream(rgb)_cv(1)_epochs(100)_num_tubes(4)_framesXtube(16)_tub_sampl_rand(True)_optimizer(Adadelta)_lr(0.001)_note(TWO_STREAM_CFAM_CONFIG+RWF-2000-150frames-motion-maps2-centralframe-corrected)/save_at_epoch-49.chk'#'/media/david/datos/Violence 
DATA/VioNet_pth/rwf-2000_model(TwoStreamVD_Binary_CFam)_head(binary)_stream(rgb)_cv(1)_epochs(100)_num_tubes(4)_framesXtube(16)_tub_sampl_rand(True)_optimizer(Adadelta)_lr(0.001)_note(TWO_STREAM_CFAM_CONFIG+otherTrack)/save_at_epoch-39.chk' #i3d, 3dresnet #'Mixed_5b', #Mixed_4e, so far, only for i3d #528+1024,#528+1024, #832+2048 #1024 #512#7105#145,#9280, #i3d, 3dresnet # 'final_endpoint':'Mixed_4e', #Mixed_4e, so far, only for i3d # 'pretrained_backbone_model': '/media/david/datos/Violence DATA/VioNet_weights/pytorch_i3d/rgb_imagenet.pt', #1024 #1024 #512#7105#145,#9280, #i3d, 3dresnet #Mixed_4e, so far, only for i3d | 1.365699 | 1 |
src/i6/__init__.py | kruserr/i6 | 1 | 6613889 | <filename>src/i6/__init__.py
"""
i6
A standardized collection of python libs and tools.
Example:
```
import i6
# Http request
i6.http.get('EXAMPLE/api/endpoint/')
# Attempt to use Dockerfile in cwd
centos = i6.container().run()
# Update container to git repo latest tag
i6.git({
'container': centos,
}).update()
# Start FTP server
i6.ftp().run()
# Connect to postgres database
db = i6.db(
info = False,
debug = True,
db_conn_string ='postgresql+psycopg2://user:password@host:5432/database'
)
```
:copyright: 2020 kruserr
:license: MIT
"""
import i6.crypto
from i6.db import db
from i6.cli import cli
from i6.log import log
from i6.ftp import ftp
from i6.git import git
from i6.http import http
from i6.util import util
from i6.shell import shell
from i6.container import container
from i6.classes.Base import Base
from i6.classes.List import List
| <filename>src/i6/__init__.py
"""
i6
A standardized collection of python libs and tools.
Example:
```
import i6
# Http request
i6.http.get('EXAMPLE/api/endpoint/')
# Attempt to use Dockerfile in cwd
centos = i6.container().run()
# Update container to git repo latest tag
i6.git({
'container': centos,
}).update()
# Start FTP server
i6.ftp().run()
# Connect to postgres database
db = i6.db(
info = False,
debug = True,
db_conn_string ='postgresql+psycopg2://user:password@host:5432/database'
)
```
:copyright: 2020 kruserr
:license: MIT
"""
import i6.crypto
from i6.db import db
from i6.cli import cli
from i6.log import log
from i6.ftp import ftp
from i6.git import git
from i6.http import http
from i6.util import util
from i6.shell import shell
from i6.container import container
from i6.classes.Base import Base
from i6.classes.List import List
| en | 0.337297 | i6 A standardized collection of python libs and tools. Example: ``` import i6 # Http request i6.http.get('EXAMPLE/api/endpoint/') # Attempt to use Dockerfile in cwd centos = i6.container().run() # Update container to git repo latest tag i6.git({ 'container': centos, }).update() # Start FTP server i6.ftp().run() # Connect to postgres database db = i6.db( info = False, debug = True, db_conn_string ='postgresql+psycopg2://user:password@host:5432/database' ) ``` :copyright: 2020 kruserr :license: MIT | 2.212827 | 2 |
nearest_building.py | PSMA/beta-nearest-building | 0 | 6613890 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
NearestBuildingsToPoint
A QGIS plugin
This plug in plots buildings that are closest to a point within a given radius
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2018-09-11
git sha : $Format:%H$
copyright : (C) 2018 by PSMA
email : <EMAIL>
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt5.QtCore import * # QSettings, QTranslator, qVersion, QCoreApplication
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QAction
from PyQt5.QtWidgets import QMessageBox
# Initialize Qt resources from file resources.py
from .resources import *
# Import the code for the dialog
from .nearest_building_dialog import NearestBuildingsToPointDialog
import os.path
from pathlib import Path
#psma - - -
import requests
import json
import re
#ref: https://gis.stackexchange.com/questions/253733/how-to-get-co-ordinates-of-points-on-mouse-click-in-pyqgis
from qgis.gui import QgsMapToolEmitPoint
#https://gis.stackexchange.com/questions/255803/how-do-i-fix-name-iface-is-not-defined
from qgis.utils import iface
from .ClickManagement import * #to manage canvas
from .ProjectGDA import *
def resolve_file_path(name, basepath=None):
# Handler for resolving to the plugin directory
# Pinched from https://gis.stackexchange.com/a/130031
if not basepath:
basepath = os.path.dirname(os.path.realpath(__file__))
return os.path.join(basepath, name)
def load_api_key_config():
# Load the API Key from file, if doesn't exist return an empty string
creds_file = Path(resolve_file_path("credentials.json"))
if creds_file.is_file():
with open(creds_file) as f:
data = json.load(f)
return data["api_key"]
return ''
class NearestBuildingsToPoint:
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'NearestBuildingsToPoint_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Create the dialog (after translation) and keep reference
self.dlg = NearestBuildingsToPointDialog()
# Declare instance attributes
self.actions = []
self.menu = self.tr(u'&Nearest Building')
# TODO: We are going to let the user set this up in a future iteration
self.toolbar = self.iface.addToolBar(u'NearestBuildingsToPoint')
self.toolbar.setObjectName(u'NearestBuildingsToPoint')
# Load the API Key from the config file and set it within the form
self.api_key = load_api_key_config()
self.dlg.input_apikey.setText(self.api_key)
# Setup the click handler to save the updated API key
self.dlg.btn_api_set.clicked.connect(self.set_api_key)
# noinspection PyMethodMayBeStatic
def tr(self, message):
self.dlg.btn_buildings_by_latlon.clicked.connect(self.buildingpoint_keyboard)
self.dlg.btn_get_buildings_by_map.clicked.connect(self.buildingpoint_click)
# store layer id
self.layerid = ''
self.layer = None
return QCoreApplication.translate('NearestBuildingsToPoint', message)
def set_api_key(self):
new_key = self.dlg.input_apikey.text()
creds_file = Path(resolve_file_path("credentials.json"))
with open(creds_file, 'w') as f:
json.dump({'api_key': new_key}, f)
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip=None,
whats_this=None,
parent=None):
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
self.toolbar.addAction(action)
if add_to_menu:
self.iface.addPluginToMenu(
self.menu,
action)
self.actions.append(action)
return action
def initGui(self):
icon_path = ':/plugins/nearest_building/geoscape_ico.png'
self.add_action(
icon_path,
text=self.tr(u'Nearest Buildings'),
callback=self.run,
parent=self.iface.mainWindow())
def unload(self):
for action in self.actions:
self.iface.removePluginMenu(
self.tr(u'&Nearest Building'),
action)
self.iface.removeToolBarIcon(action)
del self.toolbar
def run(self):
self.dlg.show()
result = self.dlg.exec_()
if result:
pass
def buildingpoint_click(self):
print("in neareast_buildin.py in NearestBuildingsToPoint Class in def buildingpoint_click")
ct = GetClick(self.iface, self.nearest_building);
self.previous_map_tool = self.iface.mapCanvas().mapTool()
self.iface.mapCanvas().setMapTool(ct)
def buildingpoint_keyboard(self):
var_lat = self.dlg.lineEdit.text()
var_long = self.dlg.lineEdit_2.text()
self.get_building_ids(var_lat, var_long, self.dlg.input_radius.text())
def nearest_building(self, point):
pt = pointGDA94(point, self.iface.mapCanvas().mapSettings().destinationCrs())
long_ord = (pt[0])
lat_ord = (pt[1])
self.get_building_ids(lat_ord, long_ord, self.dlg.input_radius.text())
def get_building_ids(self, latx, longy, radiusdist):
bl_url = 'https://api.psma.com.au/beta/v1/buildings/?latLong=' + str(latx) + '%2C' + str(
longy) + '&radius=' + str(radiusdist) + '&page=1&perPage=100&include=footprint2d'
print('Getting buidings: ' + bl_url)
headers = {
"Authorization": self.api_key,
"Accept": "application/json"
}
response = requests.get(bl_url, headers=headers, verify=False)
response_data = response.json()
fc = {
"type": 'FeatureCollection',
"features": []
}
if (response_data['message']):
QMessageBox.about(None, "Error retrieving data", "Error retrieving data " +\
str(response_data['message']))
return None
if len(response_data['data']) > 0:
for item in response_data['data']:
building_id = item['buildingId']
print(item)
fc["features"].append({
"geometry": item['footprint2d'],
"properties": {
"BuildingId": building_id
}
})
vlayer = QgsVectorLayer(json.dumps(fc), "Geoscape", "ogr")
QgsProject.instance().addMapLayer(vlayer)
| # -*- coding: utf-8 -*-
"""
/***************************************************************************
NearestBuildingsToPoint
A QGIS plugin
This plug in plots buildings that are closest to a point within a given radius
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2018-09-11
git sha : $Format:%H$
copyright : (C) 2018 by PSMA
email : <EMAIL>
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt5.QtCore import * # QSettings, QTranslator, qVersion, QCoreApplication
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QAction
from PyQt5.QtWidgets import QMessageBox
# Initialize Qt resources from file resources.py
from .resources import *
# Import the code for the dialog
from .nearest_building_dialog import NearestBuildingsToPointDialog
import os.path
from pathlib import Path
#psma - - -
import requests
import json
import re
#ref: https://gis.stackexchange.com/questions/253733/how-to-get-co-ordinates-of-points-on-mouse-click-in-pyqgis
from qgis.gui import QgsMapToolEmitPoint
#https://gis.stackexchange.com/questions/255803/how-do-i-fix-name-iface-is-not-defined
from qgis.utils import iface
from .ClickManagement import * #to manage canvas
from .ProjectGDA import *
def resolve_file_path(name, basepath=None):
# Handler for resolving to the plugin directory
# Pinched from https://gis.stackexchange.com/a/130031
if not basepath:
basepath = os.path.dirname(os.path.realpath(__file__))
return os.path.join(basepath, name)
def load_api_key_config():
# Load the API Key from file, if doesn't exist return an empty string
creds_file = Path(resolve_file_path("credentials.json"))
if creds_file.is_file():
with open(creds_file) as f:
data = json.load(f)
return data["api_key"]
return ''
class NearestBuildingsToPoint:
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'NearestBuildingsToPoint_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Create the dialog (after translation) and keep reference
self.dlg = NearestBuildingsToPointDialog()
# Declare instance attributes
self.actions = []
self.menu = self.tr(u'&Nearest Building')
# TODO: We are going to let the user set this up in a future iteration
self.toolbar = self.iface.addToolBar(u'NearestBuildingsToPoint')
self.toolbar.setObjectName(u'NearestBuildingsToPoint')
# Load the API Key from the config file and set it within the form
self.api_key = load_api_key_config()
self.dlg.input_apikey.setText(self.api_key)
# Setup the click handler to save the updated API key
self.dlg.btn_api_set.clicked.connect(self.set_api_key)
# noinspection PyMethodMayBeStatic
def tr(self, message):
self.dlg.btn_buildings_by_latlon.clicked.connect(self.buildingpoint_keyboard)
self.dlg.btn_get_buildings_by_map.clicked.connect(self.buildingpoint_click)
# store layer id
self.layerid = ''
self.layer = None
return QCoreApplication.translate('NearestBuildingsToPoint', message)
def set_api_key(self):
new_key = self.dlg.input_apikey.text()
creds_file = Path(resolve_file_path("credentials.json"))
with open(creds_file, 'w') as f:
json.dump({'api_key': new_key}, f)
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip=None,
whats_this=None,
parent=None):
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
self.toolbar.addAction(action)
if add_to_menu:
self.iface.addPluginToMenu(
self.menu,
action)
self.actions.append(action)
return action
def initGui(self):
icon_path = ':/plugins/nearest_building/geoscape_ico.png'
self.add_action(
icon_path,
text=self.tr(u'Nearest Buildings'),
callback=self.run,
parent=self.iface.mainWindow())
def unload(self):
for action in self.actions:
self.iface.removePluginMenu(
self.tr(u'&Nearest Building'),
action)
self.iface.removeToolBarIcon(action)
del self.toolbar
def run(self):
self.dlg.show()
result = self.dlg.exec_()
if result:
pass
def buildingpoint_click(self):
print("in neareast_buildin.py in NearestBuildingsToPoint Class in def buildingpoint_click")
ct = GetClick(self.iface, self.nearest_building);
self.previous_map_tool = self.iface.mapCanvas().mapTool()
self.iface.mapCanvas().setMapTool(ct)
def buildingpoint_keyboard(self):
var_lat = self.dlg.lineEdit.text()
var_long = self.dlg.lineEdit_2.text()
self.get_building_ids(var_lat, var_long, self.dlg.input_radius.text())
def nearest_building(self, point):
pt = pointGDA94(point, self.iface.mapCanvas().mapSettings().destinationCrs())
long_ord = (pt[0])
lat_ord = (pt[1])
self.get_building_ids(lat_ord, long_ord, self.dlg.input_radius.text())
def get_building_ids(self, latx, longy, radiusdist):
bl_url = 'https://api.psma.com.au/beta/v1/buildings/?latLong=' + str(latx) + '%2C' + str(
longy) + '&radius=' + str(radiusdist) + '&page=1&perPage=100&include=footprint2d'
print('Getting buidings: ' + bl_url)
headers = {
"Authorization": self.api_key,
"Accept": "application/json"
}
response = requests.get(bl_url, headers=headers, verify=False)
response_data = response.json()
fc = {
"type": 'FeatureCollection',
"features": []
}
if (response_data['message']):
QMessageBox.about(None, "Error retrieving data", "Error retrieving data " +\
str(response_data['message']))
return None
if len(response_data['data']) > 0:
for item in response_data['data']:
building_id = item['buildingId']
print(item)
fc["features"].append({
"geometry": item['footprint2d'],
"properties": {
"BuildingId": building_id
}
})
vlayer = QgsVectorLayer(json.dumps(fc), "Geoscape", "ogr")
QgsProject.instance().addMapLayer(vlayer)
| en | 0.674886 | # -*- coding: utf-8 -*- /*************************************************************************** NearestBuildingsToPoint A QGIS plugin This plug in plots buildings that are closest to a point within a given radius Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/ ------------------- begin : 2018-09-11 git sha : $Format:%H$ copyright : (C) 2018 by PSMA email : <EMAIL> ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ # QSettings, QTranslator, qVersion, QCoreApplication # Initialize Qt resources from file resources.py # Import the code for the dialog #psma - - - #ref: https://gis.stackexchange.com/questions/253733/how-to-get-co-ordinates-of-points-on-mouse-click-in-pyqgis #https://gis.stackexchange.com/questions/255803/how-do-i-fix-name-iface-is-not-defined #to manage canvas # Handler for resolving to the plugin directory # Pinched from https://gis.stackexchange.com/a/130031 # Load the API Key from file, if doesn't exist return an empty string Constructor. :param iface: An interface instance that will be passed to this class which provides the hook by which you can manipulate the QGIS application at run time. 
:type iface: QgsInterface # Save reference to the QGIS interface # initialize plugin directory # initialize locale # Create the dialog (after translation) and keep reference # Declare instance attributes # TODO: We are going to let the user set this up in a future iteration # Load the API Key from the config file and set it within the form # Setup the click handler to save the updated API key # noinspection PyMethodMayBeStatic # store layer id | 1.793916 | 2 |
test/model/test_entry.py | HansBug/jentry | 0 | 6613891 | <reponame>HansBug/jentry
import pytest
from jentry.model import JavaEntry
@pytest.mark.unittest
class TestModelEntry:
def test_java_entry(self):
e = JavaEntry('src/Main.java', 'main.package', 'MainClazz')
assert e.filename == 'src/Main.java'
assert e.package == 'main.package'
assert e.clazz == 'MainClazz'
assert e.full_name == 'main.package.MainClazz'
assert str(e) == 'main.package.MainClazz'
assert repr(e) == "<JavaEntry class: main.package.MainClazz, filename: 'src/Main.java'>"
def test_java_entry_without_package(self):
e = JavaEntry('src/Main.java', None, 'MainClazz')
assert e.filename == 'src/Main.java'
assert e.package is None
assert e.clazz == 'MainClazz'
assert e.full_name == 'MainClazz'
assert str(e) == 'MainClazz'
assert repr(e) == "<JavaEntry class: MainClazz, filename: 'src/Main.java'>"
def test_java_entry_without_filename(self):
e = JavaEntry(None, 'main.package', 'MainClazz')
assert e.filename is None
assert e.package == 'main.package'
assert e.clazz == 'MainClazz'
assert e.full_name == 'main.package.MainClazz'
assert str(e) == 'main.package.MainClazz'
assert repr(e) == "<JavaEntry class: main.package.MainClazz>"
| import pytest
from jentry.model import JavaEntry
@pytest.mark.unittest
class TestModelEntry:
def test_java_entry(self):
e = JavaEntry('src/Main.java', 'main.package', 'MainClazz')
assert e.filename == 'src/Main.java'
assert e.package == 'main.package'
assert e.clazz == 'MainClazz'
assert e.full_name == 'main.package.MainClazz'
assert str(e) == 'main.package.MainClazz'
assert repr(e) == "<JavaEntry class: main.package.MainClazz, filename: 'src/Main.java'>"
def test_java_entry_without_package(self):
e = JavaEntry('src/Main.java', None, 'MainClazz')
assert e.filename == 'src/Main.java'
assert e.package is None
assert e.clazz == 'MainClazz'
assert e.full_name == 'MainClazz'
assert str(e) == 'MainClazz'
assert repr(e) == "<JavaEntry class: MainClazz, filename: 'src/Main.java'>"
def test_java_entry_without_filename(self):
e = JavaEntry(None, 'main.package', 'MainClazz')
assert e.filename is None
assert e.package == 'main.package'
assert e.clazz == 'MainClazz'
assert e.full_name == 'main.package.MainClazz'
assert str(e) == 'main.package.MainClazz'
assert repr(e) == "<JavaEntry class: main.package.MainClazz>" | none | 1 | 2.749995 | 3 | |
api/views.py | benjaminbills/portfolio | 0 | 6613892 | from rest_framework import generics
from rest_framework.decorators import api_view
from api.email import send_email
from portfolio.models import Project
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from rest_framework import serializers, status
from .serializers import ProjectSerializer
from rest_framework.permissions import BasePermission, IsAdminUser, DjangoModelPermissions, SAFE_METHODS
@api_view(['GET'])
def projectList(request):
project = Project.objects.all()
print(project)
serializer = ProjectSerializer(project, many=True)
return Response(serializer.data)
@api_view(['POST'])
def SendMail(request):
## send message.
data = request.data
try:
message = data['message']
name = data['name']
email = data['email']
send_email(name, message, email)
message = {'detail':'successfully sent message'}
return Response(message,status=status.HTTP_200_OK)
except:
message = {'detail':'fill in your name, email, message'}
return Response(message,status=status.HTTP_406_NOT_ACCEPTABLE) | from rest_framework import generics
from rest_framework.decorators import api_view
from api.email import send_email
from portfolio.models import Project
from rest_framework.response import Response
from rest_framework.decorators import api_view, permission_classes
from rest_framework import serializers, status
from .serializers import ProjectSerializer
from rest_framework.permissions import BasePermission, IsAdminUser, DjangoModelPermissions, SAFE_METHODS
@api_view(['GET'])
def projectList(request):
project = Project.objects.all()
print(project)
serializer = ProjectSerializer(project, many=True)
return Response(serializer.data)
@api_view(['POST'])
def SendMail(request):
## send message.
data = request.data
try:
message = data['message']
name = data['name']
email = data['email']
send_email(name, message, email)
message = {'detail':'successfully sent message'}
return Response(message,status=status.HTTP_200_OK)
except:
message = {'detail':'fill in your name, email, message'}
return Response(message,status=status.HTTP_406_NOT_ACCEPTABLE) | en | 0.196695 | ## send message. | 2.00306 | 2 |
insights/tests/datasources/test_ps.py | maxamillion/insights-core | 121 | 6613893 | <reponame>maxamillion/insights-core<gh_stars>100-1000
import pytest
from mock.mock import Mock
from insights.core.dr import SkipComponent
from insights.core.spec_factory import DatasourceProvider
from insights.specs.datasources.ps import ps_eo_cmd, LocalSpecs
PS_DATA = """
PID COMMAND
1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
2 [kthreadd]
3 [rcu_gp]
4 [rcu_par_gp]
6 [kworker/0:0H-events_highpri]
9 [mm_percpu_wq]
10 [rcu_tasks_kthre]
11 /usr/bin/python3 /home/user1/python_app.py
12 [kworker/u16:0-kcryptd/253:0]
"""
PS_EXPECTED = """
PID COMMAND
1 /usr/lib/systemd/systemd
2 [kthreadd]
3 [rcu_gp]
4 [rcu_par_gp]
6 [kworker/0:0H-events_highpri]
9 [mm_percpu_wq]
10 [rcu_tasks_kthre]
11 /usr/bin/python3
12 [kworker/u16:0-kcryptd/253:0]
"""
PS_BAD = "Command not found"
PS_EMPTY = """
PID COMMAND
"""
RELATIVE_PATH = 'insights_commands/ps_eo_cmd'
def test_ps_eo_cmd():
ps_eo_args = Mock()
ps_eo_args.content = PS_DATA.splitlines()
broker = {LocalSpecs.ps_eo_args: ps_eo_args}
result = ps_eo_cmd(broker)
assert result is not None
assert isinstance(result, DatasourceProvider)
expected = DatasourceProvider(content=PS_EXPECTED.strip(), relative_path=RELATIVE_PATH)
assert result.content == expected.content
assert result.relative_path == expected.relative_path
def test_ps_eo_cmd_bad():
ps_eo_args = Mock()
ps_eo_args.content = PS_BAD.splitlines()
broker = {LocalSpecs.ps_eo_args: ps_eo_args}
with pytest.raises(SkipComponent) as e:
ps_eo_cmd(broker)
assert e is not None
def test_ps_eo_cmd_empty():
    """A header-only ps listing (no processes) must raise SkipComponent."""
    fake_spec = Mock()
    fake_spec.content = PS_EMPTY.splitlines()
    with pytest.raises(SkipComponent) as excinfo:
        ps_eo_cmd({LocalSpecs.ps_eo_args: fake_spec})
    assert excinfo is not None
| import pytest
from mock.mock import Mock
from insights.core.dr import SkipComponent
from insights.core.spec_factory import DatasourceProvider
from insights.specs.datasources.ps import ps_eo_cmd, LocalSpecs
PS_DATA = """
PID COMMAND
1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31
2 [kthreadd]
3 [rcu_gp]
4 [rcu_par_gp]
6 [kworker/0:0H-events_highpri]
9 [mm_percpu_wq]
10 [rcu_tasks_kthre]
11 /usr/bin/python3 /home/user1/python_app.py
12 [kworker/u16:0-kcryptd/253:0]
"""
PS_EXPECTED = """
PID COMMAND
1 /usr/lib/systemd/systemd
2 [kthreadd]
3 [rcu_gp]
4 [rcu_par_gp]
6 [kworker/0:0H-events_highpri]
9 [mm_percpu_wq]
10 [rcu_tasks_kthre]
11 /usr/bin/python3
12 [kworker/u16:0-kcryptd/253:0]
"""
PS_BAD = "Command not found"
PS_EMPTY = """
PID COMMAND
"""
RELATIVE_PATH = 'insights_commands/ps_eo_cmd'
def test_ps_eo_cmd():
ps_eo_args = Mock()
ps_eo_args.content = PS_DATA.splitlines()
broker = {LocalSpecs.ps_eo_args: ps_eo_args}
result = ps_eo_cmd(broker)
assert result is not None
assert isinstance(result, DatasourceProvider)
expected = DatasourceProvider(content=PS_EXPECTED.strip(), relative_path=RELATIVE_PATH)
assert result.content == expected.content
assert result.relative_path == expected.relative_path
def test_ps_eo_cmd_bad():
ps_eo_args = Mock()
ps_eo_args.content = PS_BAD.splitlines()
broker = {LocalSpecs.ps_eo_args: ps_eo_args}
with pytest.raises(SkipComponent) as e:
ps_eo_cmd(broker)
assert e is not None
def test_ps_eo_cmd_empty():
ps_eo_args = Mock()
ps_eo_args.content = PS_EMPTY.splitlines()
broker = {LocalSpecs.ps_eo_args: ps_eo_args}
with pytest.raises(SkipComponent) as e:
ps_eo_cmd(broker)
assert e is not None | en | 0.399029 | PID COMMAND 1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31 2 [kthreadd] 3 [rcu_gp] 4 [rcu_par_gp] 6 [kworker/0:0H-events_highpri] 9 [mm_percpu_wq] 10 [rcu_tasks_kthre] 11 /usr/bin/python3 /home/user1/python_app.py 12 [kworker/u16:0-kcryptd/253:0] PID COMMAND 1 /usr/lib/systemd/systemd 2 [kthreadd] 3 [rcu_gp] 4 [rcu_par_gp] 6 [kworker/0:0H-events_highpri] 9 [mm_percpu_wq] 10 [rcu_tasks_kthre] 11 /usr/bin/python3 12 [kworker/u16:0-kcryptd/253:0] PID COMMAND | 2.100559 | 2 |
dojo/nlp/__init__.py | VIVelev/PyDojo | 4 | 6613894 | <filename>dojo/nlp/__init__.py
from .tf_idf import TF_IDF
__all__ = [
"TF_IDF",
]
| <filename>dojo/nlp/__init__.py
from .tf_idf import TF_IDF
__all__ = [
"TF_IDF",
]
| none | 1 | 0.947247 | 1 | |
data.py | zacksleo/cytq | 8 | 6613895 | # coding=utf-8
import sys
# init datas
class Data:
    """Static weather data and wind-scale lookup helpers."""

    def __init__(self):
        pass

    # Caiyun/Dark-Sky style skycon code -> Chinese display name and icon
    # (HAZE and SLEET have no icon asset yet).
    weather_dict = {u'CLEAR_DAY': {u'name': u'晴天', u'icon': u'assets/sun-3.png'},
                    u'CLEAR_NIGHT': {u'name': u'晴夜', u'icon': u'assets/moon-1.png'},
                    u'PARTLY_CLOUDY_DAY': {u'name': u'多云', u'icon': u'assets/cloudy.png'},
                    u'PARTLY_CLOUDY_NIGHT': {u'name': u'多云', u'icon': u'assets/cloudy-night.png'},
                    u'CLOUDY': {u'name': u'阴', u'icon': u'assets/cloud.png'},
                    u'RAIN': {u'name': u'雨', u'icon': u'assets/rain-1.png'},
                    u'SNOW': {u'name': u'雪', u'icon': u'assets/snow.png'},
                    u'WIND': {u'name': u'风', u'icon': u'assets/windy.png'},
                    u'FOG': {u'name': u'雾', u'icon': u'assets/fogg.png'},
                    u'HAZE': {u'name': u'霾'},
                    u'SLEET': {u'name': u'冻雨'}}

    @staticmethod
    def get_wind_direction(wd):
        """Map a bearing *wd* (degrees) to a Chinese 8-point compass name.

        North wraps around 0 degrees, so it is handled before the table.
        """
        if wd <= 22.5 or wd > 337.5:
            return u'北风'
        # 45-degree sectors; each entry is (inclusive upper bound, name).
        for bound, label in ((67.5, u'东北风'), (112.5, u'东风'),
                             (157.5, u'东南风'), (202.5, u'南风'),
                             (247.5, u'西南风'), (292.5, u'西风'),
                             (337.5, u'西北风')):
            if wd <= bound:
                return label

    @staticmethod
    def get_wind_speed(ws):
        """Map a wind speed *ws* to its Beaufort-style Chinese name."""
        if ws <= 2:
            return u'无风'
        # (inclusive upper bound, name), ascending; anything above the
        # last bound is the strongest category.
        for bound, label in ((6, u'软风'), (12, u'轻风'), (19, u'缓风'),
                             (30, u'和风'), (40, u'清风'), (51, u'强风'),
                             (62, u'疾风'), (75, u'烈风'), (87, u'增强烈风'),
                             (103, u'暴风'), (149, u'台风'), (183, u'强台飓风'),
                             (220, u'超强台飓风')):
            if ws <= bound:
                return label
        return u'极强台飓风'
| # coding=utf-8
import sys
# init datas
class Data:
def __init__(self):
pass
weather_dict = {u'CLEAR_DAY': {u'name': u'晴天', u'icon': u'assets/sun-3.png'},
u'CLEAR_NIGHT': {u'name': u'晴夜', u'icon': u'assets/moon-1.png'},
u'PARTLY_CLOUDY_DAY': {u'name': u'多云', u'icon': u'assets/cloudy.png'},
u'PARTLY_CLOUDY_NIGHT': {u'name': u'多云', u'icon': u'assets/cloudy-night.png'},
u'CLOUDY': {u'name': u'阴', u'icon': u'assets/cloud.png'},
u'RAIN': {u'name': u'雨', u'icon': u'assets/rain-1.png'},
u'SNOW': {u'name': u'雪', u'icon': u'assets/snow.png'},
u'WIND': {u'name': u'风', u'icon': u'assets/windy.png'},
u'FOG': {u'name': u'雾', u'icon': u'assets/fogg.png'}, u'HAZE': {u'name': u'霾'},
u'SLEET': {u'name': u'冻雨'}}
@staticmethod
def get_wind_direction(wd):
if wd <= 22.5 or wd > 337.5:
return u'北风'
elif 22.5 < wd <= 67.5:
return u'东北风'
elif 67.5 < wd <= 112.5:
return u'东风'
elif 112.5 < wd <= 157.5:
return u'东南风'
elif 157.5 < wd <= 202.5:
return u'南风'
elif 202.5 < wd <= 247.5:
return u'西南风'
elif 247.5 < wd <= 292.5:
return u'西风'
elif 292.5 < wd <= 337.5:
return u'西北风'
@staticmethod
def get_wind_speed(ws):
if ws <= 2:
return u'无风'
if 2 < ws <= 6:
return u'软风'
elif 6 < ws <= 12:
return u'轻风'
elif 12 < ws <= 19:
return u'缓风'
elif 19 < ws <= 30:
return u'和风'
elif 30 < ws <= 40:
return u'清风'
elif 40 < ws <= 51:
return u'强风'
elif 51 < ws <= 62:
return u'疾风'
elif 62 < ws <= 75:
return u'烈风'
elif 75 < ws <= 87:
return u'增强烈风'
elif 87 < ws <= 103:
return u'暴风'
elif 103 < ws <= 149:
return u'台风'
elif 149 < ws <= 183:
return u'强台飓风'
elif 183 < ws <= 220:
return u'超强台飓风'
else:
return u'极强台飓风'
| en | 0.32909 | # coding=utf-8 # init datas | 3.022178 | 3 |
apiV4/match.py | davidmatag/api-lol-python | 0 | 6613896 | import json
import requests
def matById(_reg, _matchId, _apiKey):
    """Get match by match ID"""
    url = "https://" + _reg + ".api.riotgames.com/lol/match/v4/matches/" + _matchId + "?api_key=" + _apiKey
    payload = json.loads(requests.get(url).text)
    print(json.dumps(payload, indent=4))
def allMatch(_reg, _accountId, _apiKey):
    """Get matchlist for games played on given account ID and filtered using given filter parameters, if any"""
    url = "https://" + _reg + ".api.riotgames.com/lol/match/v4/matchlists/by-account/" + _accountId + "?api_key=" + _apiKey
    payload = json.loads(requests.get(url).text)
    print(json.dumps(payload, indent=4))
def timeLineById(_reg, _matchId, _apiKey):
    """Get match timeline by match ID and pretty-print the JSON response."""
    # BUG FIX: the original interpolated an undefined name `_summonerId`
    # (raising NameError at call time); the timelines endpoint is keyed
    # by match ID, which is this function's parameter.
    response = requests.get("https://" + _reg + ".api.riotgames.com/lol/match/v4/timelines/by-match/" + _matchId + "?api_key=" + _apiKey)
    data = json.loads(response.text)
    print(json.dumps(data, indent=4))
def matIdByTourCode(_reg, _tournamentCode, _apiKey):
    """Get match IDs by tournament code"""
    url = "https://" + _reg + ".api.riotgames.com/lol/match/v4/matches/by-tournament-code/" + _tournamentCode + "/ids?api_key=" + _apiKey
    payload = json.loads(requests.get(url).text)
    print(json.dumps(payload, indent=4))
def matByIdaTourCode(_reg, _matchId, _tournamentCode, _apiKey):
    """Get match by match ID and tournament code"""
    url = "https://" + _reg + ".api.riotgames.com/lol/match/v4/matches/" + _matchId + "/by-tournament-code/" + _tournamentCode + "?api_key=" + _apiKey
    payload = json.loads(requests.get(url).text)
    print(json.dumps(payload, indent=4))
| import json
import requests
def matById(_reg, _matchId, _apiKey):
"""Get match by match ID"""
response = requests.get("https://" + _reg + ".api.riotgames.com/lol/match/v4/matches/"+ _matchId + "?api_key=" + _apiKey)
data = json.loads(response.text)
print(json.dumps(data, indent=4))
def allMatch(_reg, _accountId, _apiKey):
"""Get matchlist for games played on given account ID and filtered using given filter parameters, if any"""
response = requests.get("https://" + _reg + ".api.riotgames.com/lol/match/v4/matchlists/by-account/"+ _accountId + "?api_key=" + _apiKey)
data = json.loads(response.text)
print(json.dumps(data, indent=4))
def timeLineById(_reg, _matchId, _apiKey):
"""Get match timeline by match ID"""
response = requests.get("https://" + _reg + ".api.riotgames.com/lol/match/v4/timelines/by-match/"+ _summonerId + "?api_key=" + _apiKey)
data = json.loads(response.text)
print(json.dumps(data, indent=4))
def matIdByTourCode(_reg, _tournamentCode, _apiKey):
"""Get match IDs by tournament code"""
response = requests.get("https://" + _reg + ".api.riotgames.com/lol/match/v4/matches/by-tournament-code/" + _tournamentCode + "/ids?api_key=" + _apiKey)
data = json.loads(response.text)
print(json.dumps(data, indent=4))
def matByIdaTourCode(_reg, _matchId, _tournamentCode, _apiKey):
"""Get match by match ID and tournament code"""
response = requests.get("https://" + _reg + ".api.riotgames.com/lol/match/v4/matches/" + _matchId + "/by-tournament-code/" + _tournamentCode + "?api_key=" + _apiKey)
data = json.loads(response.text)
print(json.dumps(data, indent=4))
| en | 0.864772 | Get match by match ID Get matchlist for games played on given account ID and filtered using given filter parameters, if any Get match timeline by match ID Get match IDs by tournament code Get match by match ID and tournament code | 2.749957 | 3 |
mySite/applications/resume/urls.py | ALittleMoron/django-my-site | 0 | 6613897 | from django.urls import path
from .views import Resume
# Resume app routes: the app root serves the Resume class-based view,
# reversible via the name "aboutMe".
urlpatterns = [
    path("", Resume.as_view(), name="aboutMe"),
]
| from django.urls import path
from .views import Resume
urlpatterns = [
path("", Resume.as_view(), name="aboutMe"),
]
| none | 1 | 1.677181 | 2 | |
tasks.py | franceme/Scripts | 0 | 6613898 | #!/usr/bin/env python3
import os,sys

# When run directly (not imported by `invoke`), bootstrap dependencies,
# install a shell alias, and exit before the task definitions below.
if __name__ == "__main__":
    os.system(f"{sys.executable} -m pip install --upgrade invoke funbelts")
    # BUG FIX: os.path.exists does not expand "~", so the original check
    # was always False and the alias was never written.
    bashrc = os.path.expanduser("~/.bashrc")
    if os.path.exists(bashrc):
        with open(bashrc, "a") as appender:
            # BUG FIX: append with a trailing newline so the alias does
            # not fuse onto the file's existing last line.
            appender.write("alias voke=invoke\n")
    sys.exit(0)

from invoke import task
import funbelts as ut
@task
def load(c):
    """Print simple progress markers; always reports success."""
    for message in ("Starting", "Loaded"):
        print(message)
    return True
@task
def clean(c):
    # Placeholder cleanup task; nothing to remove yet, signal success.
    return True
| #!/usr/bin/env python3
import os,sys
if __name__ == "__main__":
os.system(f"{sys.executable} -m pip install --upgrade invoke funbelts")
if os.path.exists("~/.bashrc"):
with open("~/.bashrc", "a") as appender:
appender.write("alias voke=invoke")
sys.exit(0)
from invoke import task
import funbelts as ut
@task
def load(c):
print("Starting")
print("Loaded")
return True
@task
def clean(c):
return True
| fr | 0.221828 | #!/usr/bin/env python3 | 2.136594 | 2 |
bin/remove-repeated-headers.py | acorg/slurm-pipeline | 27 | 6613899 | <filename>bin/remove-repeated-headers.py
#!/usr/bin/env python
"""
This is a simple filter that reads a (first) header line on standard input,
prints it, and then prints all subsequent lines from standard input unless they
are repeats of the header line.
It is useful if you have thousands of identical TSV (or CSV, etc) output files
and they all have a header and you want to get rid of all headers except the
first. Just 'cat' the files into this script.
This script can be used to post-process output files generated by multiple
invocations of a command by sbatch.py.
"""
import sys

# First line is the canonical header.  BUG FIX: the bare next(sys.stdin)
# raised StopIteration when stdin was empty; the None sentinel lets an
# empty input produce empty output instead of a crash.
header = next(sys.stdin, None)
if header is not None:
    print(header, end='')
    # Echo every subsequent line except exact repeats of the header.
    for line in sys.stdin:
        if line != header:
            print(line, end='')
| <filename>bin/remove-repeated-headers.py
#!/usr/bin/env python
"""
This is a simple filter that reads a (first) header line on standard input,
prints it, and then prints all subsequent lines from standard input unless they
are repeats of the header line.
It is useful if you have thousands of identical TSV (or CSV, etc) output files
and they all have a header and you want to get rid of all headers except the
first. Just 'cat' the files into this script.
This script can be used to post-process output files generated by multiple
invocations of a command by sbatch.py.
"""
import sys
header = next(sys.stdin)
print(header, end='')
for line in sys.stdin:
if line != header:
print(line, end='')
| en | 0.888802 | #!/usr/bin/env python This is a simple filter that reads a (first) header line on standard input, prints it, and then prints all subsequent lines from standard input unless they are repeats of the header line. It is useful if you have thousands of identical TSV (or CSV, etc) output files and they all have a header and you want to get rid of all headers except the first. Just 'cat' the files into this script. This script can be used to post-process output files generated by multiple invocations of a command by sbatch.py. | 3.222578 | 3 |
custom_components/solcast_solar/energy.py | Nag94/HomeAssistantConfig | 65 | 6613900 | <gh_stars>10-100
"""Energy platform."""
from __future__ import annotations
from homeassistant.core import HomeAssistant
from .const import DOMAIN
from . import SolcastRooftopSite
async def async_get_solar_forecast(hass: HomeAssistant, config_entry_id: str):
    """Get solar forecast for a config entry ID."""
    site: SolcastRooftopSite = hass.data[DOMAIN][config_entry_id]
    return None if site is None else site.get_energy_tab_data()
from __future__ import annotations
from homeassistant.core import HomeAssistant
from .const import DOMAIN
from . import SolcastRooftopSite
async def async_get_solar_forecast(hass: HomeAssistant, config_entry_id: str):
"""Get solar forecast for a config entry ID."""
coordinator: SolcastRooftopSite = hass.data[DOMAIN][config_entry_id]
if coordinator is None:
return None
return coordinator.get_energy_tab_data() | en | 0.678156 | Energy platform. Get solar forecast for a config entry ID. | 2.011631 | 2 |
output/models/ibm_data/valid/d3_4_6/d3_4_6v05_xsd/__init__.py | tefra/xsdata-w3c-tests | 1 | 6613901 | from output.models.ibm_data.valid.d3_4_6.d3_4_6v05_xsd.d3_4_6v05 import (
Nametest,
Root,
)
__all__ = [
"Nametest",
"Root",
]
| from output.models.ibm_data.valid.d3_4_6.d3_4_6v05_xsd.d3_4_6v05 import (
Nametest,
Root,
)
__all__ = [
"Nametest",
"Root",
]
| none | 1 | 1.120892 | 1 | |
graphics/trunc20/draw_svg.py | mamewotoko/threejs_sample | 1 | 6613902 |
import math
canvas_size = (800, 800)
r6 = 100
theta = math.pi*2/6
o = map(lambda x: x/2, canvas_size)
l = r6
r5 = l/(2*math.sin(math.pi/5))
h5 = r5*(1+math.cos(math.pi/5))
h = l*math.cos(math.pi/6)
def points(i):
return map(int, (r6*math.cos(theta*i)+o[0], -r6*math.sin(theta*i)+o[1]))
print '<svg width="%d" height="%d" xmlns="http://www.w3.org/2000/svg">' % canvas_size
print '<circle cx="%d" cy="%d" r="%d" fill="none" stroke="black" />' % (o[0], o[1], r6)
# axis
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="black" />' % (0, o[1], canvas_size[0], o[1])
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="black" />' % (o[0], 0, o[0], canvas_size[1])
pointlist = [points(i) for i in range(0, 6)]
for i in range(0, len(pointlist)):
fromp = pointlist[i]
top = pointlist[(i+1)%len(pointlist)]
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="black" />' % (fromp[0], fromp[1], top[0], top[1])
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="green" stroke-dasharray="4,4" />' % (pointlist[0][0], pointlist[0][1], pointlist[0][0], 0)
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="green" stroke-dasharray="4,4" />' % (pointlist[3][0], pointlist[3][1], pointlist[3][0], 0)
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="green" stroke-dasharray="4,4" />' % (pointlist[1][0], pointlist[1][1], pointlist[1][0], 0)
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="green" stroke-dasharray="4,4" />' % (pointlist[2][0], pointlist[2][1], pointlist[2][0], 0)
up6_0 = [pointlist[0][0], pointlist[1][1]-r6*math.sin(math.pi/3)]
up6_1 = [pointlist[1][0], pointlist[1][1]-2*r6*math.sin(math.pi/3)]
up6_2 = [pointlist[2][0], pointlist[1][1]-2*r6*math.sin(math.pi/3)]
up6_3 = [pointlist[3][0], pointlist[2][1]-r6*math.sin(math.pi/3)]
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="green" />' % (pointlist[1][0], pointlist[1][1], up6_0[0], up6_0[1])
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="green" />' % (up6_1[0], up6_1[1], up6_0[0], up6_0[1])
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="green" />' % (up6_1[0], up6_1[1], up6_2[0], up6_2[1])
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="green" />' % (up6_3[0], up6_3[1], up6_2[0], up6_2[1])
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="green" />' % (up6_3[0], up6_3[1], pointlist[2][0], pointlist[2][1])
## rotate 0-1 -3*Math.pi/5: math.pi - math.pi/3 - 3*math.pi/5 = math.pi/15
bottom1 = [l*math.cos(math.pi/15)+pointlist[0][0], -l*math.sin(math.pi/15)+pointlist[0][1]]
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="pink" />' % (pointlist[0][0], pointlist[0][1], bottom1[0], bottom1[1])
print '<text x="%d" y="%d">Math.PI/15</text>' % (bottom1[0], bottom1[1])
bottom2 = [l*math.cos(math.pi*4/15)+pointlist[1][0], -l*math.sin(math.pi*4/15) + pointlist[1][1]]
print '<circle cx="%d" cy="%d" r="%d" stroke-dasharray="4,4" fill="none" stroke="gray" />' % (pointlist[0][0], pointlist[0][1], l)
print '<circle cx="%d" cy="%d" r="%d" stroke-dasharray="4,4" fill="none" stroke="gray" />' % (pointlist[1][0], pointlist[1][1], l)
## top
o2top_len = r6*math.sin(math.pi/3) + h5
bottom3 = [o2top_len*math.cos(math.pi/6)+o[0], -o2top_len*math.sin(math.pi/6)+o[1]]
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="pink" stroke-dasharray="4,4" />' % (o[0], o[1], bottom3[0], bottom3[1])
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="pink" />' % (bottom1[0], bottom1[1], bottom3[0], bottom3[1])
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="pink" />' % (bottom2[0], bottom2[1], bottom3[0], bottom3[1])
#0 move line
bottom1move = [bottom1[0]-o2top_len*math.cos(math.pi/6), bottom1[1]+o2top_len*math.sin(math.pi/6)]
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="pink" stroke-dasharray="4,4" />' % (bottom1move[0], bottom1move[1], bottom1[0], bottom1[1])
bottom2move = [bottom2[0]-o2top_len*math.cos(math.pi/6), bottom2[1]+o2top_len*math.sin(math.pi/6)]
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="pink" stroke-dasharray="4,4" />' % (bottom2move[0], bottom2move[1], bottom2[0], bottom2[1])
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="pink" />' % (pointlist[1][0], pointlist[1][1], bottom2[0], bottom2[1])
right6_4 = [pointlist[5][0] + r6*math.cos(-math.pi/3), pointlist[5][1] - r6*math.sin(-math.pi/3)]
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="red" />' % (pointlist[5][0], pointlist[5][1], right6_4[0], right6_4[1])
right6_5 = [right6_4[0]+r6, right6_4[1]]
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="red" />' % (right6_5[0], right6_5[1], right6_4[0], right6_4[1])
right6_1 = [pointlist[0][0]+r6, pointlist[0][1]]
right6_0 = [right6_1[0] + r6*math.cos(-math.pi/3), right6_1[1] - r6*math.sin(-math.pi/3)]
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="red" />' % (right6_1[0], right6_1[1], right6_0[0], right6_0[1])
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="red" />' % (right6_5[0], right6_5[1], right6_0[0], right6_0[1])
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="red" stroke-dasharray="4,4"/>' % (right6_1[0], right6_1[1], pointlist[1][0], pointlist[1][1])
print '</svg>'
|
import math
canvas_size = (800, 800)
r6 = 100
theta = math.pi*2/6
o = map(lambda x: x/2, canvas_size)
l = r6
r5 = l/(2*math.sin(math.pi/5))
h5 = r5*(1+math.cos(math.pi/5))
h = l*math.cos(math.pi/6)
def points(i):
return map(int, (r6*math.cos(theta*i)+o[0], -r6*math.sin(theta*i)+o[1]))
print '<svg width="%d" height="%d" xmlns="http://www.w3.org/2000/svg">' % canvas_size
print '<circle cx="%d" cy="%d" r="%d" fill="none" stroke="black" />' % (o[0], o[1], r6)
# axis
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="black" />' % (0, o[1], canvas_size[0], o[1])
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="black" />' % (o[0], 0, o[0], canvas_size[1])
pointlist = [points(i) for i in range(0, 6)]
for i in range(0, len(pointlist)):
fromp = pointlist[i]
top = pointlist[(i+1)%len(pointlist)]
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="black" />' % (fromp[0], fromp[1], top[0], top[1])
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="green" stroke-dasharray="4,4" />' % (pointlist[0][0], pointlist[0][1], pointlist[0][0], 0)
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="green" stroke-dasharray="4,4" />' % (pointlist[3][0], pointlist[3][1], pointlist[3][0], 0)
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="green" stroke-dasharray="4,4" />' % (pointlist[1][0], pointlist[1][1], pointlist[1][0], 0)
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="green" stroke-dasharray="4,4" />' % (pointlist[2][0], pointlist[2][1], pointlist[2][0], 0)
up6_0 = [pointlist[0][0], pointlist[1][1]-r6*math.sin(math.pi/3)]
up6_1 = [pointlist[1][0], pointlist[1][1]-2*r6*math.sin(math.pi/3)]
up6_2 = [pointlist[2][0], pointlist[1][1]-2*r6*math.sin(math.pi/3)]
up6_3 = [pointlist[3][0], pointlist[2][1]-r6*math.sin(math.pi/3)]
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="green" />' % (pointlist[1][0], pointlist[1][1], up6_0[0], up6_0[1])
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="green" />' % (up6_1[0], up6_1[1], up6_0[0], up6_0[1])
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="green" />' % (up6_1[0], up6_1[1], up6_2[0], up6_2[1])
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="green" />' % (up6_3[0], up6_3[1], up6_2[0], up6_2[1])
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="green" />' % (up6_3[0], up6_3[1], pointlist[2][0], pointlist[2][1])
## rotate 0-1 -3*Math.pi/5: math.pi - math.pi/3 - 3*math.pi/5 = math.pi/15
bottom1 = [l*math.cos(math.pi/15)+pointlist[0][0], -l*math.sin(math.pi/15)+pointlist[0][1]]
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="pink" />' % (pointlist[0][0], pointlist[0][1], bottom1[0], bottom1[1])
print '<text x="%d" y="%d">Math.PI/15</text>' % (bottom1[0], bottom1[1])
bottom2 = [l*math.cos(math.pi*4/15)+pointlist[1][0], -l*math.sin(math.pi*4/15) + pointlist[1][1]]
print '<circle cx="%d" cy="%d" r="%d" stroke-dasharray="4,4" fill="none" stroke="gray" />' % (pointlist[0][0], pointlist[0][1], l)
print '<circle cx="%d" cy="%d" r="%d" stroke-dasharray="4,4" fill="none" stroke="gray" />' % (pointlist[1][0], pointlist[1][1], l)
## top
o2top_len = r6*math.sin(math.pi/3) + h5
bottom3 = [o2top_len*math.cos(math.pi/6)+o[0], -o2top_len*math.sin(math.pi/6)+o[1]]
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="pink" stroke-dasharray="4,4" />' % (o[0], o[1], bottom3[0], bottom3[1])
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="pink" />' % (bottom1[0], bottom1[1], bottom3[0], bottom3[1])
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="pink" />' % (bottom2[0], bottom2[1], bottom3[0], bottom3[1])
#0 move line
bottom1move = [bottom1[0]-o2top_len*math.cos(math.pi/6), bottom1[1]+o2top_len*math.sin(math.pi/6)]
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="pink" stroke-dasharray="4,4" />' % (bottom1move[0], bottom1move[1], bottom1[0], bottom1[1])
bottom2move = [bottom2[0]-o2top_len*math.cos(math.pi/6), bottom2[1]+o2top_len*math.sin(math.pi/6)]
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="pink" stroke-dasharray="4,4" />' % (bottom2move[0], bottom2move[1], bottom2[0], bottom2[1])
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="pink" />' % (pointlist[1][0], pointlist[1][1], bottom2[0], bottom2[1])
right6_4 = [pointlist[5][0] + r6*math.cos(-math.pi/3), pointlist[5][1] - r6*math.sin(-math.pi/3)]
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="red" />' % (pointlist[5][0], pointlist[5][1], right6_4[0], right6_4[1])
right6_5 = [right6_4[0]+r6, right6_4[1]]
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="red" />' % (right6_5[0], right6_5[1], right6_4[0], right6_4[1])
right6_1 = [pointlist[0][0]+r6, pointlist[0][1]]
right6_0 = [right6_1[0] + r6*math.cos(-math.pi/3), right6_1[1] - r6*math.sin(-math.pi/3)]
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="red" />' % (right6_1[0], right6_1[1], right6_0[0], right6_0[1])
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="red" />' % (right6_5[0], right6_5[1], right6_0[0], right6_0[1])
print '<line x1="%d" y1="%d" x2="%d" y2="%d" stroke="red" stroke-dasharray="4,4"/>' % (right6_1[0], right6_1[1], pointlist[1][0], pointlist[1][1])
print '</svg>'
| en | 0.104686 | # axis ## rotate 0-1 -3*Math.pi/5: math.pi - math.pi/3 - 3*math.pi/5 = math.pi/15 ## top #0 move line | 3.225352 | 3 |
project-2/src/models/model_list.py | thomasnilsson/nlu-2019 | 0 | 6613903 | <filename>project-2/src/models/model_list.py
import data_loader as dl
import constants as C
from models.combinable_models import MODELS, get_model_class, get_model_info
from models.combiner import Combiner

# Register the Combiner meta-model in the shared MODELS registry; it is
# fed the other models' combined predictions (C.combined_file).
MODELS["combiner"] = [Combiner, {"loader": dl.load_combined,
                                 "file_name": C.combined_file}]
| <filename>project-2/src/models/model_list.py
import data_loader as dl
import constants as C
from models.combinable_models import MODELS, get_model_class, get_model_info
from models.combiner import Combiner
MODELS["combiner"] = [Combiner, {"loader": dl.load_combined,
"file_name": C.combined_file}]
| none | 1 | 1.86969 | 2 | |
example_pipeline/stages/parameter_guesser.py | rgasper/ml_hackathon_template | 1 | 6613904 | from eis.EISDataIO import (
eis_dataframe_from_csv,
eis_dataframe_to_csv,
parse_circuit_params_from_str,
circuit_params_dict_to_str,
)
import pandas as pd
def all_params_are_one(train_data: pd.DataFrame, circuit_guesses: pd.DataFrame) -> pd.DataFrame:
    """Set every guessed circuit's parameter values to 1.

    The parameter *names* for each circuit are taken from the first
    training row carrying that circuit label; only values are replaced.
    The guesses frame is updated in place and returned.
    """
    template_by_circuit = {}
    for name in train_data.Circuit.unique():
        first_row = train_data.loc[train_data.Circuit == name].iloc[0]
        template_by_circuit[first_row.Circuit] = first_row.Parameters

    def ones_for(circuit: str) -> str:
        params = parse_circuit_params_from_str(template_by_circuit[circuit])
        return circuit_params_dict_to_str({key: 1 for key in params})

    circuit_guesses["Parameters"] = circuit_guesses["Circuit"].apply(ones_for)
    return circuit_guesses
# Stage entry point: read the stage inputs, fill all parameter values
# with 1, and write the submission CSV for the next pipeline stage.
if __name__ == "__main__":
    train_data = eis_dataframe_from_csv("example_pipeline/stage_data/train_data.csv")
    circuit_guesses = eis_dataframe_from_csv("example_pipeline/stage_data/circuit_guesses.csv")
    submission = all_params_are_one(train_data, circuit_guesses)
    eis_dataframe_to_csv(submission, "example_pipeline/stage_data/submission.csv")
| from eis.EISDataIO import (
eis_dataframe_from_csv,
eis_dataframe_to_csv,
parse_circuit_params_from_str,
circuit_params_dict_to_str,
)
import pandas as pd
def all_params_are_one(train_data: pd.DataFrame, circuit_guesses: pd.DataFrame) -> pd.DataFrame:
params_str_lookup = {}
for circuit_name in train_data.Circuit.unique():
sample = train_data.loc[train_data.Circuit == circuit_name].iloc[0]
params_str_lookup[sample.Circuit] = sample.Parameters
def get_params(circuit: str) -> str:
params_str = params_str_lookup[circuit]
params = parse_circuit_params_from_str(params_str)
for key in params:
params[key] = 1
return circuit_params_dict_to_str(params)
circuit_guesses["Parameters"] = circuit_guesses["Circuit"].apply(get_params)
return circuit_guesses
if __name__ == "__main__":
train_data = eis_dataframe_from_csv("example_pipeline/stage_data/train_data.csv")
circuit_guesses = eis_dataframe_from_csv("example_pipeline/stage_data/circuit_guesses.csv")
submission = all_params_are_one(train_data, circuit_guesses)
eis_dataframe_to_csv(submission, "example_pipeline/stage_data/submission.csv")
| none | 1 | 2.956175 | 3 | |
places.py | cb109/places | 0 | 6613905 | <gh_stars>0
#!/bin/python3
"""Create a google map page with places as specified by a .csv file."""
# Based on this awesome example:
# http://stackoverflow.com/questions/22342097/is-it-possible-to-create-a-google-map-from-python
import csv
import logging
import sys
import pygeolib
from pygeocoder import Geocoder
USAGE = __doc__ + "\n\n" + "python places.py <my-visited-places.csv>"
DEFAULT_OUTPUTFILE = "map.html"
MARKER_TEMPLATE = """
new google.maps.Marker({{
position: new google.maps.LatLng({lat}, {lon}),
map: map
}});
"""
PAGE_TEMPLATE = """
<html>
<script src="https://maps.googleapis.com/maps/api/js?v=3.exp&sensor=false">
</script>
<div id="map-canvas" style="height: 100%; width: 100%"></div>
<script type="text/javascript">
var map;
function show_map() {{
map = new google.maps.Map(document.getElementById("map-canvas"), {{
zoom: 8,
center: new google.maps.LatLng({center_lat}, {center_long})
}});
{markers_code}
}}
google.maps.event.addDomListener(window, 'load', show_map);
</script>
</html>
"""
# Root logging config: timestamped "[name] LEVEL: message" lines at INFO.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(name)s] %(levelname)s: %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S")
log = logging.getLogger("places")
class Map(object):
    """
    Represents a Map as defined by the Google Maps API.

    It collects marker coordinates and renders itself as a complete
    HTML/JavaScript page when cast to a string.
    """

    def __init__(self):
        self._points = []

    def add_point(self, coordinates):
        self._points.append(coordinates)

    def __str__(self):
        if not self._points:
            return "No places to show. Check your .csv file."
        count = len(self._points)
        # Centre the viewport on the arithmetic mean of all markers.
        center_lat = sum(lat for lat, _ in self._points) / count
        center_long = sum(lon for _, lon in self._points) / count
        markers = "\n".join(
            MARKER_TEMPLATE.format(lat=lat, lon=lon)
            for lat, lon in self._points)
        return PAGE_TEMPLATE.format(center_lat=center_lat,
                                    center_long=center_long,
                                    markers_code=markers)
def read_places_from_file(csvfile):
    """Yield place names from the first column of *csvfile*.

    The delimiter is auto-detected with csv.Sniffer from a bounded
    sample (the original slurped the whole file just to sniff).
    """
    with open(csvfile) as f:
        dialect = csv.Sniffer().sniff(f.read(4096))
    with open(csvfile) as f:
        for row in csv.reader(f, delimiter=dialect.delimiter):
            # BUG FIX: blank lines yield an empty row, and row[0]
            # raised IndexError; skip them instead.
            if row:
                yield row[0]
def get_geocoordinates_for_place(place):
    """Will return None if it can't resolve to coordinates."""
    try:
        coords = Geocoder.geocode(str(place)).coordinates
        log.info("Found coordinates for: " + str(place))
        return coords
    except pygeolib.GeocoderError:
        # Unknown places and network/quota failures land here; the
        # caller treats None as "skip this place".
        log.error("Could not get coordinates for: " + str(place))
        return None
def visualize(csvfile):
    """Visualize places from .csv as google maps html page."""
    log.info("Reading: " + str(csvfile))
    # Deduplicate and sort the place names in one pass.
    sorted_places = sorted(set(read_places_from_file(csvfile)))
    log.info("Read {0} unique places.".format(len(sorted_places)))
    log.info("Querying coordinates.")
    # Keep only places the geocoder could resolve (None means failure).
    points = [pt
              for pt in (get_geocoordinates_for_place(p) for p in sorted_places)
              if pt]
    log.info("Assembling map.")
    custom_map = Map()
    for pt in points:
        custom_map.add_point(pt)
    log.info("Writing results.")
    with open(DEFAULT_OUTPUTFILE, "w") as out:
        out.write(str(custom_map))
    log.info("Map written to: " + DEFAULT_OUTPUTFILE)
    log.info("Open it in a web browser to see the map.")
if __name__ == "__main__":
    args = sys.argv[1:]
    if len(args) != 1:
        # BUG FIX: the original printed the usage text but then fell
        # through to args[0], crashing with IndexError when no file
        # argument was given; exit with an error status instead.
        log.info(USAGE)
        sys.exit(1)
    csvfile = args[0]
    visualize(csvfile)
| #!/bin/python3
"""Create a google map page with places as specified by a .csv file."""
# Based on this awesome example:
# http://stackoverflow.com/questions/22342097/is-it-possible-to-create-a-google-map-from-python
import csv
import logging
import sys
import pygeolib
from pygeocoder import Geocoder
USAGE = __doc__ + "\n\n" + "python places.py <my-visited-places.csv>"
DEFAULT_OUTPUTFILE = "map.html"
MARKER_TEMPLATE = """
new google.maps.Marker({{
position: new google.maps.LatLng({lat}, {lon}),
map: map
}});
"""
PAGE_TEMPLATE = """
<html>
<script src="https://maps.googleapis.com/maps/api/js?v=3.exp&sensor=false">
</script>
<div id="map-canvas" style="height: 100%; width: 100%"></div>
<script type="text/javascript">
var map;
function show_map() {{
map = new google.maps.Map(document.getElementById("map-canvas"), {{
zoom: 8,
center: new google.maps.LatLng({center_lat}, {center_long})
}});
{markers_code}
}}
google.maps.event.addDomListener(window, 'load', show_map);
</script>
</html>
"""
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(name)s] %(levelname)s: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S")
log = logging.getLogger("places")
class Map(object):
    """A Google-Maps page builder.

    Collects (latitude, longitude) points and, when cast to a string,
    renders a complete HTML page (via PAGE_TEMPLATE / MARKER_TEMPLATE)
    with one marker per point, centered on the points' mean coordinate.
    """

    def __init__(self):
        self._points = []

    def add_point(self, coordinates):
        """Append a (lat, lon) pair to the map."""
        self._points.append(coordinates)

    def __str__(self):
        if not self._points:
            return "No places to show. Check your .csv file."
        latitudes = [p[0] for p in self._points]
        longitudes = [p[1] for p in self._points]
        markers = [MARKER_TEMPLATE.format(lat=lat, lon=lon)
                   for lat, lon in self._points]
        return PAGE_TEMPLATE.format(center_lat=sum(latitudes) / len(self._points),
                                    center_long=sum(longitudes) / len(self._points),
                                    markers_code="\n".join(markers))
def read_places_from_file(csvfile):
    """Yield the first column of every non-empty row in *csvfile*.

    The .csv must have the city name in its first column. The delimiter is
    auto-detected with csv.Sniffer; if detection fails (e.g. a single-column
    file with no delimiter at all) the standard Excel dialect is used.

    :param csvfile: path to the .csv file
    :yields: the first-column value of each row
    """
    with open(csvfile) as f:
        sample = f.read()
    try:
        dialect = csv.Sniffer().sniff(sample)
    except csv.Error:
        # Sniffer raises when no delimiter can be determined; fall back
        # to the default comma-separated dialect instead of crashing.
        dialect = csv.excel
    with open(csvfile) as f:
        reader = csv.reader(f, delimiter=dialect.delimiter)
        for row in reader:
            # Blank lines come back as empty rows; row[0] would raise
            # IndexError on them, so skip.
            if not row:
                continue
            yield row[0]
def get_geocoordinates_for_place(place):
    """Resolve a place name to (latitude, longitude) via the Google geocoder.

    Returns None (and logs an error) if the geocoder cannot resolve the
    name; network access is required for the pygeocoder call.
    """
    try:
        coords = Geocoder.geocode(str(place)).coordinates
        log.info("Found coordinates for: " + str(place))
        return coords
    except pygeolib.GeocoderError:
        # Unresolvable / rate-limited lookups are reported but not fatal;
        # the caller filters out the None results.
        log.error("Could not get coordinates for: " + str(place))
        return None
def visualize(csvfile):
    """Visualize places from .csv as google maps html page.

    Reads place names from *csvfile*, deduplicates and sorts them,
    geocodes each one, and writes a self-contained HTML map to
    DEFAULT_OUTPUTFILE in the current working directory.
    """
    log.info("Reading: " + str(csvfile))
    places = read_places_from_file(csvfile)
    # Deduplicate (set) before sorting; 'places' is a generator so it is
    # materialized here exactly once.
    unique_places = list(set(list(places)))
    sorted_places = sorted(unique_places)
    log.info("Read {0} unique places.".format(len(sorted_places)))
    log.info("Querying coordinates.")
    points = []
    for place in sorted_places:
        point = get_geocoordinates_for_place(place)
        # Failed lookups return None and are silently dropped from the map.
        if point:
            points.append(point)
    log.info("Assembling map.")
    custom_map = Map()
    for point in points:
        custom_map.add_point(point)
    log.info("Writing results.")
    with open(DEFAULT_OUTPUTFILE, "w") as out:
        out.write(str(custom_map))
    log.info("Map written to: " + DEFAULT_OUTPUTFILE)
    log.info("Open it in a web browser to see the map.")
if __name__ == "__main__":
    args = sys.argv[1:]
    if len(args) != 1:
        # Exit after printing usage; otherwise args[0] below raises
        # IndexError when the script is started without an argument.
        log.info(USAGE)
        sys.exit(1)
    csvfile = args[0]
visualize(csvfile) | en | 0.521648 | #!/bin/python3 Create a google map page with places as specified by a .csv file. # Based on this awesome example: # http://stackoverflow.com/questions/22342097/is-it-possible-to-create-a-google-map-from-python new google.maps.Marker({{ position: new google.maps.LatLng({lat}, {lon}), map: map }}); <html> <script src="https://maps.googleapis.com/maps/api/js?v=3.exp&sensor=false"> </script> <div id="map-canvas" style="height: 100%; width: 100%"></div> <script type="text/javascript"> var map; function show_map() {{ map = new google.maps.Map(document.getElementById("map-canvas"), {{ zoom: 8, center: new google.maps.LatLng({center_lat}, {center_long}) }}); {markers_code} }} google.maps.event.addDomListener(window, 'load', show_map); </script> </html> Represents a Map as defined by the Google Maps API. It contains custom markers and returns itself as valid html and javascript when cast to a string. The .csv must have a single column with city names. Will return None if it can't resolve to coordinates. Visualize places from .csv as google maps html page. | 3.572818 | 4 |
processingAndVisualization/code/pythonCode/oldCode/normalizeTFs.py | sorgerlab/pCycIF_Segmentation | 0 | 6613906 | import pandas as pd
from sklearn.cluster import KMeans
import numpy as np
import glob
# Transcription factors whose nuclear/cytoplasmic ratio we want.
# NOTE(review): 'p-STAT5_Cyto' appears twice in this list, so its ratio
# column is computed (and overwritten identically) twice — harmless but
# redundant; confirm whether a different marker was intended.
TFlist = ['p-c-Jun_Cyto','p-STAT5_Cyto','p-STAT1_Cyto','p-STAT3_Cyto','c-Myc_Cyto','c-Jun_Cyto','NFkB_Cyto','p-STAT5_Cyto','STAT3_Cyto']
# Pass 1: proteome panel. For every per-dose CSV of each drug condition,
# add a <TF>_NC_Ratio column (nuclear signal / cytoplasmic signal) for each
# TF present, then write the augmented table under tfRatios/.
for drug in ['dmso','soraf']:
    for fn in sorted(glob.glob('/home/bobby/Dropbox/MATLAB/cardiotoxCycif/segmentation/newSegmentationData/dapiAdded/csvProcessing/doseSeparated/proteome/**/%s/*.csv' % drug,recursive=True)):
        print(fn)
        df = pd.read_csv(fn)
        for tf in TFlist:
            if tf in df.columns:
                tf_cyto = df[tf]
                # 'p-STAT5_Cyto' -> 'p-STAT5_Nuc': swap the compartment suffix.
                tf_nuc_name = str(tf.split('_')[:-1][0]) + '_Nuc'
                tf_nuc = df[tf_nuc_name]
                tf_nc_ratio = tf_nuc/tf_cyto
                newname = str(tf.split('_')[:-1][0]) + '_NC_Ratio'
                df[newname] = tf_nc_ratio
                #pull tf_cyto and nuc
        # Dose is encoded as the first '_'-separated token of the file name
        # — presumably e.g. '<dose>_rest.csv'; confirm against the data layout.
        dose = fn.split('/')[-1].split('_')[0]
        df.to_csv('/home/bobby/Dropbox/MATLAB/cardiotoxCycif/segmentation/newSegmentationData/dapiAdded/csvProcessing/tfRatios/proteome/%s/%s/%s' % (dose,drug,fn.split('/')[-1]))
# Pass 2: signaling panel — identical processing, different input/output
# subtree (and no per-file print).
for drug in ['dmso','soraf']:
    for fn in sorted(glob.glob('/home/bobby/Dropbox/MATLAB/cardiotoxCycif/segmentation/newSegmentationData/dapiAdded/csvProcessing/doseSeparated/signaling/**/%s/*.csv' % drug,recursive=True)):
        df = pd.read_csv(fn)
        for tf in TFlist:
            if tf in df.columns:
                tf_cyto = df[tf]
                tf_nuc_name = str(tf.split('_')[:-1][0]) + '_Nuc'
                tf_nuc = df[tf_nuc_name]
                tf_nc_ratio = tf_nuc/tf_cyto
                newname = str(tf.split('_')[:-1][0]) + '_NC_Ratio'
                df[newname] = tf_nc_ratio
                #pull tf_cyto and nuc
        dose = fn.split('/')[-1].split('_')[0]
        df.to_csv('/home/bobby/Dropbox/MATLAB/cardiotoxCycif/segmentation/newSegmentationData/dapiAdded/csvProcessing/tfRatios/signaling/%s/%s/%s' % (dose,drug,fn.split('/')[-1]))
# lowdose.to_csv('/home/bobby/Dropbox/MATLAB/cardiotoxCycif/segmentation/newSegmentationData/dapiAdded/clustering/doses/signaling/lowdose/%s/lowdose_%s' % (drug,fn.split('/')[-1]))
| import pandas as pd
from sklearn.cluster import KMeans
import numpy as np
import glob
TFlist = ['p-c-Jun_Cyto','p-STAT5_Cyto','p-STAT1_Cyto','p-STAT3_Cyto','c-Myc_Cyto','c-Jun_Cyto','NFkB_Cyto','p-STAT5_Cyto','STAT3_Cyto']
for drug in ['dmso','soraf']:
for fn in sorted(glob.glob('/home/bobby/Dropbox/MATLAB/cardiotoxCycif/segmentation/newSegmentationData/dapiAdded/csvProcessing/doseSeparated/proteome/**/%s/*.csv' % drug,recursive=True)):
print(fn)
df = pd.read_csv(fn)
for tf in TFlist:
if tf in df.columns:
tf_cyto = df[tf]
tf_nuc_name = str(tf.split('_')[:-1][0]) + '_Nuc'
tf_nuc = df[tf_nuc_name]
tf_nc_ratio = tf_nuc/tf_cyto
newname = str(tf.split('_')[:-1][0]) + '_NC_Ratio'
df[newname] = tf_nc_ratio
#pull tf_cyto and nuc
dose = fn.split('/')[-1].split('_')[0]
df.to_csv('/home/bobby/Dropbox/MATLAB/cardiotoxCycif/segmentation/newSegmentationData/dapiAdded/csvProcessing/tfRatios/proteome/%s/%s/%s' % (dose,drug,fn.split('/')[-1]))
for drug in ['dmso','soraf']:
for fn in sorted(glob.glob('/home/bobby/Dropbox/MATLAB/cardiotoxCycif/segmentation/newSegmentationData/dapiAdded/csvProcessing/doseSeparated/signaling/**/%s/*.csv' % drug,recursive=True)):
df = pd.read_csv(fn)
for tf in TFlist:
if tf in df.columns:
tf_cyto = df[tf]
tf_nuc_name = str(tf.split('_')[:-1][0]) + '_Nuc'
tf_nuc = df[tf_nuc_name]
tf_nc_ratio = tf_nuc/tf_cyto
newname = str(tf.split('_')[:-1][0]) + '_NC_Ratio'
df[newname] = tf_nc_ratio
#pull tf_cyto and nuc
dose = fn.split('/')[-1].split('_')[0]
df.to_csv('/home/bobby/Dropbox/MATLAB/cardiotoxCycif/segmentation/newSegmentationData/dapiAdded/csvProcessing/tfRatios/signaling/%s/%s/%s' % (dose,drug,fn.split('/')[-1]))
# lowdose.to_csv('/home/bobby/Dropbox/MATLAB/cardiotoxCycif/segmentation/newSegmentationData/dapiAdded/clustering/doses/signaling/lowdose/%s/lowdose_%s' % (drug,fn.split('/')[-1]))
| en | 0.345414 | #pull tf_cyto and nuc #pull tf_cyto and nuc # lowdose.to_csv('/home/bobby/Dropbox/MATLAB/cardiotoxCycif/segmentation/newSegmentationData/dapiAdded/clustering/doses/signaling/lowdose/%s/lowdose_%s' % (drug,fn.split('/')[-1])) | 2.207458 | 2 |
poluk/process.py | simsy88/poluk | 0 | 6613907 | import numpy as np
from poluk import constants
def process(df, keep_ni=False, new=None):
    """ Tidies up a raw Electoral Calculus data set

    :param df: Pandas DataFrame from reading a raw Electoral Calculus data set
    :param keep_ni: Boolean to indicate whether Northern Ireland seats should be kept in the output
    :param new: Dict of new parties. Keys are party abbreviation. Values are the votes per seat
    :return: Pandas DataFrame that's been nicely tidied up
    """
    # Gather party codes — columns 0-4 are assumed to be seat metadata,
    # everything from column 5 onward is a party vote column.
    parties = list(df.columns[5:])
    # Name Northern Irish parties.  In the raw file the GB party columns
    # double as NI party columns for NI seats; this table maps positionally.
    if len(parties) == 6:
        ni_parties = ['UUP', 'SDLP', 'DUP', 'SF', 'MIN', 'OTH']
    elif len(parties) == 8:
        ni_parties = ['UUP', 'SDLP', 'DUP', 'APNI', 'Green', 'SF', 'MIN', 'OTH']
    else:
        raise AssertionError('Wrong number of parties in raw Electoral Calculus file')
    ni_conv_dict = {parties[i]:ni_parties[i] for i in range(len(parties))}
    # Rename raw vote columns (suffix '_raw' distinguishes them from the
    # percentage columns added below).
    df.columns = list(df.columns[:5]) + [p+'_raw' for p in parties]
    # Add new parties: their votes are carved out of the 'OTH' column so
    # seat totals stay unchanged.
    if new:
        for p in new.keys():
            parties.append(p)
            df[p+'_raw'] = new[p]
            df[p+'_raw'].fillna(0, inplace=True)
            df['OTH_raw'] = (df['OTH_raw'] - new[p]).fillna(df['OTH_raw'])
    # Process Northern Ireland (Area == 1 is NI, per the keep_ni handling):
    # either split NI votes into dedicated NI-party columns, or drop NI rows.
    if keep_ni:
        for gbp in ni_conv_dict.keys():
            nip = ni_conv_dict[gbp]
            if nip not in parties:
                df[nip+'_raw'] = np.where(df['Area'] == 1, df[gbp+'_raw'], 0)
                df[gbp+'_raw'] = np.where(df['Area'] != 1, df[gbp+'_raw'], 0)
                parties.append(nip)
    else:
        df = df[df['Area'] != 1].reset_index(drop=True).copy()
    # Process NAT into SNP and Plaid (Area 2 = Scotland, Area 6 = Wales).
    for t in [('SNP',2),('Plaid',6)]:
        df[t[0]+'_raw'] = np.where(df['Area'] == t[1], df['NAT_raw'], 0)
        parties.append(t[0])
    df.drop('NAT_raw', axis=1, inplace=True)
    parties.remove('NAT')
    # Further processing: derived per-seat statistics.
    df['Region'] = df['Area'].map(constants.region_dict)
    # parties_dict maps the positional argmax index back to a party name.
    parties_dict = {i: parties[i] for i in range(len(parties))}
    parties_raw = [p+'_raw' for p in parties]
    df['Total_votes'] = df[parties_raw].sum(axis=1)
    df['Winner'] = np.argmax(df[parties_raw].values, axis=1)
    df['Winner'] = df['Winner'].map(parties_dict)
    df['Winner_votes'] = df[parties_raw].max(axis=1)
    df['Winner_pct'] = 100.00 * df['Winner_votes'] / df['Total_votes']
    for p in parties:
        df[p+'_pct'] = 100.00 * df[p+'_raw'] / df['Total_votes']
        # Uniform two-party swing a party needs to overtake the winner
        # (zero for the winner itself).
        df[p+'_swing_needed'] = (df['Winner_pct'] - df[p+'_pct']) / 2.00
    # Column index 1 of the row-sorted swings is the runner-up's needed
    # swing (index 0 is the winner's 0.0) == the swing required to gain the seat.
    df['Swing_to_gain_pct'] = np.sort(df[[p+'_swing_needed' for p in parties]], axis=1)[:, 1]
    df['Majority_pct'] = df['Swing_to_gain_pct'] * 2.00
    return df
| import numpy as np
from poluk import constants
def process(df, keep_ni=False, new=None):
""" Tidies up a raw Electoral Calculus data set
:param df: Pandas DataFrame from reading a raw Electoral Calculus data set
:param keep_ni: Boolean to indicate whether Northern Ireland seats should be kept in the output
:param new: Dict of new parties. Keys are party abbreviation. Values are the votes per seat
:return: Pandas DataFrame that's been nicely tidied up
"""
# Gather party codes
parties = list(df.columns[5:])
# Name Northern Irish parties
if len(parties) == 6:
ni_parties = ['UUP', 'SDLP', 'DUP', 'SF', 'MIN', 'OTH']
elif len(parties) == 8:
ni_parties = ['UUP', 'SDLP', 'DUP', 'APNI', 'Green', 'SF', 'MIN', 'OTH']
else:
raise AssertionError('Wrong number of parties in raw Electoral Calculus file')
ni_conv_dict = {parties[i]:ni_parties[i] for i in range(len(parties))}
# Rename raw vote columns
df.columns = list(df.columns[:5]) + [p+'_raw' for p in parties]
# Add new parties
if new:
for p in new.keys():
parties.append(p)
df[p+'_raw'] = new[p]
df[p+'_raw'].fillna(0, inplace=True)
df['OTH_raw'] = (df['OTH_raw'] - new[p]).fillna(df['OTH_raw'])
# Process Northern Ireland
if keep_ni:
for gbp in ni_conv_dict.keys():
nip = ni_conv_dict[gbp]
if nip not in parties:
df[nip+'_raw'] = np.where(df['Area'] == 1, df[gbp+'_raw'], 0)
df[gbp+'_raw'] = np.where(df['Area'] != 1, df[gbp+'_raw'], 0)
parties.append(nip)
else:
df = df[df['Area'] != 1].reset_index(drop=True).copy()
# Process NAT into SNP and Plaid
for t in [('SNP',2),('Plaid',6)]:
df[t[0]+'_raw'] = np.where(df['Area'] == t[1], df['NAT_raw'], 0)
parties.append(t[0])
df.drop('NAT_raw', axis=1, inplace=True)
parties.remove('NAT')
# Further processing
df['Region'] = df['Area'].map(constants.region_dict)
parties_dict = {i: parties[i] for i in range(len(parties))}
parties_raw = [p+'_raw' for p in parties]
df['Total_votes'] = df[parties_raw].sum(axis=1)
df['Winner'] = np.argmax(df[parties_raw].values, axis=1)
df['Winner'] = df['Winner'].map(parties_dict)
df['Winner_votes'] = df[parties_raw].max(axis=1)
df['Winner_pct'] = 100.00 * df['Winner_votes'] / df['Total_votes']
for p in parties:
df[p+'_pct'] = 100.00 * df[p+'_raw'] / df['Total_votes']
df[p+'_swing_needed'] = (df['Winner_pct'] - df[p+'_pct']) / 2.00
df['Swing_to_gain_pct'] = np.sort(df[[p+'_swing_needed' for p in parties]], axis=1)[:, 1]
df['Majority_pct'] = df['Swing_to_gain_pct'] * 2.00
return df
| en | 0.783151 | Tidies up a raw Electoral Calculus data set :param df: Pandas DataFrame from reading a raw Electoral Calculus data set :param keep_ni: Boolean to indicate whether Northern Ireland seats should be kept in the output :param new: Dict of new parties. Keys are party abbreviation. Values are the votes per seat :return: Pandas DataFrame that's been nicely tidied up # Gather party codes # Name Northern Irish parties # Rename raw vote columns # Add new parties # Process Northern Ireland # Process NAT into SNP and Plaid # Further processing | 3.057273 | 3 |
samples/main.py | MindaugasVaitkus2/core-python | 1 | 6613908 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: sample
Desc :
"""
import sys, os, re
__author__ = '<NAME>'
class User(object):
    """Minimal value object holding a user's name (no validation performed)."""
    def __init__(self, name):
        self.name = name
def aa():
    """Yield a 9x9 multiplication table one cell at a time.

    For each row x (1..9) the entries 'y * x = x*y\\t' are yielded for
    y = 1..x, followed by a single '\\n' to end the row.  Once the
    generator is exhausted it prints 'ddd'.
    """
    for row in range(1, 10):
        for col in range(1, row + 2):
            if col <= row:
                yield '%d * %d = %d\t' % (col, row, row * col)
            else:
                yield '\n'
    print('ddd')
# Module-level value read by bb() below.
b = 1


def bb():
    """Print the module-level ``b`` plus two (i.e. 3 with the default ``b``)."""
    total = b + 2
    print(total)
def cc():
    """Print the name of the module this function is defined in."""
    # '__main__' when the file is run as a script, the module name otherwise.
    print(__name__)
    pass
if __name__ == '__main__':
    pass
    # a = 1
    # bb()
    #
    # disklist = [{'wwid': '111', 'name': '11112'},
    #             {'wwid': '222', 'name': '2222'},
    #             {'wwid': '333', 'name': '3332'},
    #             {'wwid': '444', 'name': '4442'}]
    #
    # all_wwiddisk_list = [{'wwid': '111', 'name': '11112'},
    #                      {'wwid': '555', 'name': '5552'},
    #                      {'wwid': '666', 'name': '6662'}]
    #
    # kk = (db_disk['wwid'] for db_disk in disklist)
    # print(kk)
    #
    # print([dd for dd in all_wwiddisk_list if dd['wwid'] not in (db_disk['wwid'] for db_disk in disklist)])
    #
    # if '1' in ('1', '2'):
    #     print('OK')
    #
    # zz = (1, 2)
    # print(type(zz))
    #
    # zz = {}
    # print(type(zz))
    #
    # zz = ()
    # print(type(zz))
    #
    # print(float.__new__(float))
    # print(help(float))
    # print(help(float.__new__))
    # Split an xgboost-style tree-dump line on its structural delimiters.
    test_str = '15:[xd_earn_factor<64.0953] yes=31,no=32,missing=31,gain=25.8518,cover=606'
    print(re.split(r'[:\[<\]=,]', test_str))
    # NOTE(review): the following statement in this file calls
    # '%d'.formart(...) — 'formart' is a typo (str has no such attribute)
    # and '%d'.format() would not substitute anyway; it raises
    # AttributeError at runtime.  Intended result is presumably
    # {key: value} parsed from the 'yes=...,no=...' segment — fix needed.
print({ tt[0] : '%d'.formart(tt[1]) for tt in [rr.split('=') for rr in test_str.split()[1].split(',')]}) | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: sample
Desc :
"""
import sys, os, re
__author__ = '<NAME>'
class User(object):
def __init__(self, name):
self.name = name
def aa():
for x in range(1, 10):
for y in range(1, x + 2):
yield '%d * %d = %d\t' % (y, x, x * y) if y <= x else '\n'
print('ddd')
b = 1
def bb():
a = b + 2
print(a)
def cc():
print(__name__)
pass
if __name__ == '__main__':
pass
# a = 1
# bb()
#
# disklist = [{'wwid': '111', 'name': '11112'},
# {'wwid': '222', 'name': '2222'},
# {'wwid': '333', 'name': '3332'},
# {'wwid': '444', 'name': '4442'}]
#
# all_wwiddisk_list = [{'wwid': '111', 'name': '11112'},
# {'wwid': '555', 'name': '5552'},
# {'wwid': '666', 'name': '6662'}]
#
# kk = (db_disk['wwid'] for db_disk in disklist)
# print(kk)
#
# print([dd for dd in all_wwiddisk_list if dd['wwid'] not in (db_disk['wwid'] for db_disk in disklist)])
#
# if '1' in ('1', '2'):
# print('OK')
#
# zz = (1, 2)
# print(type(zz))
#
# zz = {}
# print(type(zz))
#
# zz = ()
# print(type(zz))
#
# print(float.__new__(float))
# print(help(float))
# print(help(float.__new__))
test_str = '15:[xd_earn_factor<64.0953] yes=31,no=32,missing=31,gain=25.8518,cover=606'
print(re.split(r'[:\[<\]=,]', test_str))
print({ tt[0] : '%d'.formart(tt[1]) for tt in [rr.split('=') for rr in test_str.split()[1].split(',')]}) | en | 0.273163 | #!/usr/bin/env python # -*- encoding: utf-8 -*- Topic: sample Desc : # a = 1 # bb() # # disklist = [{'wwid': '111', 'name': '11112'}, # {'wwid': '222', 'name': '2222'}, # {'wwid': '333', 'name': '3332'}, # {'wwid': '444', 'name': '4442'}] # # all_wwiddisk_list = [{'wwid': '111', 'name': '11112'}, # {'wwid': '555', 'name': '5552'}, # {'wwid': '666', 'name': '6662'}] # # kk = (db_disk['wwid'] for db_disk in disklist) # print(kk) # # print([dd for dd in all_wwiddisk_list if dd['wwid'] not in (db_disk['wwid'] for db_disk in disklist)]) # # if '1' in ('1', '2'): # print('OK') # # zz = (1, 2) # print(type(zz)) # # zz = {} # print(type(zz)) # # zz = () # print(type(zz)) # # print(float.__new__(float)) # print(help(float)) # print(help(float.__new__)) | 3.357941 | 3 |
perception/fairotag/python/fairotag/graph.py | colesbury/fairo | 73 | 6613909 | <reponame>colesbury/fairo
from dataclasses import dataclass
from typing import List, Optional
import numpy as np
import sophus as sp
import gtsam
DEFAULT_HUBER_C = 1.345
USE_ANALYTICAL_JACOBIANS = False
# Factor graph object
class FactorGraph:
    """Wrapper around gtsam.NonlinearFactorGraph that provides functionalities:
    - API that uses Sophus for pose inputs/outputs
    - Macro methods for adding common factors
    - Automatically maps named variables to gtsam shorthand variables
    """

    def __init__(self):
        self.gtsam_graph = gtsam.NonlinearFactorGraph()
        self.factor_edges = {}  # adjacency list (dict): var -> [(neighbor_var, relative transform)]

        # Variables: name -> gtsam symbol, plus the current value estimates.
        self.values = gtsam.Values()
        self.vars = {}
        self.n_variables = 0

        # TODOs for incremental smoothing:
        # - maintain active values
        # - update/delete factors

    @staticmethod
    def _process_noise(noise):
        """Convert a 6-element sigma vector into a gtsam noise model.

        None means a hard (fully constrained) factor.  The vector is
        reordered as [noise[3:], noise[:3]] — presumably translation-first
        input mapped to gtsam's rotation-first Pose3 tangent convention;
        confirm against the callers.  A Huber robust kernel is applied.
        """
        if noise is None:
            return gtsam.noiseModel.Constrained.All(6)
        else:
            noise_gt = np.concatenate([np.array(noise[3:]), np.array(noise[:3])])
            return gtsam.noiseModel.Robust(
                gtsam.noiseModel.mEstimator.Huber(DEFAULT_HUBER_C),
                gtsam.noiseModel.Diagonal.Sigmas(noise_gt),
            )

    def init_variable(self, name, pose=sp.SE3()):
        """Create (or re-initialize) a named pose variable.

        NOTE(review): the default 'pose=sp.SE3()' is a shared default
        object — safe here because it is only read, never mutated.
        """
        pose_gt = sophus2gtsam(pose)

        # Update pose only if already exists
        if name in self.vars:
            var = self.vars[name]
            self.values.update(var, pose_gt)
            return

        # Create symbol
        var = gtsam.symbol_shorthand.X(self.n_variables)
        self.vars[name] = var
        self.n_variables += 1

        # Add to values
        self.values.insert(var, pose_gt)

        # Add to edges
        self.factor_edges[var] = []

    def add_prior(self, var_name, transform, noise=None):
        """ Prior factor: anchor *var_name* at the given absolute pose. """
        noise_gt = self._process_noise(noise)
        transform_gt = sophus2gtsam(transform)
        var = self.vars[var_name]
        factor = gtsam.PriorFactorPose3(var, transform_gt, noise_gt)
        self.gtsam_graph.push_back(factor)

    def add_observation(self, var1_name, var2_name, transform, noise=None):
        """ Between factor: relative pose measurement var1 -> var2. """
        noise_gt = self._process_noise(noise)
        transform_gt = sophus2gtsam(transform)
        var1 = self.vars[var1_name]
        var2 = self.vars[var2_name]
        factor = gtsam.BetweenFactorPose3(var1, var2, transform_gt, noise_gt)
        self.gtsam_graph.push_back(factor)

        # Add edge information (both directions, for BFS initialization).
        self.factor_edges[var1].append((var2, transform_gt))
        self.factor_edges[var2].append((var1, transform_gt.inverse()))

    def add_fixed_transform(self, var1_name, var2_name, transform_name, noise=None):
        """ Custom factor for constant transforms: constrains
        var1.between(var2) to equal the pose held in *transform_name*. """
        noise_gt = self._process_noise(noise)
        var1 = self.vars[var1_name]
        var2 = self.vars[var2_name]
        transform = self.vars[transform_name]
        factor = gtsam.CustomFactor(noise_gt, [var1, var2, transform], frame_error_func)
        self.gtsam_graph.push_back(factor)

    def bfs_initialization(self, root_var_name):
        """Seed all connected variables by chaining relative transforms
        breadth-first from *root_var_name*'s current value.

        NOTE(review): 'visited' is only updated on dequeue, so a node can
        be enqueued more than once before being marked visited; the last
        pop's pose wins — confirm this is intended.
        """
        var0 = self.vars[root_var_name]
        queue = [(var0, self.values.atPose3(var0))]
        visited = set()
        while queue:
            curr_var, pose = queue.pop(0)
            self.values.update(curr_var, pose)
            for next_var, transform in self.factor_edges[curr_var]:
                if next_var not in visited:
                    queue.append((next_var, pose * transform))
            visited.add(curr_var)

    def optimize(self, verbosity=0):
        """Run Levenberg-Marquardt and return {name: sophus.SE3} results.

        verbosity: 0 = silent, 1 = print termination info.
        """
        params = gtsam.LevenbergMarquardtParams()
        params.setVerbosity(["SILENT", "TERMINATION"][verbosity])
        params.setAbsoluteErrorTol(1e-10)
        params.setRelativeErrorTol(1e-10)

        optimizer = gtsam.LevenbergMarquardtOptimizer(self.gtsam_graph, self.values, params)
        result_values = optimizer.optimize()

        return {name: gtsam2sophus(result_values.atPose3(var)) for name, var in self.vars.items()}
# Helper functions
def sophus2gtsam(pose):
    """Convert a sophus.SE3 pose to gtsam.Pose3 via its 4x4 homogeneous matrix."""
    return gtsam.Pose3(pose.matrix())
def gtsam2sophus(pose):
    """Convert a gtsam.Pose3 back to sophus.SE3 via its 4x4 homogeneous matrix."""
    return sp.SE3(pose.matrix())
# Custom factor for frames
def pose_jacobian_numerical(f, x, delta=1e-5):
    """Central-difference 6x6 Jacobian of ``f`` at pose ``x``.

    Each column i perturbs ``x`` on the left by Expmap(+/-delta * e_i)
    and differences the resulting 6-vector outputs of ``f``.
    """
    jac = np.zeros([6, 6])
    for i in range(6):
        delta_arr = np.zeros(6)
        delta_arr[i] = delta
        # Left-multiplied perturbations of the pose along tangent axis i.
        pose_offset_p = gtsam.Pose3.Expmap(delta_arr) * x
        pose_offset_n = gtsam.Pose3.Expmap(-delta_arr) * x
        jac[:, i] = (f(pose_offset_p) - f(pose_offset_n)) / (2 * delta)
    return jac
def pose_jacobian_analytical(f, x):  # TODO: Debug
    """Attempted analytical 6x6 Jacobian — known broken (see module flag).

    NOTE(review): unlike the numerical version this never composes the
    perturbation with ``x`` (columns are f(Expmap(column of Logmap
    derivative))), which looks incorrect; USE_ANALYTICAL_JACOBIANS is
    False so this path is currently dead code.
    """
    jac = np.zeros([6, 6])
    jac_x = gtsam.Pose3.LogmapDerivative(x)
    for i in range(6):
        jac[:, i] = f(gtsam.Pose3.Expmap(jac_x[:, i]))
    return jac
def frame_error_func(this: gtsam.CustomFactor, v, H: Optional[List[np.ndarray]]):
    """Error function for the fixed-transform CustomFactor.

    Keys: [pose0, pose1, pose2] where pose2 holds the expected relative
    transform; the error is the tangent-space residual between
    pose0.between(pose1) and pose2.  When gtsam requests Jacobians
    (H is not None) they are filled per-key using the numerical (or,
    if enabled, analytical) pose Jacobian helpers.
    """
    pose0 = v.atPose3(this.keys()[0])
    pose1 = v.atPose3(this.keys()[1])
    pose2 = v.atPose3(this.keys()[2])

    # Compute error
    def pose_err(pose_0, pose_1, pose_01_expected):
        pose_01 = pose_0.between(pose_1)
        error = pose_01_expected.localCoordinates(pose_01)
        return error

    error = pose_err(pose0, pose1, pose2)

    # Compute Jacobians (one 6x6 block per involved variable).
    if H is not None:
        if USE_ANALYTICAL_JACOBIANS:
            jac_func = pose_jacobian_analytical
        else:
            jac_func = pose_jacobian_numerical

        H[0] = jac_func(
            lambda x: pose_err(x, pose1, pose2),
            x=pose0,
        )
        H[1] = jac_func(
            lambda x: pose_err(pose0, x, pose2),
            x=pose1,
        )
        H[2] = jac_func(
            lambda x: pose_err(pose0, pose1, x),
            x=pose2,
        )

    return error
| from dataclasses import dataclass
from typing import List, Optional
import numpy as np
import sophus as sp
import gtsam
DEFAULT_HUBER_C = 1.345
USE_ANALYTICAL_JACOBIANS = False
# Factor graph object
class FactorGraph:
"""Wrapper around gtsam.NonlinearFactorGraph that provides functionalities:
- API that uses Sophus for pose inputs/outputs
- Macro methods for adding common factors
- Automatically maps named variables to gtsam shorthand variables
"""
def __init__(self):
self.gtsam_graph = gtsam.NonlinearFactorGraph()
self.factor_edges = {} # adjacency list (dict)
# Variables
self.values = gtsam.Values()
self.vars = {}
self.n_variables = 0
# TODOs for incremental smoothing:
# - maintain active values
# - update/delete factors
@staticmethod
def _process_noise(noise):
if noise is None:
return gtsam.noiseModel.Constrained.All(6)
else:
noise_gt = np.concatenate([np.array(noise[3:]), np.array(noise[:3])])
return gtsam.noiseModel.Robust(
gtsam.noiseModel.mEstimator.Huber(DEFAULT_HUBER_C),
gtsam.noiseModel.Diagonal.Sigmas(noise_gt),
)
def init_variable(self, name, pose=sp.SE3()):
pose_gt = sophus2gtsam(pose)
# Update pose only if already exists
if name in self.vars:
var = self.vars[name]
self.values.update(var, pose_gt)
return
# Create symbol
var = gtsam.symbol_shorthand.X(self.n_variables)
self.vars[name] = var
self.n_variables += 1
# Add to values
self.values.insert(var, pose_gt)
# Add to edges
self.factor_edges[var] = []
def add_prior(self, var_name, transform, noise=None):
""" Prior factor """
noise_gt = self._process_noise(noise)
transform_gt = sophus2gtsam(transform)
var = self.vars[var_name]
factor = gtsam.PriorFactorPose3(var, transform_gt, noise_gt)
self.gtsam_graph.push_back(factor)
def add_observation(self, var1_name, var2_name, transform, noise=None):
""" Between factor """
noise_gt = self._process_noise(noise)
transform_gt = sophus2gtsam(transform)
var1 = self.vars[var1_name]
var2 = self.vars[var2_name]
factor = gtsam.BetweenFactorPose3(var1, var2, transform_gt, noise_gt)
self.gtsam_graph.push_back(factor)
# Add edge information
self.factor_edges[var1].append((var2, transform_gt))
self.factor_edges[var2].append((var1, transform_gt.inverse()))
def add_fixed_transform(self, var1_name, var2_name, transform_name, noise=None):
""" Custom factor for constant transforms """
noise_gt = self._process_noise(noise)
var1 = self.vars[var1_name]
var2 = self.vars[var2_name]
transform = self.vars[transform_name]
factor = gtsam.CustomFactor(noise_gt, [var1, var2, transform], frame_error_func)
self.gtsam_graph.push_back(factor)
def bfs_initialization(self, root_var_name):
var0 = self.vars[root_var_name]
queue = [(var0, self.values.atPose3(var0))]
visited = set()
while queue:
curr_var, pose = queue.pop(0)
self.values.update(curr_var, pose)
for next_var, transform in self.factor_edges[curr_var]:
if next_var not in visited:
queue.append((next_var, pose * transform))
visited.add(curr_var)
def optimize(self, verbosity=0):
params = gtsam.LevenbergMarquardtParams()
params.setVerbosity(["SILENT", "TERMINATION"][verbosity])
params.setAbsoluteErrorTol(1e-10)
params.setRelativeErrorTol(1e-10)
optimizer = gtsam.LevenbergMarquardtOptimizer(self.gtsam_graph, self.values, params)
result_values = optimizer.optimize()
return {name: gtsam2sophus(result_values.atPose3(var)) for name, var in self.vars.items()}
# Helper functions
def sophus2gtsam(pose):
return gtsam.Pose3(pose.matrix())
def gtsam2sophus(pose):
return sp.SE3(pose.matrix())
# Custom factor for frames
def pose_jacobian_numerical(f, x, delta=1e-5):
jac = np.zeros([6, 6])
for i in range(6):
delta_arr = np.zeros(6)
delta_arr[i] = delta
pose_offset_p = gtsam.Pose3.Expmap(delta_arr) * x
pose_offset_n = gtsam.Pose3.Expmap(-delta_arr) * x
jac[:, i] = (f(pose_offset_p) - f(pose_offset_n)) / (2 * delta)
return jac
def pose_jacobian_analytical(f, x): # TODO: Debug
jac = np.zeros([6, 6])
jac_x = gtsam.Pose3.LogmapDerivative(x)
for i in range(6):
jac[:, i] = f(gtsam.Pose3.Expmap(jac_x[:, i]))
return jac
def frame_error_func(this: gtsam.CustomFactor, v, H: Optional[List[np.ndarray]]):
pose0 = v.atPose3(this.keys()[0])
pose1 = v.atPose3(this.keys()[1])
pose2 = v.atPose3(this.keys()[2])
# Compute error
def pose_err(pose_0, pose_1, pose_01_expected):
pose_01 = pose_0.between(pose_1)
error = pose_01_expected.localCoordinates(pose_01)
return error
error = pose_err(pose0, pose1, pose2)
# Compute Jacobians
if H is not None:
if USE_ANALYTICAL_JACOBIANS:
jac_func = pose_jacobian_analytical
else:
jac_func = pose_jacobian_numerical
H[0] = jac_func(
lambda x: pose_err(x, pose1, pose2),
x=pose0,
)
H[1] = jac_func(
lambda x: pose_err(pose0, x, pose2),
x=pose1,
)
H[2] = jac_func(
lambda x: pose_err(pose0, pose1, x),
x=pose2,
)
return error | en | 0.61849 | # Factor graph object Wrapper around gtsam.NonlinearFactorGraph that provides functionalities: - API that uses Sophus for pose inputs/outputs - Macro methods for adding common factors - Automatically maps named variables to gtsam shorthand variables # adjacency list (dict) # Variables # TODOs for incremental smoothing: # - maintain active values # - update/delete factors # Update pose only if already exists # Create symbol # Add to values # Add to edges Prior factor Between factor # Add edge information Custom factor for constant transforms # Helper functions # Custom factor for frames # TODO: Debug # Compute error # Compute Jacobians | 2.574377 | 3 |
db_operator.py | ckyycc/hana_os_monitor_script_v2 | 6 | 6613910 | <reponame>ckyycc/hana_os_monitor_script_v2<gh_stars>1-10
import threading
from util import MonitorUtility as Mu
from util import MonitorConst as Mc
from util import KafKaUtility as Ku
from util import InfoType
from errors import MonitorDBOpError
from operation.db_operations import HANAMonitorDAO
class DBOperator(threading.Thread):
    """Consumer thread: reads filtered monitoring messages from Kafka and
    dispatches each one to the matching DB-update method by message type."""

    def __init__(self):
        super().__init__()
        self.__logger = Mu.get_logger(Mc.LOGGER_MONITOR_OPERATOR_DB)
        self._db_operator = HANAOperatorService.instance()

    def __operate(self, consumer):
        """Drain *consumer*, routing each message by its MSG_TYPE field."""
        # Dispatch table: message type value -> DB-update handler.
        operators = {
            InfoType.MEMORY.value: self._db_operator.update_mem_monitoring_info,
            InfoType.CPU.value: self._db_operator.update_cpu_monitoring_info,
            InfoType.DISK.value: self._db_operator.update_disk_monitoring_info,
            InfoType.INSTANCE.value: self._db_operator.update_instance_monitoring_info
        }
        for msg in consumer:
            # Skip malformed/empty messages that lack a type field.
            if msg and msg.value and Mc.MSG_TYPE in msg.value:
                operators[msg.value[Mc.MSG_TYPE]](msg.value)

    def run(self):
        """run the thread"""
        # Reconnect forever: when the consumer iterator ends (connection
        # lost / topic drained), build a new consumer and resume.
        while True:
            consumer = Ku.get_consumer(Mc.MONITOR_GROUP_ID_DB_OPERATOR, Mc.TOPIC_FILTERED_INFO)
            self.__operate(consumer)
            Mu.log_warning(self.__logger, "Topic is empty or connection is lost. Trying to reconnect...")
class HANAOperatorService:
    """ HANA Server DB operator, responsible for all DB relative operations, it's designed as singleton.
    To get the instance of this class: HANAOperatorService.instance()
    Initialize the class using HANAOperatorService() will raise an exception.
    """
    __instance = None

    @staticmethod
    def instance():
        """static access method for singleton (creates the instance on first use)"""
        if HANAOperatorService.__instance is None:
            HANAOperatorService()
        return HANAOperatorService.__instance

    def __init__(self):
        # implement the singleton class
        if HANAOperatorService.__instance is not None:
            # Fixed: message previously told callers to use the non-existent
            # class "HANAServerDBOperatorService".
            raise MonitorDBOpError("This class is a singleton, use HANAOperatorService.instance() instead")
        else:
            HANAOperatorService.__instance = self

        self.__monitor_dao = HANAMonitorDAO(Mc.get_hana_server(),
                                            Mc.get_hana_port(),
                                            Mc.get_hana_user(),
                                            Mc.get_hana_password())
        self.__logger = Mu.get_logger(Mc.LOGGER_MONITOR_OPERATOR_DB)

    def __update_server_info(self, check_id, server_id, **kwargs):
        """Persist the server-level overview row; metrics not supplied via
        kwargs are written as None (left untouched by the DAO)."""
        disk_total = kwargs.get("disk_total", None)
        disk_free = kwargs.get("disk_free", None)
        mem_total = kwargs.get("mem_total", None)
        mem_free = kwargs.get("mem_free", None)
        cpu_usage = kwargs.get("cpu_usage", None)
        server_info = {Mc.FIELD_DISK_TOTAL: disk_total,
                       Mc.FIELD_DISK_FREE: disk_free,
                       Mc.FIELD_MEM_TOTAL: mem_total,
                       Mc.FIELD_MEM_FREE: mem_free,
                       Mc.FIELD_CPU_UTILIZATION: cpu_usage}

        self.__monitor_dao.update_server_monitoring_info(check_id, server_id, server_info)

    def update_mem_monitoring_info(self, info):
        """Write memory totals to the server row plus the detailed memory records."""
        cid = info[Mc.FIELD_CHECK_ID]
        sid = info[Mc.FIELD_SERVER_ID]
        self.__update_server_info(cid, sid, mem_total=info[Mc.FIELD_MEM_TOTAL], mem_free=info[Mc.FIELD_MEM_FREE])
        self.__monitor_dao.update_mem_monitoring_info(cid, sid, info[Mc.MSG_INFO])

    def update_disk_monitoring_info(self, info):
        """Write disk totals to the server row plus the detailed disk records."""
        cid = info[Mc.FIELD_CHECK_ID]
        sid = info[Mc.FIELD_SERVER_ID]
        self.__update_server_info(cid, sid, disk_total=info[Mc.FIELD_DISK_TOTAL], disk_free=info[Mc.FIELD_DISK_FREE])
        self.__monitor_dao.update_disk_monitoring_info(cid, sid, info[Mc.MSG_INFO])

    def update_cpu_monitoring_info(self, info):
        """Write CPU utilization to the server row plus the detailed CPU records."""
        cid = info[Mc.FIELD_CHECK_ID]
        sid = info[Mc.FIELD_SERVER_ID]
        self.__update_server_info(cid, sid, cpu_usage=info[Mc.FIELD_CPU_UTILIZATION])
        self.__monitor_dao.update_cpu_monitoring_info(cid, sid, info[Mc.MSG_INFO])

    def update_instance_monitoring_info(self, info):
        """Write HANA-instance status records (no server-row update needed)."""
        cid = info[Mc.FIELD_CHECK_ID]
        sid = info[Mc.FIELD_SERVER_ID]
        self.__monitor_dao.update_instance_info(cid, sid, info[Mc.MSG_INFO])
if __name__ == '__main__':
    # Entry point: start the Kafka-consuming DB-operator thread.
    DBOperator().start()
| import threading
from util import MonitorUtility as Mu
from util import MonitorConst as Mc
from util import KafKaUtility as Ku
from util import InfoType
from errors import MonitorDBOpError
from operation.db_operations import HANAMonitorDAO
class DBOperator(threading.Thread):
def __init__(self):
super().__init__()
self.__logger = Mu.get_logger(Mc.LOGGER_MONITOR_OPERATOR_DB)
self._db_operator = HANAOperatorService.instance()
def __operate(self, consumer):
operators = {
InfoType.MEMORY.value: self._db_operator.update_mem_monitoring_info,
InfoType.CPU.value: self._db_operator.update_cpu_monitoring_info,
InfoType.DISK.value: self._db_operator.update_disk_monitoring_info,
InfoType.INSTANCE.value: self._db_operator.update_instance_monitoring_info
}
for msg in consumer:
if msg and msg.value and Mc.MSG_TYPE in msg.value:
operators[msg.value[Mc.MSG_TYPE]](msg.value)
def run(self):
"""run the thread"""
while True:
consumer = Ku.get_consumer(Mc.MONITOR_GROUP_ID_DB_OPERATOR, Mc.TOPIC_FILTERED_INFO)
self.__operate(consumer)
Mu.log_warning(self.__logger, "Topic is empty or connection is lost. Trying to reconnect...")
class HANAOperatorService:
    """HANA monitoring DB operator (singleton).

    Owns all DB-related operations of the monitor.  Obtain the shared
    object via ``HANAOperatorService.instance()``; constructing a second
    instance raises :class:`MonitorDBOpError`.
    """

    __instance = None

    @staticmethod
    def instance():
        """Return the shared instance, creating it lazily on first use."""
        if HANAOperatorService.__instance is None:
            HANAOperatorService()
        return HANAOperatorService.__instance

    def __init__(self):
        # Guard clause enforcing the singleton contract.
        if HANAOperatorService.__instance is not None:
            raise MonitorDBOpError("This class is a singleton, use HANAServerDBOperatorService.instance() instead")
        HANAOperatorService.__instance = self
        self.__monitor_dao = HANAMonitorDAO(Mc.get_hana_server(),
                                            Mc.get_hana_port(),
                                            Mc.get_hana_user(),
                                            Mc.get_hana_password())
        self.__logger = Mu.get_logger(Mc.LOGGER_MONITOR_OPERATOR_DB)

    def __update_server_info(self, check_id, server_id, **kwargs):
        """Refresh the per-server overview row; absent metrics stay None."""
        metrics = {
            Mc.FIELD_DISK_TOTAL: kwargs.get("disk_total"),
            Mc.FIELD_DISK_FREE: kwargs.get("disk_free"),
            Mc.FIELD_MEM_TOTAL: kwargs.get("mem_total"),
            Mc.FIELD_MEM_FREE: kwargs.get("mem_free"),
            Mc.FIELD_CPU_UTILIZATION: kwargs.get("cpu_usage"),
        }
        self.__monitor_dao.update_server_monitoring_info(check_id, server_id, metrics)

    def update_mem_monitoring_info(self, info):
        """Store memory metrics and refresh the server overview row."""
        check_id = info[Mc.FIELD_CHECK_ID]
        server_id = info[Mc.FIELD_SERVER_ID]
        self.__update_server_info(check_id, server_id,
                                  mem_total=info[Mc.FIELD_MEM_TOTAL],
                                  mem_free=info[Mc.FIELD_MEM_FREE])
        self.__monitor_dao.update_mem_monitoring_info(check_id, server_id, info[Mc.MSG_INFO])

    def update_disk_monitoring_info(self, info):
        """Store disk metrics and refresh the server overview row."""
        check_id = info[Mc.FIELD_CHECK_ID]
        server_id = info[Mc.FIELD_SERVER_ID]
        self.__update_server_info(check_id, server_id,
                                  disk_total=info[Mc.FIELD_DISK_TOTAL],
                                  disk_free=info[Mc.FIELD_DISK_FREE])
        self.__monitor_dao.update_disk_monitoring_info(check_id, server_id, info[Mc.MSG_INFO])

    def update_cpu_monitoring_info(self, info):
        """Store CPU metrics and refresh the server overview row."""
        check_id = info[Mc.FIELD_CHECK_ID]
        server_id = info[Mc.FIELD_SERVER_ID]
        self.__update_server_info(check_id, server_id, cpu_usage=info[Mc.FIELD_CPU_UTILIZATION])
        self.__monitor_dao.update_cpu_monitoring_info(check_id, server_id, info[Mc.MSG_INFO])

    def update_instance_monitoring_info(self, info):
        """Store instance-level info; the overview row is not involved."""
        check_id = info[Mc.FIELD_CHECK_ID]
        server_id = info[Mc.FIELD_SERVER_ID]
        self.__monitor_dao.update_instance_info(check_id, server_id, info[Mc.MSG_INFO])
if __name__ == '__main__':
    DBOperator().start()
#!/usr/bin/env python
"""
COSMO TECHNICAL TESTSUITE
General purpose script to compare two YUPRTEST output files
"""
# built-in modules
import os, sys, string
# information
__author__ = "<NAME>, <NAME>"
__email__ = "<EMAIL>"
__maintainer__ = "<EMAIL>"
def cmp_(file1,file2, \
v_level=0,minval=1e-15, \
nts=[10,100,200], \
tol_ts=[1e-15,1e-15,1e-15], \
tol_as=[1e-15,1e-15,1e-15]):
# compare two YUPRTEST file1, file2 with tolerance tol_*
# Values smaller than minval are not considered.
# Different threshold are used for different time step.
# v_level:verbose level
# -1 -> no print out
# 0 -> max diff over all variables for each time step, short
# 1 -> max diff over all variables for each time step, print lines
# 2 -> show all lines with differences above tol_
# if minval is set to -1 compares absolute differences
# the comparison is only done for overlapping time steps
# check file existence
if not(os.path.exists(file1)):
print('File '+file1+' does not exist')
return -1
elif not(os.path.exists(file2)):
print('File '+file2+' does not exist')
return -1
# open file
data1=open(file1).readlines()
data2=open(file2).readlines()
# variables initialisation
error_count = 0 #number of error detected
tol_t = tol_ts[0] #set tolerence for t
tol_a = tol_as[0] #set tolerence for all other variables
maxdiff_t=0. #maximal diff per time step for t or p
line1_t='' #line file1 with max diff
line2_t='' #line file2 with max diff
lnum_t=0
maxdiff_a=0. #maximal diff per time step for all variables
line1_a='' #line file1 with max diff
line2_a='' #line file2 with max diff
lnum_a=0
print_header = True
header = ' '
comment_type='#' #comment at the begining wil be skiped
ntstep=0
lerror_t= False
lerror_a = False
if v_level==0:
print_out='./tools/comp_yuprtest.py ' + file1 + ' ' + file2 + ' ' + str(v_level) + ' ' + str(minval) + \
' ' + ','.join([str(x) for x in nts]) + ' ' + ','.join([str(x) for x in tol_ts]) + \
' ' + ','.join([str(x) for x in tol_as]) + '\n'
if minval==-1:
print_out+='Absolute error:\n'
else:
print_out+='Relative error:\n'
print_out+=' nt max_all t Test \n'
if v_level>0:
if minval==-1:
print('Comparing absolute differences ...')
else:
print('Comparing relative differences, min. value is %1.0e ...' %(minval))
# check that files are not empty
if len(data1)==0:
print('file ' + file1 + ' is empty!')
return -1
if len(data1)<=4:
print('file ' + file1 + ' contains only header!')
return -1
if len(data2)==0:
print('file ' + file2 + ' is empty!')
return -1
if len(data2)<=4:
print('file ' + file2 + ' contains only header!')
return -1
# set file counter
i1=0
i2=0
# remove the headers part (all lines starting with comment_type)
# first on file1
while True:
line=data1[i1].split()
if line[0]!=comment_type:
break
i1=i1+1
# then on file2
while True:
line=data2[i2].split()
if line[0]!=comment_type:
break
i2=i2+1
# Set the file counters to identical time step
while True:
#check eof
if (i1>=len(data1)) or (i2>=len(data2)):
print('Files %s and %s do not have overlapping time steps and can not be compared.' %(file1,file2))
return -1
l1=data1[i1].split()
l2=data2[i2].split()
nt1=int(l1[1])
nt2=int(l2[1])
if nt1<nt2:
i1=i1+1
elif nt1>nt2:
i2=i2+1
elif nt1==nt2:
break
else:
break
ntstep=nt1
leof=False
#----------------------------------------------------------------------------------------------------
#loop over file lines
while True:
#check eof
if (i1>=len(data1)) or (i2>=len(data2)):
leof=True
#read file
else:
l1=data1[i1].split()
l2=data2[i2].split()
#----------------------------------------------------------------------------------------------------
#prepare printout if new time step or eof
if (int(l1[1]) != ntstep ) or leof:
if (v_level==0):
if (lerror_t) or (lerror_a):
print_out+='%4i %1.2e %1.2e FAILED \n' %(ntstep,maxdiff_a,maxdiff_t)
else:
print_out+='%4i %1.2e %1.2e OK \n' %(ntstep,maxdiff_a,maxdiff_t)
#print if verbose=1 and error at this step
if (lerror_t) and (v_level==1):
if print_header:
print(header)
print_header=False
print('nt=%i, max rel. er. t,p: %1.1e above threshold %1.1e, at line %i' %(ntstep,maxdiff_t,tol_t,lnum_t))
print('>'+ line1_t.rstrip())
print('<'+ line2_t)
if (lerror_a) and (v_level==1):
if print_header:
print(header)
print_header=False
print('nt=%i, max rel. er. all: %1.1e above threshold %1.1e, at line %i' %(ntstep,maxdiff_a,tol_a,lnum_a))
print('>'+ line1_a.rstrip())
print('<'+ line2_a)
#exit loop if eof
if leof:
break
#set step and reset local error counter
ntstep=int(l1[1])
lerror_t=False
lerror_a =False
maxdiff_t=0.
maxdiff_a=0.
# Set threshold
#
# tol[i] is set for t=[nts[i] nts[i+1]]
for i in range(len(nts)):
# update threshold if step larger than nts
if ntstep >= nts[i]:
#only update if i<=len(tol_ts)
if (i<len(tol_ts)-1):
tol_t=tol_ts[i+1]
tol_a=tol_as[i+1]
#----------------------------------------------------------------------------------------------------
# Comparing lines
varname=l1[0]
varname2=l2[0]
#check that it is the same variable in both file
if varname.strip()!=varname2.strip():
print('!! Error: Variables differ')
print(' %s at line %i in file %s' %(varname,i1+1,file1))
print(' %s at line %i in file %s' %(varname2,i2+1,file2))
error_count+=1
lerror_t=True
lerror_a =True
return -1
#check that it is the same time step
if int(l1[1])!=int(l2[1]):
print('!! Error: Time steps differ')
print(' nt=%s at line %i in file %s' %(l1[1],i1+1,file1))
print(' nt=%s at line %i in file %s' %(l2[1],i2+1,file2))
error_count+=1
lerror_t=True
lerror_a =True
return -1
#compare numerical values on this line
for j in range(len(l1)):
pr_line=True
if is_num(l1[j]):
n1=float(l1[j])
n2=float(l2[j])
#absolute diffference
if minval==-1 and is_not_int(n1): #note: int are not considered (min-max index)
ldiff=abs(n1-n2)
#relative diffference
elif abs(n1)>minval and is_not_int(n1): #note: int are not considered (min-max index)
ldiff=abs((n1-n2)/n1)
else:
ldiff=0
#Use tol_t threshold for temperature field
if (varname in ['T']):
# save max
if ldiff > maxdiff_t:
maxdiff_t=ldiff
line1_t=data1[i1]
line2_t=data2[i2]
lnum_t=i1+1
#check if larger than tol
if ldiff > tol_t:
error_count+=1
lerror_t=True
# print line
if (v_level==2 and pr_line):
if print_header:
print(header)
print_header=False
if pr_line:
print('>' + data1[i1].rstrip())
print('<' + data2[i2])
pr_line=False
#Use tol_a threshold for all other fields
else:
# save max
if ldiff > maxdiff_a:
maxdiff_a=ldiff
line1_a=data1[i1]
line2_a=data2[i2]
lnum_a=i1+1
#check if larger than tol
if ldiff > tol_a:
error_count+=1
lerror_a=True
# print line
if (v_level==2 and pr_line):
if print_header:
print(header)
print_header=False
if pr_line:
print('>' + data1[i1].rstrip())
print('<' + data2[i2])
pr_line=False
# moves forward the 2 file counters
i1=i1+1
i2=i2+1
#print if error detected for verbose 0
if (v_level==0) and (error_count>0):
print(print_out)
if v_level>0 and error_count==0:
print('no difference above threshold')
return error_count
#----------------------------------------------------------------------------
# Local functions
def is_num(x):
    """Return True when *x* can be parsed as a float, False otherwise."""
    try:
        float(x)
    except ValueError:
        return False
    return True
def is_not_int(x):
    """Return True unless *x* is a non-zero integer-valued number.

    0.0 deliberately counts as "not int" (preserving the original note)
    so zero values still go through the numeric-comparison path, while
    integer-valued tokens such as the min/max index columns are skipped.

    BUGFIX: the previous int()/division implementation crashed with
    OverflowError on infinities and TypeError on non-numeric strings that
    int() happened to accept; parsing via float() handles both.
    """
    try:
        value = float(x)
    except (ValueError, TypeError):
        # not a number at all -> treat as "not int"
        return True
    if value == 0.0:
        return True
    # NaN/inf are not integer-valued, so they also return True here.
    return not value.is_integer()
#-----------------------------------
#execute as a script
if __name__ == "__main__":
    # Command-line entry point: each extra positional argument is converted
    # and forwarded to cmp_ in order; the last three are parsed as
    # comma-separated lists of floats (nts, tol_ts, tol_as).
    if len(sys.argv)==3:
        cmp_(sys.argv[1],sys.argv[2])
    elif len(sys.argv)==4:
        cmp_(sys.argv[1],sys.argv[2],int(sys.argv[3]))
    elif len(sys.argv)==5:
        cmp_(sys.argv[1],sys.argv[2],int(sys.argv[3]), \
             float(sys.argv[4]))
    elif len(sys.argv)==6:
        cmp_(sys.argv[1],sys.argv[2],int(sys.argv[3]), \
             float(sys.argv[4]), \
             [float(el) for el in sys.argv[5].split(',')])
    elif len(sys.argv)==7:
        cmp_(sys.argv[1],sys.argv[2],int(sys.argv[3]), \
             float(sys.argv[4]), \
             [float(el) for el in sys.argv[5].split(',')], \
             [float(el) for el in sys.argv[6].split(',')])
    elif len(sys.argv)==8:
        cmp_(sys.argv[1],sys.argv[2],int(sys.argv[3]), \
             float(sys.argv[4]), \
             [float(el) for el in sys.argv[5].split(',')], \
             [float(el) for el in sys.argv[6].split(',')], \
             [float(el) for el in sys.argv[7].split(',')])
    else:
        # too few or too many arguments: print usage and exit
        print('''USAGE : compare_files.py file1 file2 [v_level=0 minval=1e-15
                 nts=10,100,200
                 tol_ts=1e-15,1e-15,1e-15
                 tol_as=1e-15,1e-15,1e-15 ]
DEFINITION : Compare relative differences between two YUPRTEST file1, file2
with tolerance tol_*. Values smaller than minval are not considered.
Different thresholds are used for different time steps. Thresholds tol_ts are used for
the temperature field while tol_as are used for all other fields.
if minval set to -1 compares absolute instead of relative differences. ''')
#!/usr/bin/env python
"""
COSMO TECHNICAL TESTSUITE
General purpose script to compare two YUPRTEST output files
"""
# built-in modules
import os, sys, string
# information
__author__ = "<NAME>, <NAME>"
__email__ = "<EMAIL>"
__maintainer__ = "<EMAIL>"
def cmp_(file1,file2, \
         v_level=0,minval=1e-15, \
         nts=[10,100,200], \
         tol_ts=[1e-15,1e-15,1e-15], \
         tol_as=[1e-15,1e-15,1e-15]):
    """Compare two YUPRTEST files; return the number of differences found.

    Returns -1 on structural problems (missing/empty file, mismatching
    variable names or time steps, no overlapping time steps).

    NOTE(review): the defaults are shared mutable lists.  They are never
    mutated inside this function, but they should be converted to
    None-defaults before anyone mutates them.
    """
    # compare two YUPRTEST file1, file2 with tolerance tol_*
    # Values smaller than minval are not considered.
    # Different threshold are used for different time step.
    # v_level: verbose level
    #   -1 -> no print out
    #    0 -> max diff over all variables for each time step, short
    #    1 -> max diff over all variables for each time step, print lines
    #    2 -> show all lines with differences above tol_
    # if minval is set to -1 compares absolute differences
    # the comparison is only done for overlapping time steps

    # check file existence
    if not(os.path.exists(file1)):
        print('File '+file1+' does not exist')
        return -1
    elif not(os.path.exists(file2)):
        print('File '+file2+' does not exist')
        return -1

    # open file (handles are closed only at interpreter exit)
    data1=open(file1).readlines()
    data2=open(file2).readlines()

    # variables initialisation
    error_count = 0        #number of error detected
    tol_t = tol_ts[0]      #set tolerence for t
    tol_a = tol_as[0]      #set tolerence for all other variables
    maxdiff_t=0.           #maximal diff per time step for t or p
    line1_t=''             #line file1 with max diff
    line2_t=''             #line file2 with max diff
    lnum_t=0
    maxdiff_a=0.           #maximal diff per time step for all variables
    line1_a=''             #line file1 with max diff
    line2_a=''             #line file2 with max diff
    lnum_a=0
    print_header = True
    header = ' '
    comment_type='#'       #comment at the begining wil be skiped
    ntstep=0
    lerror_t= False
    lerror_a = False

    if v_level==0:
        # short mode accumulates the report and prints it only at the end
        print_out='./tools/comp_yuprtest.py ' + file1 + ' ' + file2 + ' ' + str(v_level) + ' ' + str(minval) + \
            ' ' + ','.join([str(x) for x in nts]) + ' ' + ','.join([str(x) for x in tol_ts]) + \
            ' ' + ','.join([str(x) for x in tol_as]) + '\n'
        if minval==-1:
            print_out+='Absolute error:\n'
        else:
            print_out+='Relative error:\n'
        print_out+=' nt max_all t Test \n'
    if v_level>0:
        if minval==-1:
            print('Comparing absolute differences ...')
        else:
            print('Comparing relative differences, min. value is %1.0e ...' %(minval))

    # check that files are not empty
    if len(data1)==0:
        print('file ' + file1 + ' is empty!')
        return -1
    if len(data1)<=4:
        print('file ' + file1 + ' contains only header!')
        return -1
    if len(data2)==0:
        print('file ' + file2 + ' is empty!')
        return -1
    if len(data2)<=4:
        print('file ' + file2 + ' contains only header!')
        return -1

    # set file counter
    i1=0
    i2=0

    # remove the headers part (all lines starting with comment_type)
    # NOTE(review): a blank header line would raise IndexError on line[0]
    # first on file1
    while True:
        line=data1[i1].split()
        if line[0]!=comment_type:
            break
        i1=i1+1
    # then on file2
    while True:
        line=data2[i2].split()
        if line[0]!=comment_type:
            break
        i2=i2+1

    # Set the file counters to identical time step
    while True:
        #check eof
        if (i1>=len(data1)) or (i2>=len(data2)):
            print('Files %s and %s do not have overlapping time steps and can not be compared.' %(file1,file2))
            return -1
        l1=data1[i1].split()
        l2=data2[i2].split()
        nt1=int(l1[1])
        nt2=int(l2[1])
        if nt1<nt2:
            i1=i1+1
        elif nt1>nt2:
            i2=i2+1
        elif nt1==nt2:
            break
        else:
            break
    ntstep=nt1
    leof=False

    #----------------------------------------------------------------------------------------------------
    #loop over file lines
    while True:
        #check eof
        if (i1>=len(data1)) or (i2>=len(data2)):
            leof=True
        #read file
        else:
            l1=data1[i1].split()
            l2=data2[i2].split()

        #----------------------------------------------------------------------------------------------------
        #prepare printout if new time step or eof
        if (int(l1[1]) != ntstep ) or leof:
            if (v_level==0):
                if (lerror_t) or (lerror_a):
                    print_out+='%4i %1.2e %1.2e FAILED \n' %(ntstep,maxdiff_a,maxdiff_t)
                else:
                    print_out+='%4i %1.2e %1.2e OK \n' %(ntstep,maxdiff_a,maxdiff_t)
            #print if verbose=1 and error at this step
            if (lerror_t) and (v_level==1):
                if print_header:
                    print(header)
                    print_header=False
                print('nt=%i, max rel. er. t,p: %1.1e above threshold %1.1e, at line %i' %(ntstep,maxdiff_t,tol_t,lnum_t))
                print('>'+ line1_t.rstrip())
                print('<'+ line2_t)
            if (lerror_a) and (v_level==1):
                if print_header:
                    print(header)
                    print_header=False
                print('nt=%i, max rel. er. all: %1.1e above threshold %1.1e, at line %i' %(ntstep,maxdiff_a,tol_a,lnum_a))
                print('>'+ line1_a.rstrip())
                print('<'+ line2_a)
            #exit loop if eof
            if leof:
                break
            #set step and reset local error counter
            ntstep=int(l1[1])
            lerror_t=False
            lerror_a =False
            maxdiff_t=0.
            maxdiff_a=0.

        # Set threshold
        #
        # tol[i] is set for t=[nts[i] nts[i+1]]
        for i in range(len(nts)):
            # update threshold if step larger than nts
            if ntstep >= nts[i]:
                #only update if i<=len(tol_ts)
                if (i<len(tol_ts)-1):
                    tol_t=tol_ts[i+1]
                    tol_a=tol_as[i+1]

        #----------------------------------------------------------------------------------------------------
        # Comparing lines
        varname=l1[0]
        varname2=l2[0]
        #check that it is the same variable in both file
        if varname.strip()!=varname2.strip():
            print('!! Error: Variables differ')
            print(' %s at line %i in file %s' %(varname,i1+1,file1))
            print(' %s at line %i in file %s' %(varname2,i2+1,file2))
            error_count+=1
            lerror_t=True
            lerror_a =True
            return -1
        #check that it is the same time step
        if int(l1[1])!=int(l2[1]):
            print('!! Error: Time steps differ')
            print(' nt=%s at line %i in file %s' %(l1[1],i1+1,file1))
            print(' nt=%s at line %i in file %s' %(l2[1],i2+1,file2))
            error_count+=1
            lerror_t=True
            lerror_a =True
            return -1

        #compare numerical values on this line
        for j in range(len(l1)):
            # NOTE(review): pr_line is reset for every column, so at
            # v_level==2 a line pair can be reprinted once per differing
            # column; presumably it was meant to be reset once per line.
            pr_line=True
            if is_num(l1[j]):
                # NOTE(review): float(l2[j]) assumes file2 has a numeric
                # token wherever file1 does; otherwise ValueError is raised.
                n1=float(l1[j])
                n2=float(l2[j])
                #absolute diffference
                if minval==-1 and is_not_int(n1):  #note: int are not considered (min-max index)
                    ldiff=abs(n1-n2)
                #relative diffference
                elif abs(n1)>minval and is_not_int(n1):  #note: int are not considered (min-max index)
                    ldiff=abs((n1-n2)/n1)
                else:
                    ldiff=0
                #Use tol_t threshold for temperature field
                if (varname in ['T']):
                    # save max
                    if ldiff > maxdiff_t:
                        maxdiff_t=ldiff
                        line1_t=data1[i1]
                        line2_t=data2[i2]
                        lnum_t=i1+1
                    #check if larger than tol
                    if ldiff > tol_t:
                        error_count+=1
                        lerror_t=True
                        # print line
                        if (v_level==2 and pr_line):
                            if print_header:
                                print(header)
                                print_header=False
                            if pr_line:
                                print('>' + data1[i1].rstrip())
                                print('<' + data2[i2])
                                pr_line=False
                #Use tol_a threshold for all other fields
                else:
                    # save max
                    if ldiff > maxdiff_a:
                        maxdiff_a=ldiff
                        line1_a=data1[i1]
                        line2_a=data2[i2]
                        lnum_a=i1+1
                    #check if larger than tol
                    if ldiff > tol_a:
                        error_count+=1
                        lerror_a=True
                        # print line
                        if (v_level==2 and pr_line):
                            if print_header:
                                print(header)
                                print_header=False
                            if pr_line:
                                print('>' + data1[i1].rstrip())
                                print('<' + data2[i2])
                                pr_line=False

        # moves forward the 2 file counters
        i1=i1+1
        i2=i2+1

    #print if error detected for verbose 0
    if (v_level==0) and (error_count>0):
        print(print_out)
    if v_level>0 and error_count==0:
        print('no difference above threshold')
    return error_count
#----------------------------------------------------------------------------
# Local functions
def is_num(x):
    """Return True if *x* can be converted to a float, False otherwise."""
    test=True
    try:
        a=float(x)
    except ValueError:
        test=False
    return test
def is_not_int(x):
    """Return True unless *x* is a non-zero integer-valued number.

    note: if x=0.0 it returns True for this test -- zero deliberately stays
    on the numeric-comparison path while min/max index columns are skipped.
    NOTE(review): int(float('inf')) raises OverflowError, which is not
    caught here; confirm infinities cannot appear in YUPRTEST output.
    """
    test=False
    try:
        a=int(x)
        if x!=0.0:
            if a/x!=1:
                test=True
        else:
            test=True
    except ValueError:
        test=True
    return test
#-----------------------------------
#execute as a script
if __name__ == "__main__":
    # Command-line entry point: each extra positional argument is converted
    # and forwarded to cmp_ in order; the last three are parsed as
    # comma-separated lists of floats (nts, tol_ts, tol_as).
    if len(sys.argv)==3:
        cmp_(sys.argv[1],sys.argv[2])
    elif len(sys.argv)==4:
        cmp_(sys.argv[1],sys.argv[2],int(sys.argv[3]))
    elif len(sys.argv)==5:
        cmp_(sys.argv[1],sys.argv[2],int(sys.argv[3]), \
             float(sys.argv[4]))
    elif len(sys.argv)==6:
        cmp_(sys.argv[1],sys.argv[2],int(sys.argv[3]), \
             float(sys.argv[4]), \
             [float(el) for el in sys.argv[5].split(',')])
    elif len(sys.argv)==7:
        cmp_(sys.argv[1],sys.argv[2],int(sys.argv[3]), \
             float(sys.argv[4]), \
             [float(el) for el in sys.argv[5].split(',')], \
             [float(el) for el in sys.argv[6].split(',')])
    elif len(sys.argv)==8:
        cmp_(sys.argv[1],sys.argv[2],int(sys.argv[3]), \
             float(sys.argv[4]), \
             [float(el) for el in sys.argv[5].split(',')], \
             [float(el) for el in sys.argv[6].split(',')], \
             [float(el) for el in sys.argv[7].split(',')])
    else:
        # too few or too many arguments: print usage and exit
        print('''USAGE : compare_files.py file1 file2 [v_level=0 minval=1e-15
                 nts=10,100,200
                 tol_ts=1e-15,1e-15,1e-15
                 tol_as=1e-15,1e-15,1e-15 ]
DEFINITION : Compare relative differences between two YUPRTEST file1, file2
with tolerance tol_*. Values smaller than minval are not considered.
Different thresholds are used for different time steps. Thresholds tol_ts are used for
the temperature field while tol_as are used for all other fields.
if minval set to -1 compares absolute instead of relative differences. ''')
| en | 0.680434 | #!/usr/bin/env python COSMO TECHNICAL TESTSUITE General purpose script to compare two YUPRTEST output files # built-in modules # information # compare two YUPRTEST file1, file2 with tolerance tol_* # Values smaller than minval are not considered. # Different threshold are used for different time step. # v_level:verbose level # -1 -> no print out # 0 -> max diff over all variables for each time step, short # 1 -> max diff over all variables for each time step, print lines # 2 -> show all lines with differences above tol_ # if minval is set to -1 compares absolute differences # the comparison is only done for overlapping time steps # check file existence # open file # variables initialisation #number of error detected #set tolerence for t #set tolerence for all other variables #maximal diff per time step for t or p #line file1 with max diff #line file2 with max diff #maximal diff per time step for all variables #line file1 with max diff #line file2 with max diff #comment at the begining wil be skiped # check that files are not empty # set file counter # remove the headers part (all lines starting with comment_type) # first on file1 # then on file2 # Set the file counters to identical time step #check eof #---------------------------------------------------------------------------------------------------- #loop over file lines #check eof #read file #---------------------------------------------------------------------------------------------------- #prepare printout if new time step or eof #print if verbose=1 and error at this step #exit loop if eof #set step and reset local error counter # Set threshold # # tol[i] is set for t=[nts[i] nts[i+1]] # update threshold if step larger than nts #only update if i<=len(tol_ts) #---------------------------------------------------------------------------------------------------- # Comparing lines #check that it is the same variable in both file #check that it is the same time step #compare numerical values on 
from django import forms
from django.utils.translation import gettext_lazy as _
from portfolios.models import (
Skill,
ProfessionalExperience, ProfessionalExperienceMedia,
Education, EducationMedia,
Certification, CertificationMedia,
Project, ProjectMedia,
Interest,
Testimonial
)
from utils.validators import get_validated_file, get_validated_image
# ----------------------------------------------------
# *** Skill Forms ***
# ----------------------------------------------------
class SkillForm(forms.ModelForm):
    """Create/update form for a Skill, with uploaded-image validation."""

    class Meta:
        model = Skill
        fields = ('title', 'image')

    def clean_image(self):
        """Run the uploaded image through the shared image validator."""
        return get_validated_image(self.cleaned_data.get('image'))
# ----------------------------------------------------
# *** Professional Experience Forms ***
# ----------------------------------------------------
class ProfessionalExperienceForm(forms.ModelForm):
    """Model form for a professional experience entry.

    Cross-field rule: exactly one of ``end_date`` / ``currently_working``
    must be supplied.
    """

    class Meta:
        model = ProfessionalExperience
        fields = ['company', 'company_image', 'address', 'designation', 'job_type',
                  'start_date', 'end_date', 'currently_working', 'description']
        widgets = {
            'start_date': forms.TextInput(attrs={'class': 'form-control', 'id': 'start_date'}),
            'end_date': forms.TextInput(attrs={'class': 'form-control', 'id': 'end_date'}),
            'description': forms.Textarea(
                attrs={
                    'class': 'form-control',
                    'rows': 3,
                    'cols': 3,
                }
            ),
        }

    def clean(self):
        """Validate the end_date / currently_working combination."""
        cleaned_data = super().clean()
        if cleaned_data.get('end_date', None) is None and not cleaned_data.get('currently_working', None):
            self.add_error('end_date', _("End date is required if you are not currently working here."))
        elif cleaned_data.get('end_date') is not None and cleaned_data.get('currently_working'):
            self.add_error('end_date', _("End date is not required if you are currently working here."))
            self.add_error('currently_working', _("Currently working is not required if you provide an end date."))
            raise forms.ValidationError(
                _("Conflicting with `End date` and `Currently working`. Please specify only one.")
            )
        return cleaned_data

    def clean_end_date(self):
        """Ensure end_date is strictly after start_date.

        BUGFIX: guard against a missing/invalid start_date -- it is absent
        from cleaned_data when its own validation failed, and comparing a
        date with None raised TypeError before.
        """
        end_date = self.cleaned_data.get('end_date')
        start_date = self.cleaned_data.get('start_date')
        if end_date and start_date and end_date <= start_date:
            raise forms.ValidationError(_("End date must be greater than start date"))
        return end_date

    def clean_company_image(self):
        """Run the uploaded company image through the shared validator."""
        company_image = self.cleaned_data.get('company_image')
        return get_validated_image(company_image)
class ProfessionalExperienceMediaForm(forms.ModelForm):
    """Form for a single media attachment of a professional experience."""

    class Meta:
        model = ProfessionalExperienceMedia
        fields = ("file",)
class ProfessionalExperienceWithMediaForm(ProfessionalExperienceForm):
    """ ProfessionalExperienceWithMediaForm = ProfessionalExperienceForm + ProfessionalExperienceMediaForm """
    # multiple file form field (files themselves are saved by the view)
    file = forms.FileField(required=False, widget=forms.ClearableFileInput(attrs={'multiple': True}))

    class Meta(ProfessionalExperienceForm.Meta):
        fields = ProfessionalExperienceForm.Meta.fields + ['file', ]

    def clean_file(self):
        # delegate size/extension checks to the shared file validator
        file = self.cleaned_data.get('file')
        return get_validated_file(file)
# ----------------------------------------------------
# *** Education Forms ***
# ----------------------------------------------------
class EducationForm(forms.ModelForm):
    """Model form for an education entry.

    Cross-field rule: exactly one of ``end_date`` / ``currently_studying``
    must be supplied.
    """

    class Meta:
        model = Education
        fields = ['school', 'degree', 'address', 'field_of_study',
                  'start_date', 'end_date', 'currently_studying', 'grade', 'activities', 'description']
        widgets = {
            'start_date': forms.TextInput(attrs={'class': 'form-control', 'id': 'start_date'}),
            'end_date': forms.TextInput(attrs={'class': 'form-control', 'id': 'end_date'}),
            'activities': forms.Textarea(
                attrs={
                    'class': 'form-control',
                    'rows': 2,
                    'cols': 2,
                }
            ),
            'description': forms.Textarea(
                attrs={
                    'class': 'form-control',
                    'rows': 3,
                    'cols': 3,
                }
            ),
        }

    def clean(self):
        """Validate the end_date / currently_studying combination."""
        cleaned_data = super().clean()
        if cleaned_data.get('end_date', None) is None and not cleaned_data.get('currently_studying', None):
            self.add_error('end_date', _("End date is required if you are not currently studying here."))
        elif cleaned_data.get('end_date') is not None and cleaned_data.get('currently_studying'):
            self.add_error('end_date', _("End date is not required if you are currently studying here."))
            self.add_error('currently_studying', _("Currently studying is not required if you provide an end date."))
            raise forms.ValidationError(
                _("Conflicting with `End date` and `Currently studying`. Please specify only one.")
            )
        return cleaned_data

    def clean_end_date(self):
        """Ensure end_date is strictly after start_date.

        BUGFIX: guard against a missing/invalid start_date -- comparing a
        date with None raised TypeError before.
        """
        end_date = self.cleaned_data.get('end_date')
        start_date = self.cleaned_data.get('start_date')
        if end_date and start_date and end_date <= start_date:
            raise forms.ValidationError(_("End date must be greater than start date"))
        return end_date
class EducationMediaForm(forms.ModelForm):
    """Form for a single media attachment of an education entry."""

    class Meta:
        model = EducationMedia
        fields = ("file",)
class EducationWithMediaForm(EducationForm):
    """ EducationWithMediaForm = EducationForm + EducationMediaForm """
    # multiple file form field (files themselves are saved by the view)
    file = forms.FileField(required=False, widget=forms.ClearableFileInput(attrs={'multiple': True}))

    class Meta(EducationForm.Meta):
        fields = EducationForm.Meta.fields + ['file', ]

    def clean_file(self):
        # delegate size/extension checks to the shared file validator
        file = self.cleaned_data.get('file')
        return get_validated_file(file)
# ----------------------------------------------------
# *** Certification Forms ***
# ----------------------------------------------------
class CertificationForm(forms.ModelForm):
    """Model form for a certification/credential entry.

    Cross-field rule: exactly one of ``expiration_date`` /
    ``does_not_expire`` must be supplied.
    """

    class Meta:
        model = Certification
        fields = ['name', 'organization', 'address', 'issue_date', 'expiration_date',
                  'does_not_expire', 'credential_id', 'credential_url', 'description']
        widgets = {
            'issue_date': forms.TextInput(attrs={'class': 'form-control', 'id': 'issue_date'}),
            'expiration_date': forms.TextInput(attrs={'class': 'form-control', 'id': 'expiration_date'}),
            'description': forms.Textarea(
                attrs={
                    'class': 'form-control',
                    'rows': 3,
                    'cols': 3,
                }
            ),
        }

    def clean(self):
        """Validate the expiration_date / does_not_expire combination."""
        cleaned_data = super().clean()
        if cleaned_data.get('expiration_date', None) is None and not cleaned_data.get('does_not_expire', None):
            self.add_error('expiration_date', _("Expiration Date is required if this credential does expire."))
        elif cleaned_data.get('expiration_date') is not None and cleaned_data.get('does_not_expire'):
            self.add_error('expiration_date', _("Expiration Date is not required if this credential does not expire."))
            self.add_error('does_not_expire', _("Not required if you provide an Expiration Date."))
            raise forms.ValidationError(
                _("Conflicting with `Expiration Date` and `Does not expire`. Please specify only one.")
            )
        return cleaned_data

    def clean_expiration_date(self):
        """Ensure expiration_date is strictly after issue_date.

        BUGFIX: guard against a missing/invalid issue_date -- comparing a
        date with None raised TypeError before.
        """
        expiration_date = self.cleaned_data.get('expiration_date')
        issue_date = self.cleaned_data.get('issue_date')
        if expiration_date and issue_date and expiration_date <= issue_date:
            raise forms.ValidationError(_("Expiration date must be greater than Issue date"))
        return expiration_date
class CertificationMediaForm(forms.ModelForm):
    """Form for a single media attachment of a certification."""

    class Meta:
        model = CertificationMedia
        fields = ("file",)
class CertificationWithMediaForm(CertificationForm):
    """ CertificationWithMediaForm = CertificationForm + CertificationMediaForm """
    # multiple file form field (files themselves are saved by the view)
    file = forms.FileField(required=False, widget=forms.ClearableFileInput(attrs={'multiple': True}))

    class Meta(CertificationForm.Meta):
        fields = CertificationForm.Meta.fields + ['file', ]

    def clean_file(self):
        # delegate size/extension checks to the shared file validator
        file = self.cleaned_data.get('file')
        return get_validated_file(file)
# ----------------------------------------------------
# *** Project Forms ***
# ----------------------------------------------------
class ProjectForm(forms.ModelForm):
    """Model form for a portfolio project entry.

    Cross-field rule: exactly one of ``end_date`` / ``currently_working``
    must be supplied.
    """

    class Meta:
        model = Project
        fields = ['title', 'short_description', 'technology', 'start_date',
                  'end_date', 'currently_working', 'url', 'description']
        widgets = {
            'start_date': forms.TextInput(attrs={'class': 'form-control', 'id': 'start_date'}),
            'end_date': forms.TextInput(attrs={'class': 'form-control', 'id': 'end_date'}),
            'technology': forms.Textarea(
                attrs={
                    'class': 'form-control',
                    'rows': 2,
                    'cols': 2,
                }
            ),
            'description': forms.Textarea(
                attrs={
                    'class': 'form-control',
                    'rows': 3,
                    'cols': 3,
                }
            ),
        }

    def clean(self):
        """Validate the end_date / currently_working combination."""
        cleaned_data = super().clean()
        if cleaned_data.get('end_date', None) is None and not cleaned_data.get('currently_working', None):
            self.add_error('end_date', _("End date is required if you are not currently working on this project."))
        elif cleaned_data.get('end_date') is not None and cleaned_data.get('currently_working'):
            self.add_error('end_date', _("End date is not required if you are currently working on this project."))
            self.add_error('currently_working', _("Currently working is not required if you provide an end date."))
            raise forms.ValidationError(
                _("Conflicting with `End date` and `Currently working`. Please specify only one.")
            )
        return cleaned_data

    def clean_end_date(self):
        """Ensure end_date is strictly after start_date.

        BUGFIX: guard against a missing/invalid start_date -- comparing a
        date with None raised TypeError before.
        """
        end_date = self.cleaned_data.get('end_date')
        start_date = self.cleaned_data.get('start_date')
        if end_date and start_date and end_date <= start_date:
            raise forms.ValidationError(_("End date must be greater than start date"))
        return end_date
class ProjectMediaForm(forms.ModelForm):
    """Form for a single media attachment of a project."""

    class Meta:
        model = ProjectMedia
        fields = ("file",)
class ProjectWithMediaForm(ProjectForm):
    """ ProjectWithMediaForm = ProjectForm + ProjectMediaForm """
    # multiple file form field (files themselves are saved by the view)
    file = forms.FileField(required=False, widget=forms.ClearableFileInput(attrs={'multiple': True}))

    class Meta(ProjectForm.Meta):
        fields = ProjectForm.Meta.fields + ['file', ]

    def clean_file(self):
        # delegate size/extension checks to the shared file validator
        file = self.cleaned_data.get('file')
        return get_validated_file(file)
# ----------------------------------------------------
# *** Interest Forms ***
# ----------------------------------------------------
class InterestForm(forms.ModelForm):
class Meta:
model = Interest
fields = ('title', 'icon')
def clean_icon(self):
icon = self.cleaned_data.get('icon')
return get_validated_image(icon)
# ----------------------------------------------------
# *** Testimonial Forms ***
# ----------------------------------------------------
class TestimonialForm(forms.ModelForm):
class Meta:
model = Testimonial
fields = ('name', 'designation', 'image', 'description')
widgets = {
'description': forms.Textarea(
attrs={
'class': 'form-control',
'rows': 3,
'cols': 3,
}
),
}
def clean_image(self):
image = self.cleaned_data.get('image')
return get_validated_image(image)
| from django import forms
from django.utils.translation import gettext_lazy as _
from portfolios.models import (
Skill,
ProfessionalExperience, ProfessionalExperienceMedia,
Education, EducationMedia,
Certification, CertificationMedia,
Project, ProjectMedia,
Interest,
Testimonial
)
from utils.validators import get_validated_file, get_validated_image
# ----------------------------------------------------
# *** Skill Forms ***
# ----------------------------------------------------
class SkillForm(forms.ModelForm):
class Meta:
model = Skill
fields = ('title', 'image')
def clean_image(self):
image = self.cleaned_data.get('image')
return get_validated_image(image)
# ----------------------------------------------------
# *** Professional Experience Forms ***
# ----------------------------------------------------
class ProfessionalExperienceForm(forms.ModelForm):
class Meta:
model = ProfessionalExperience
fields = ['company', 'company_image', 'address', 'designation', 'job_type',
'start_date', 'end_date', 'currently_working', 'description']
widgets = {
'start_date': forms.TextInput(attrs={'class': 'form-control', 'id': 'start_date'}),
'end_date': forms.TextInput(attrs={'class': 'form-control', 'id': 'end_date'}),
'description': forms.Textarea(
attrs={
'class': 'form-control',
'rows': 3,
'cols': 3,
}
),
}
def clean(self):
cleaned_data = super(ProfessionalExperienceForm, self).clean()
if cleaned_data.get('end_date', None) is None and not cleaned_data.get('currently_working', None):
self.add_error('end_date', _("End date is required if you are not currently working here."))
elif cleaned_data.get('end_date') is not None and cleaned_data.get('currently_working'):
self.add_error('end_date', _("End date is not required if you are currently working here."))
self.add_error('currently_working', _("Currently working is not required if you provide an end date."))
raise forms.ValidationError(
_("Conflicting with `End date` and `Currently working`. Please specify only one.")
)
return cleaned_data
def clean_end_date(self):
end_date = self.cleaned_data.get('end_date')
if end_date and end_date <= self.cleaned_data.get('start_date'):
raise forms.ValidationError(_("End date must be greater than start date"))
return end_date
def clean_company_image(self):
company_image = self.cleaned_data.get('company_image')
return get_validated_image(company_image)
class ProfessionalExperienceMediaForm(forms.ModelForm):
class Meta:
model = ProfessionalExperienceMedia
fields = ("file",)
class ProfessionalExperienceWithMediaForm(ProfessionalExperienceForm):
""" ProfessionalExperienceWithMediaForm = ProfessionalExperienceForm + ProfessionalExperienceMediaForm """
# multiple file form field
file = forms.FileField(required=False, widget=forms.ClearableFileInput(attrs={'multiple': True}))
class Meta(ProfessionalExperienceForm.Meta):
fields = ProfessionalExperienceForm.Meta.fields + ['file', ]
def clean_file(self):
file = self.cleaned_data.get('file')
return get_validated_file(file)
# ----------------------------------------------------
# *** Education Forms ***
# ----------------------------------------------------
class EducationForm(forms.ModelForm):
class Meta:
model = Education
fields = ['school', 'degree', 'address', 'field_of_study',
'start_date', 'end_date', 'currently_studying', 'grade', 'activities', 'description']
widgets = {
'start_date': forms.TextInput(attrs={'class': 'form-control', 'id': 'start_date'}),
'end_date': forms.TextInput(attrs={'class': 'form-control', 'id': 'end_date'}),
'activities': forms.Textarea(
attrs={
'class': 'form-control',
'rows': 2,
'cols': 2,
}
),
'description': forms.Textarea(
attrs={
'class': 'form-control',
'rows': 3,
'cols': 3,
}
),
}
def clean(self):
cleaned_data = super(EducationForm, self).clean()
if cleaned_data.get('end_date', None) is None and not cleaned_data.get('currently_studying', None):
self.add_error('end_date', _("End date is required if you are not currently studying here."))
elif cleaned_data.get('end_date') is not None and cleaned_data.get('currently_studying'):
self.add_error('end_date', _("End date is not required if you are currently studying here."))
self.add_error('currently_studying', _("Currently studying is not required if you provide an end date."))
raise forms.ValidationError(
_("Conflicting with `End date` and `Currently studying`. Please specify only one.")
)
return cleaned_data
def clean_end_date(self):
end_date = self.cleaned_data.get('end_date')
if end_date and end_date <= self.cleaned_data.get('start_date'):
raise forms.ValidationError(_("End date must be greater than start date"))
return end_date
class EducationMediaForm(forms.ModelForm):
class Meta:
model = EducationMedia
fields = ("file",)
class EducationWithMediaForm(EducationForm):
""" EducationWithMediaForm = EducationForm + EducationMediaForm """
# multiple file form field
file = forms.FileField(required=False, widget=forms.ClearableFileInput(attrs={'multiple': True}))
class Meta(EducationForm.Meta):
fields = EducationForm.Meta.fields + ['file', ]
def clean_file(self):
file = self.cleaned_data.get('file')
return get_validated_file(file)
# ----------------------------------------------------
# *** Certification Forms ***
# ----------------------------------------------------
class CertificationForm(forms.ModelForm):
class Meta:
model = Certification
fields = ['name', 'organization', 'address', 'issue_date', 'expiration_date',
'does_not_expire', 'credential_id', 'credential_url', 'description']
widgets = {
'issue_date': forms.TextInput(attrs={'class': 'form-control', 'id': 'issue_date'}),
'expiration_date': forms.TextInput(attrs={'class': 'form-control', 'id': 'expiration_date'}),
'description': forms.Textarea(
attrs={
'class': 'form-control',
'rows': 3,
'cols': 3,
}
),
}
def clean(self):
cleaned_data = super(CertificationForm, self).clean()
if cleaned_data.get('expiration_date', None) is None and not cleaned_data.get('does_not_expire', None):
self.add_error('expiration_date', _("Expiration Date is required if this credential does expire."))
elif cleaned_data.get('expiration_date') is not None and cleaned_data.get('does_not_expire'):
self.add_error('expiration_date', _("Expiration Date is not required if this credential does not expire."))
self.add_error('does_not_expire', _("Not required if you provide an Expiration Date."))
raise forms.ValidationError(
_("Conflicting with `Expiration Date` and `Does not expire`. Please specify only one.")
)
return cleaned_data
def clean_expiration_date(self):
expiration_date = self.cleaned_data.get('expiration_date')
if expiration_date and expiration_date <= self.cleaned_data.get('issue_date'):
raise forms.ValidationError(_("Expiration date must be greater than Issue date"))
return expiration_date
class CertificationMediaForm(forms.ModelForm):
class Meta:
model = CertificationMedia
fields = ("file",)
class CertificationWithMediaForm(CertificationForm):
""" CertificationWithMediaForm = CertificationForm + CertificationMediaForm """
# multiple file form field
file = forms.FileField(required=False, widget=forms.ClearableFileInput(attrs={'multiple': True}))
class Meta(CertificationForm.Meta):
fields = CertificationForm.Meta.fields + ['file', ]
def clean_file(self):
file = self.cleaned_data.get('file')
return get_validated_file(file)
# ----------------------------------------------------
# *** Project Forms ***
# ----------------------------------------------------
class ProjectForm(forms.ModelForm):
class Meta:
model = Project
fields = ['title', 'short_description', 'technology', 'start_date',
'end_date', 'currently_working', 'url', 'description']
widgets = {
'start_date': forms.TextInput(attrs={'class': 'form-control', 'id': 'start_date'}),
'end_date': forms.TextInput(attrs={'class': 'form-control', 'id': 'end_date'}),
'technology': forms.Textarea(
attrs={
'class': 'form-control',
'rows': 2,
'cols': 2,
}
),
'description': forms.Textarea(
attrs={
'class': 'form-control',
'rows': 3,
'cols': 3,
}
),
}
def clean(self):
cleaned_data = super(ProjectForm, self).clean()
if cleaned_data.get('end_date', None) is None and not cleaned_data.get('currently_working', None):
self.add_error('end_date', _("End date is required if you are not currently working on this project."))
elif cleaned_data.get('end_date') is not None and cleaned_data.get('currently_working'):
self.add_error('end_date', _("End date is not required if you are currently working on this project."))
self.add_error('currently_working', _("Currently working is not required if you provide an end date."))
raise forms.ValidationError(
_("Conflicting with `End date` and `Currently working`. Please specify only one.")
)
return cleaned_data
def clean_end_date(self):
end_date = self.cleaned_data.get('end_date')
if end_date and end_date <= self.cleaned_data.get('start_date'):
raise forms.ValidationError(_("End date must be greater than start date"))
return end_date
class ProjectMediaForm(forms.ModelForm):
class Meta:
model = ProjectMedia
fields = ("file",)
class ProjectWithMediaForm(ProjectForm):
""" ProjectWithMediaForm = ProjectForm + ProjectMediaForm """
# multiple file form field
file = forms.FileField(required=False, widget=forms.ClearableFileInput(attrs={'multiple': True}))
class Meta(ProjectForm.Meta):
fields = ProjectForm.Meta.fields + ['file', ]
def clean_file(self):
file = self.cleaned_data.get('file')
return get_validated_file(file)
# ----------------------------------------------------
# *** Interest Forms ***
# ----------------------------------------------------
class InterestForm(forms.ModelForm):
class Meta:
model = Interest
fields = ('title', 'icon')
def clean_icon(self):
icon = self.cleaned_data.get('icon')
return get_validated_image(icon)
# ----------------------------------------------------
# *** Testimonial Forms ***
# ----------------------------------------------------
class TestimonialForm(forms.ModelForm):
class Meta:
model = Testimonial
fields = ('name', 'designation', 'image', 'description')
widgets = {
'description': forms.Textarea(
attrs={
'class': 'form-control',
'rows': 3,
'cols': 3,
}
),
}
def clean_image(self):
image = self.cleaned_data.get('image')
return get_validated_image(image)
| en | 0.348947 | # ---------------------------------------------------- # *** Skill Forms *** # ---------------------------------------------------- # ---------------------------------------------------- # *** Professional Experience Forms *** # ---------------------------------------------------- ProfessionalExperienceWithMediaForm = ProfessionalExperienceForm + ProfessionalExperienceMediaForm # multiple file form field # ---------------------------------------------------- # *** Education Forms *** # ---------------------------------------------------- EducationWithMediaForm = EducationForm + EducationMediaForm # multiple file form field # ---------------------------------------------------- # *** Certification Forms *** # ---------------------------------------------------- CertificationWithMediaForm = CertificationForm + CertificationMediaForm # multiple file form field # ---------------------------------------------------- # *** Project Forms *** # ---------------------------------------------------- ProjectWithMediaForm = ProjectForm + ProjectMediaForm # multiple file form field # ---------------------------------------------------- # *** Interest Forms *** # ---------------------------------------------------- # ---------------------------------------------------- # *** Testimonial Forms *** # ---------------------------------------------------- | 2.187017 | 2 |
puppy_interactions/interactions/models.py | starlingiot/puppy_interactions | 0 | 6613913 | <reponame>starlingiot/puppy_interactions
from django.db import models
import uuid
class InteractionBaseModel(models.Model):
guid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class Person(InteractionBaseModel):
# Slack `user-id` or string representation (for interactions without @notation)
user_id = models.CharField(max_length=255, unique=True)
display_name = models.CharField(max_length=255, blank=True)
def __str__(self):
if self.display_name != "":
return self.display_name
else:
return self.user_id
class Interaction(InteractionBaseModel):
"""
models an interaction between two people. it can be ranked as a `+` or `-`.
one person can have many interactions within a conversation. for example, in a
3-person interaction (conversation1).
* personA -> personB = conversation1.interaction1
* personA -> personC = conversation1.interaction2
* personB -> personC = conversation1.interaction3
* personC -> personA = conversation1.interaction4
* ...
because we can't explicitly link conversations reported by different people, we
will see multiple conversation UUIDs for a single conversation. that means a single
conversation may present as multiple conversations without some temporal- and user-
based heuristics.
"""
# ensure this is set the same for all interactions in the conversation
conversation = models.UUIDField(default=uuid.uuid4)
rater = models.ForeignKey('interactions.Person', on_delete=models.PROTECT,
related_name='rater_interactions')
ratee = models.ForeignKey('interactions.Person', on_delete=models.PROTECT,
related_name='ratee_interactions', null=True)
POSITIVE = "+"
NEGATIVE = "-"
RATING_CHOICES = (
(POSITIVE, "Positive"),
(NEGATIVE, "Negative"),
)
rating = models.CharField(max_length=1, choices=RATING_CHOICES)
@staticmethod
def map_to_icon(rating: str) -> str:
"""return a Slack emoji from a rating"""
m = {Interaction.POSITIVE: ":slightly_smiling_face:",
Interaction.NEGATIVE: ":white_frowning_face:"}
return m.get(rating, ":grey_question:")
def __str__(self):
icon = self.map_to_icon(self.rating)
return f"*{self.ratee}*\t{self.created.strftime('%d %b %Y')}\t*{icon}*"
| from django.db import models
import uuid
class InteractionBaseModel(models.Model):
guid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class Person(InteractionBaseModel):
# Slack `user-id` or string representation (for interactions without @notation)
user_id = models.CharField(max_length=255, unique=True)
display_name = models.CharField(max_length=255, blank=True)
def __str__(self):
if self.display_name != "":
return self.display_name
else:
return self.user_id
class Interaction(InteractionBaseModel):
"""
models an interaction between two people. it can be ranked as a `+` or `-`.
one person can have many interactions within a conversation. for example, in a
3-person interaction (conversation1).
* personA -> personB = conversation1.interaction1
* personA -> personC = conversation1.interaction2
* personB -> personC = conversation1.interaction3
* personC -> personA = conversation1.interaction4
* ...
because we can't explicitly link conversations reported by different people, we
will see multiple conversation UUIDs for a single conversation. that means a single
conversation may present as multiple conversations without some temporal- and user-
based heuristics.
"""
# ensure this is set the same for all interactions in the conversation
conversation = models.UUIDField(default=uuid.uuid4)
rater = models.ForeignKey('interactions.Person', on_delete=models.PROTECT,
related_name='rater_interactions')
ratee = models.ForeignKey('interactions.Person', on_delete=models.PROTECT,
related_name='ratee_interactions', null=True)
POSITIVE = "+"
NEGATIVE = "-"
RATING_CHOICES = (
(POSITIVE, "Positive"),
(NEGATIVE, "Negative"),
)
rating = models.CharField(max_length=1, choices=RATING_CHOICES)
@staticmethod
def map_to_icon(rating: str) -> str:
"""return a Slack emoji from a rating"""
m = {Interaction.POSITIVE: ":slightly_smiling_face:",
Interaction.NEGATIVE: ":white_frowning_face:"}
return m.get(rating, ":grey_question:")
def __str__(self):
icon = self.map_to_icon(self.rating)
return f"*{self.ratee}*\t{self.created.strftime('%d %b %Y')}\t*{icon}*" | en | 0.877701 | # Slack `user-id` or string representation (for interactions without @notation) models an interaction between two people. it can be ranked as a `+` or `-`. one person can have many interactions within a conversation. for example, in a 3-person interaction (conversation1). * personA -> personB = conversation1.interaction1 * personA -> personC = conversation1.interaction2 * personB -> personC = conversation1.interaction3 * personC -> personA = conversation1.interaction4 * ... because we can't explicitly link conversations reported by different people, we will see multiple conversation UUIDs for a single conversation. that means a single conversation may present as multiple conversations without some temporal- and user- based heuristics. # ensure this is set the same for all interactions in the conversation return a Slack emoji from a rating | 2.752632 | 3 |
gunnery/account/modal.py | timgates42/gunnery | 314 | 6613914 | <gh_stars>100-1000
from django.contrib.auth import get_user_model
from core.modal import BaseModal, ModalPermissionException
from account.models import DepartmentGroup
from core.models import Department
from .forms import account_create_form, UserForm, DepartmentGroupForm, UserSystemForm
_user = get_user_model()
class BaseAccountModal(BaseModal):
def get_form_creator(self):
return account_create_form
def _on_form_create_user(self):
if not self.form.instance.id:
self.form.fields['password'].required = True
if self.request.user.is_superuser:
self.form.fields['groups'].department_prefix = True
groups = DepartmentGroup.objects.all()
else:
groups = DepartmentGroup.objects.filter(department_id = self.request.current_department_id)
self.form.set_groups(groups)
class UserModal(BaseAccountModal):
form = UserForm
form_name = 'user'
def permission_check(self, action):
if not self.request.user.has_perm('core.change_department', Department(id=self.request.current_department_id)):
raise ModalPermissionException
def on_form_create(self):
self._on_form_create_user()
def on_view(self):
self.data['model_name'] = 'User'
def on_before_save(self):
instance = self.form.instance
instance.username = instance.email
if len(instance.password):
instance.set_password(instance.password)
else:
instance.password = _user.objects.get(pk=instance.id).password
class UsersystemModal(BaseAccountModal):
form = UserSystemForm
form_name = 'usersystem'
def permission_check(self, action):
if not self.request.user.is_superuser:
raise ModalPermissionException
def on_form_create(self):
self._on_form_create_user()
class GroupModal(BaseAccountModal):
form = DepartmentGroupForm
form_name = 'group'
def permission_check(self, action):
if not self.request.user.has_perm('core.change_department', Department(id=self.request.current_department_id)):
raise ModalPermissionException
def on_before_save(self):
self.form.instance.department_id = self.request.current_department_id
def on_create(self):
DepartmentGroup.assign_department_perms(self.instance, Department(id=self.request.current_department_id))
| from django.contrib.auth import get_user_model
from core.modal import BaseModal, ModalPermissionException
from account.models import DepartmentGroup
from core.models import Department
from .forms import account_create_form, UserForm, DepartmentGroupForm, UserSystemForm
_user = get_user_model()
class BaseAccountModal(BaseModal):
def get_form_creator(self):
return account_create_form
def _on_form_create_user(self):
if not self.form.instance.id:
self.form.fields['password'].required = True
if self.request.user.is_superuser:
self.form.fields['groups'].department_prefix = True
groups = DepartmentGroup.objects.all()
else:
groups = DepartmentGroup.objects.filter(department_id = self.request.current_department_id)
self.form.set_groups(groups)
class UserModal(BaseAccountModal):
form = UserForm
form_name = 'user'
def permission_check(self, action):
if not self.request.user.has_perm('core.change_department', Department(id=self.request.current_department_id)):
raise ModalPermissionException
def on_form_create(self):
self._on_form_create_user()
def on_view(self):
self.data['model_name'] = 'User'
def on_before_save(self):
instance = self.form.instance
instance.username = instance.email
if len(instance.password):
instance.set_password(instance.password)
else:
instance.password = _user.objects.get(pk=instance.id).password
class UsersystemModal(BaseAccountModal):
form = UserSystemForm
form_name = 'usersystem'
def permission_check(self, action):
if not self.request.user.is_superuser:
raise ModalPermissionException
def on_form_create(self):
self._on_form_create_user()
class GroupModal(BaseAccountModal):
form = DepartmentGroupForm
form_name = 'group'
def permission_check(self, action):
if not self.request.user.has_perm('core.change_department', Department(id=self.request.current_department_id)):
raise ModalPermissionException
def on_before_save(self):
self.form.instance.department_id = self.request.current_department_id
def on_create(self):
DepartmentGroup.assign_department_perms(self.instance, Department(id=self.request.current_department_id)) | none | 1 | 2.026825 | 2 | |
replys/models.py | NicolasMuras/Lookdaluv | 1 | 6613915 | <filename>replys/models.py
from django.db import models
from simple_history.models import HistoricalRecords
from answers.models import Answer
from core.models import BaseModel
class Reply(BaseModel):
class ReplyCharacter(models.IntegerChoices):
POSITIVE = 1, "POSITIVE"
NEUTRAL = 2, "NEUTRAL"
NEGATIVE = 3, "NEGATIVE"
TRAP = 4, "TRAP"
reply_character = models.PositiveSmallIntegerField(
'Reply Character',
choices=ReplyCharacter.choices,
default=ReplyCharacter.NEUTRAL
)
message = models.CharField('Reply Message', max_length=100, blank = False, null = False)
answer = models.ForeignKey(Answer, on_delete=models.CASCADE, verbose_name='Answer', related_name='replys')
historical = HistoricalRecords()
@property
def _history_user(self):
return self.changed_by
@_history_user.setter
def _history_user(self, value):
self.changed_by = value
class Meta:
verbose_name = 'Reply'
verbose_name_plural = 'Replys'
def __str__(self):
return self.message
class Vote(BaseModel):
class VoteCharacter(models.IntegerChoices):
POSITIVE = 1, "POSITIVE"
NEUTRAL = 2, "NEUTRAL"
NEGATIVE = 3, "NEGATIVE"
TRAP = 4, "TRAP"
vote_character = models.PositiveSmallIntegerField(
'Vote Character',
choices=VoteCharacter.choices,
default=VoteCharacter.NEUTRAL
)
reply = models.ForeignKey(Reply, on_delete=models.CASCADE, verbose_name='Reply', related_name='vote')
historical = HistoricalRecords()
@property
def _history_user(self):
return self.changed_by
@_history_user.setter
def _history_user(self, value):
self.changed_by = value
class Meta:
verbose_name = 'Vote'
verbose_name_plural = 'Votes'
| <filename>replys/models.py
from django.db import models
from simple_history.models import HistoricalRecords
from answers.models import Answer
from core.models import BaseModel
class Reply(BaseModel):
class ReplyCharacter(models.IntegerChoices):
POSITIVE = 1, "POSITIVE"
NEUTRAL = 2, "NEUTRAL"
NEGATIVE = 3, "NEGATIVE"
TRAP = 4, "TRAP"
reply_character = models.PositiveSmallIntegerField(
'Reply Character',
choices=ReplyCharacter.choices,
default=ReplyCharacter.NEUTRAL
)
message = models.CharField('Reply Message', max_length=100, blank = False, null = False)
answer = models.ForeignKey(Answer, on_delete=models.CASCADE, verbose_name='Answer', related_name='replys')
historical = HistoricalRecords()
@property
def _history_user(self):
return self.changed_by
@_history_user.setter
def _history_user(self, value):
self.changed_by = value
class Meta:
verbose_name = 'Reply'
verbose_name_plural = 'Replys'
def __str__(self):
return self.message
class Vote(BaseModel):
class VoteCharacter(models.IntegerChoices):
POSITIVE = 1, "POSITIVE"
NEUTRAL = 2, "NEUTRAL"
NEGATIVE = 3, "NEGATIVE"
TRAP = 4, "TRAP"
vote_character = models.PositiveSmallIntegerField(
'Vote Character',
choices=VoteCharacter.choices,
default=VoteCharacter.NEUTRAL
)
reply = models.ForeignKey(Reply, on_delete=models.CASCADE, verbose_name='Reply', related_name='vote')
historical = HistoricalRecords()
@property
def _history_user(self):
return self.changed_by
@_history_user.setter
def _history_user(self, value):
self.changed_by = value
class Meta:
verbose_name = 'Vote'
verbose_name_plural = 'Votes'
| none | 1 | 2.222054 | 2 | |
Files/pvpBot.py | Elkantar-git/pogo | 2 | 6613916 | # PvpBot
# BATTLE SCEEN
# Start at the battle sceen
# Click on BATTLE
# Click choose League
# Click Go
# FIGHT
# Spam first attack
# Click on the charged attack
# Complete the disign
#
#
# If pink pixel apeard => Shield
#
# Screenshot
# Spam first, charged attack and shield on [290:800]
# Detect the color of the charged attack and lookAt()
#
# Click on the Design in 5s
# Spam
# NEXT FIGHT OR REWARDS
# Click on Next
# Click on League
# Click Go
# For rewards Click on Orange pixel
import cv2
import numpy as np
import pyautogui
from pyautogui import *
import imutils
def lookAt(forWhat): # x, y = Full sceen
if forWhat == "Battle":
return 720, 1700, 1, 1
elif forWhat == "pix":
return 572, 1542, 1, 1
elif forWhat == "atkDragon":
return 100, 400, 300, 1040
elif forWhat == "atkEau":
return xmin, xmax, ymin, ymax
elif forWhat == "atkFeu":
return xmin, xmax, ymin, ymax
elif forWhat == "atk":
return xmin, xmax, ymin, ymax
# def screenshot(xmin, xmax, ymin, ymax):
# img = pyautogui.screenshot(region=(xmin, xmax, ymin, ymax))
# width, height = img.size()
# # img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
# # img = np.array(img)
# # img = img[ymin:ymax, xmin:xmax]
# # img = imutils.resize(img, width=(xmax-xmin)/2, height=(ymax-ymin)/2)
# return img
# BATTLE SCEEN
xmin, xmax, ymin, ymax = lookAt("Battle")
img = pyautogui.screenshot(region=(xmin, xmax, ymin, ymax))
width, height = img.size
# print(width, height)
for x in range(0, width, 5):
for y in range(0, height, 5):
r, g, b, w = img.getpixel((x, y))
# print(r, g, b, w)
if g == 208:
pyautogui.click(xmin/2, xmax/2)
time.sleep(0.5)
pyautogui.click(xmin/2, xmax/2)
time.sleep(5)
pyautogui.click(400, 400)
time.sleep(5)
pyautogui.click(430, 800)
# Green battle (67, 208, 164, 255)
# BATTLE
while True:
# px= pyautogui.pixel(572,1542)
# print(px)
# if pyautogui.pixel(572,1542)[0] > 220 and pyautogui.pixel(572,1542)[1] > 220 and pyautogui.pixel(572,1542)[2] > 220:
# xmin, xmax, ymin, ymax = lookAt("atkDragon")
img = pyautogui.screenshot(region=(100, 400, 300, 1040))
width, height = img.size
# print(width, height)
for x in range(0, width, 5):
for y in range(0, height, 5):
r, g, b, w = img.getpixel((x, y))
# print(r, g, b)
if ((r in range(3, 9)) and (g in range(125, 131)) and (b in range(187, 193))):
# if r == 6 and g == 128 and b == 190:
pyautogui.moveTo(x+50, y+200)
print(x, y)
# time.sleep(0.05)
break
| # PvpBot
# BATTLE SCEEN
# Start at the battle sceen
# Click on BATTLE
# Click choose League
# Click Go
# FIGHT
# Spam first attack
# Click on the charged attack
# Complete the disign
#
#
# If pink pixel apeard => Shield
#
# Screenshot
# Spam first, charged attack and shield on [290:800]
# Detect the color of the charged attack and lookAt()
#
# Click on the Design in 5s
# Spam
# NEXT FIGHT OR REWARDS
# Click on Next
# Click on League
# Click Go
# For rewards Click on Orange pixel
import cv2
import numpy as np
import pyautogui
from pyautogui import *
import imutils
def lookAt(forWhat): # x, y = Full sceen
if forWhat == "Battle":
return 720, 1700, 1, 1
elif forWhat == "pix":
return 572, 1542, 1, 1
elif forWhat == "atkDragon":
return 100, 400, 300, 1040
elif forWhat == "atkEau":
return xmin, xmax, ymin, ymax
elif forWhat == "atkFeu":
return xmin, xmax, ymin, ymax
elif forWhat == "atk":
return xmin, xmax, ymin, ymax
# def screenshot(xmin, xmax, ymin, ymax):
# img = pyautogui.screenshot(region=(xmin, xmax, ymin, ymax))
# width, height = img.size()
# # img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
# # img = np.array(img)
# # img = img[ymin:ymax, xmin:xmax]
# # img = imutils.resize(img, width=(xmax-xmin)/2, height=(ymax-ymin)/2)
# return img
# BATTLE SCEEN
xmin, xmax, ymin, ymax = lookAt("Battle")
img = pyautogui.screenshot(region=(xmin, xmax, ymin, ymax))
width, height = img.size
# print(width, height)
for x in range(0, width, 5):
for y in range(0, height, 5):
r, g, b, w = img.getpixel((x, y))
# print(r, g, b, w)
if g == 208:
pyautogui.click(xmin/2, xmax/2)
time.sleep(0.5)
pyautogui.click(xmin/2, xmax/2)
time.sleep(5)
pyautogui.click(400, 400)
time.sleep(5)
pyautogui.click(430, 800)
# Green battle (67, 208, 164, 255)
# BATTLE
while True:
# px= pyautogui.pixel(572,1542)
# print(px)
# if pyautogui.pixel(572,1542)[0] > 220 and pyautogui.pixel(572,1542)[1] > 220 and pyautogui.pixel(572,1542)[2] > 220:
# xmin, xmax, ymin, ymax = lookAt("atkDragon")
img = pyautogui.screenshot(region=(100, 400, 300, 1040))
width, height = img.size
# print(width, height)
for x in range(0, width, 5):
for y in range(0, height, 5):
r, g, b, w = img.getpixel((x, y))
# print(r, g, b)
if ((r in range(3, 9)) and (g in range(125, 131)) and (b in range(187, 193))):
# if r == 6 and g == 128 and b == 190:
pyautogui.moveTo(x+50, y+200)
print(x, y)
# time.sleep(0.05)
break
| en | 0.430878 | # PvpBot # BATTLE SCEEN # Start at the battle sceen # Click on BATTLE # Click choose League # Click Go # FIGHT # Spam first attack # Click on the charged attack # Complete the disign # # # If pink pixel apeard => Shield # # Screenshot # Spam first, charged attack and shield on [290:800] # Detect the color of the charged attack and lookAt() # # Click on the Design in 5s # Spam # NEXT FIGHT OR REWARDS # Click on Next # Click on League # Click Go # For rewards Click on Orange pixel # x, y = Full sceen # def screenshot(xmin, xmax, ymin, ymax): # img = pyautogui.screenshot(region=(xmin, xmax, ymin, ymax)) # width, height = img.size() # # img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR) # # img = np.array(img) # # img = img[ymin:ymax, xmin:xmax] # # img = imutils.resize(img, width=(xmax-xmin)/2, height=(ymax-ymin)/2) # return img # BATTLE SCEEN # print(width, height) # print(r, g, b, w) # Green battle (67, 208, 164, 255) # BATTLE # px= pyautogui.pixel(572,1542) # print(px) # if pyautogui.pixel(572,1542)[0] > 220 and pyautogui.pixel(572,1542)[1] > 220 and pyautogui.pixel(572,1542)[2] > 220: # xmin, xmax, ymin, ymax = lookAt("atkDragon") # print(width, height) # print(r, g, b) # if r == 6 and g == 128 and b == 190: # time.sleep(0.05) | 2.84493 | 3 |
test/unit/test_version_parse.py | johan-boule/fastcov | 71 | 6613917 | <reponame>johan-boule/fastcov
#!/usr/bin/env python3
"""
Author: <NAME>
Make sure fastcov correctly handles gcov version parsing
"""
import fastcov
def test_ubuntu_18_04():
version_str = "gcov (Ubuntu 7.3.0-27ubuntu1~18.04) 7.3.0"
assert fastcov.parseVersionFromLine(version_str) == (7,3,0)
def test_ubuntu_test_ppa():
version_str = "gcov (Ubuntu 9.1.0-2ubuntu2~16.04) 9.1.0"
assert fastcov.parseVersionFromLine(version_str) == (9,1,0)
def test_experimental():
    """Trailing date/'(experimental)' qualifiers after the version are ignored."""
    banner = "gcov (GCC) 9.0.1 20190401 (experimental)"
    assert fastcov.parseVersionFromLine(banner) == (9, 0, 1)
def test_upstream():
    """A plain upstream GCC banner parses to its version tuple."""
    banner = "gcov (GCC) 9.1.0"
    assert fastcov.parseVersionFromLine(banner) == (9, 1, 0)
def test_no_version():
    """A banner with no version number at all falls back to (0, 0, 0)."""
    banner = "gcov (GCC)"
    assert fastcov.parseVersionFromLine(banner) == (0, 0, 0)
"""
Author: <NAME>
Make sure fastcov correctly handles gcov version parsing
"""
import fastcov
def test_ubuntu_18_04():
version_str = "gcov (Ubuntu 7.3.0-27ubuntu1~18.04) 7.3.0"
assert fastcov.parseVersionFromLine(version_str) == (7,3,0)
def test_ubuntu_test_ppa():
version_str = "gcov (Ubuntu 9.1.0-2ubuntu2~16.04) 9.1.0"
assert fastcov.parseVersionFromLine(version_str) == (9,1,0)
def test_experimental():
version_str = "gcov (GCC) 9.0.1 20190401 (experimental)"
assert fastcov.parseVersionFromLine(version_str) == (9,0,1)
def test_upstream():
version_str = "gcov (GCC) 9.1.0"
assert fastcov.parseVersionFromLine(version_str) == (9,1,0)
def test_no_version():
version_str = "gcov (GCC)"
assert fastcov.parseVersionFromLine(version_str) == (0,0,0) | en | 0.418059 | #!/usr/bin/env python3 Author: <NAME> Make sure fastcov correctly handles gcov version parsing | 2.342363 | 2 |
Train_DURCAN.py | guanghaoyin/Camera-ScreenSR | 0 | 6613918 | import torch
import torch.nn as nn
from torch.autograd import Variable
import imageio
import Data.utils as utils
import Loss
from Model.Net import DURCAN
from option import args
from importlib import import_module
from Data.dataloader import MSDataLoader
from decimal import Decimal
from Loss import visual_loss
from tqdm import tqdm
from Metrics.cal_PSNR_SSIM import cal_psnr_tensor,cal_ssim_tensor
torch.manual_seed(args.seed)
checkpoint = utils.checkpoint(args)
loss = Loss.Loss(args,checkpoint)
module_train = import_module('Data.' + args.data_train)
module_test = import_module('Data.' + args.data_test)
trainset = getattr(module_train, args.data_train)(args)
testset = getattr(module_test, args.data_test)(args,train = False)
loader_train = MSDataLoader(args,trainset,batch_size=args.batch_size,shuffle=True)
loader_test = MSDataLoader(args,testset,batch_size=1,shuffle=False)
netSR = DURCAN(args)
# TVLoss = loss.tv_loss.TVLoss()
# TVLoss = TVLoss.to(torch.device('cuda:'+str(args.GPU_ID)))
if args.load_model == '.':
netSR = netSR.to(torch.device('cuda:'+str(args.GPU_ID)))
if not args.cpu and args.n_GPUs > 1:
netSR = nn.DataParallel(netSR, range(args.n_GPUs)).cuda()
netSR = netSR.module
else:
if args.n_GPUs == 1:
# netSR = utils.load_GPU(netSR,args.load_model + '/model/DURCAN_best.pt')
# netSR = utils.load_GPU(netSR,args.load_model + '/model/Important_DURCAN_latest.pt')
netSR = utils.load_GPU(netSR, args.load_model + '/model/DURCAN_12_1_comp_tanh_latest.pt')
# netSR = utils.load_GPU(netSR,args.load_model + '/model/DURCAN_12_comp_latest.pt')
# netSR = utils.load_GPU(netSR,args.load_model + '/model/DURCAN_12_comp_tanh_latest.pt')
# netSR = utils.load_GPU(netSR,args.load_model + '/model/DURCAN_latestX4.pt')
netSR = netSR.to(torch.device('cuda:'+str(args.GPU_ID)))
else:
netSR = nn.DataParallel(netSR, range(args.n_GPUs)).cuda()
netSR = utils.load_GPUS(netSR, args.load_model + '/model/DURCAN_best.pt')
# netSR = utils.load_GPUS(netSR, args.load_model + '/model/DURCAN_latestX4.pt')
netSR = netSR.module
optimizerSR = utils.make_optimizer(args, netSR)
schedulerSR = utils.make_scheduler(args, optimizerSR)
print(netSR)
visualizer_env = 'DURCAN' + '_' + args.data_train + '_' + args.loss
if args.load_model != '.': visualizer_env += '_load'
visualizer = visual_loss.Visualizer(env=visualizer_env)
train_total_step = len(loader_train)
test_total_step = len(loader_test)
SRloss_list = []
PSNR_list = []
for epoch in range(args.epochs):
loss.start_log()
loss.step()
timer_data, timer_model = utils.timer(), utils.timer()
schedulerSR.step()
learning_rateSR = schedulerSR.get_lr()[0]
checkpoint.write_log('[Epoch {}]\tLearning rateSR: {:.2e}'.format(epoch, Decimal(learning_rateSR)))
for batch, (lr, hr, idx_scale) in enumerate(loader_train):
netSR.train()
lr, hr = utils.tensor_prepare([lr, hr],args)
lr.hr = Variable(lr.cuda()),Variable(hr.cuda())
timer_data.hold()
timer_model.tic()
optimizerSR.zero_grad()
SR = netSR(lr)
# SR = utils.unnormalize(SR)
# update SR network
# netSR.zero_grad()
# tvloss = TVLoss(SR)*(2e-9)
# SR_loss = loss(SR, hr) + tvloss
SR_loss = loss(SR, hr)
error_last = loss.log[-1, -1]
if SR_loss.item() < args.skip_threshold * error_last:
SR_loss_item = SR_loss.item()
SRloss_list.append(SR_loss_item)
SR_loss.backward()
optimizerSR.step()
# tvloss.backward()
print('Epoch [{}/{}],Step[{}/{}]'.format(epoch + 1, args.epochs, batch + 1, train_total_step))
print('LossSR:{}'.format(SR_loss_item))
visualizer.plot_many_stack({visualizer_env + '_SRloss': SR_loss_item})
else:
print('Skip this batch {}! (Loss: {})'.format(
batch + 1, loss.item()
))
timer_model.hold()
# if batch == 20:
if batch == 1:
# and epoch%2 == 0:
checkpoint.write_log('\nEvaluation:')
# checkpoint.add_log(torch.zeros(1, len(args.scale)))
print('start evaluation')
netSR.eval()
timer_test = utils.timer()
with torch.no_grad():
eval_psnr = 0
eval_ssim = 0
for idx_img, (lr, hr, filename) in enumerate(tqdm(loader_test, ncols=80)):
filename = filename[0]
no_eval = (hr.nelement() == 1)
if not no_eval:
lr, hr = utils.tensor_prepare([lr, hr], args)
else:
lr = utils.tensor_prepare([lr], args)[0]
SR = netSR(lr)
SR = utils.unnormalize(SR)
hr = utils.unnormalize(hr)
# SR = utils.quantize(SR,args.rgb_range)
save_list = [SR]
# save_list = []
single_PSNR = cal_psnr_tensor(SR, hr, int(args.scale), args.rgb_range)
single_SSIM = cal_ssim_tensor(SR, hr, args.rgb_range)
print('\ntest img {} ssim: {}'.format(filename, single_SSIM))
print('\ntest img {} pnsr: {}'.format(filename, single_PSNR))
if not no_eval:
eval_psnr += single_PSNR
eval_ssim += single_SSIM
# save_list.extend([lr, hr])
if args.save_results:
# and epoch%20 == 0:
checkpoint.save_results(epoch, filename, save_list, args.scale,postfix=('DUR_SR', 'LR', 'HR'))
netSR.save(checkpoint.dir, epoch)
print('test step:[{}/{}]\n'.format(idx_img + 1, test_total_step))
ave_psnr = eval_psnr / len(loader_test)
ave_ssim = eval_ssim / len(loader_test)
# checkpoint.log = ave_psnr
PSNR_list.append(ave_psnr)
with open(checkpoint.dir + '/psnr_reford.txt', 'w') as f:
f.write(str(PSNR_list))
checkpoint.write_log(
'[{} x{}]\tPSNR\tSSIM: {:.3f}'.format(
args.data_test,
args.scale,
ave_psnr,
ave_ssim,
)
)
checkpoint.write_log(
'Total time: {:.2f}s\n'.format(timer_test.toc()), refresh=True
)
visualizer.plot_many_stack({'evaluate_psnr': ave_psnr})
visualizer.plot_many_stack({'evaluate_ssim': ave_ssim})
print('Evaluation psnr of the model is {}'.format(ave_psnr))
print('Evaluation ssim of the model is {}'.format(ave_ssim))
torch.cuda.empty_cache()
timer_data.tic()
loss.end_log(len(loader_train))
| import torch
import torch.nn as nn
from torch.autograd import Variable
import imageio
import Data.utils as utils
import Loss
from Model.Net import DURCAN
from option import args
from importlib import import_module
from Data.dataloader import MSDataLoader
from decimal import Decimal
from Loss import visual_loss
from tqdm import tqdm
from Metrics.cal_PSNR_SSIM import cal_psnr_tensor,cal_ssim_tensor
torch.manual_seed(args.seed)
checkpoint = utils.checkpoint(args)
loss = Loss.Loss(args,checkpoint)
module_train = import_module('Data.' + args.data_train)
module_test = import_module('Data.' + args.data_test)
trainset = getattr(module_train, args.data_train)(args)
testset = getattr(module_test, args.data_test)(args,train = False)
loader_train = MSDataLoader(args,trainset,batch_size=args.batch_size,shuffle=True)
loader_test = MSDataLoader(args,testset,batch_size=1,shuffle=False)
netSR = DURCAN(args)
# TVLoss = loss.tv_loss.TVLoss()
# TVLoss = TVLoss.to(torch.device('cuda:'+str(args.GPU_ID)))
if args.load_model == '.':
netSR = netSR.to(torch.device('cuda:'+str(args.GPU_ID)))
if not args.cpu and args.n_GPUs > 1:
netSR = nn.DataParallel(netSR, range(args.n_GPUs)).cuda()
netSR = netSR.module
else:
if args.n_GPUs == 1:
# netSR = utils.load_GPU(netSR,args.load_model + '/model/DURCAN_best.pt')
# netSR = utils.load_GPU(netSR,args.load_model + '/model/Important_DURCAN_latest.pt')
netSR = utils.load_GPU(netSR, args.load_model + '/model/DURCAN_12_1_comp_tanh_latest.pt')
# netSR = utils.load_GPU(netSR,args.load_model + '/model/DURCAN_12_comp_latest.pt')
# netSR = utils.load_GPU(netSR,args.load_model + '/model/DURCAN_12_comp_tanh_latest.pt')
# netSR = utils.load_GPU(netSR,args.load_model + '/model/DURCAN_latestX4.pt')
netSR = netSR.to(torch.device('cuda:'+str(args.GPU_ID)))
else:
netSR = nn.DataParallel(netSR, range(args.n_GPUs)).cuda()
netSR = utils.load_GPUS(netSR, args.load_model + '/model/DURCAN_best.pt')
# netSR = utils.load_GPUS(netSR, args.load_model + '/model/DURCAN_latestX4.pt')
netSR = netSR.module
optimizerSR = utils.make_optimizer(args, netSR)
schedulerSR = utils.make_scheduler(args, optimizerSR)
print(netSR)
visualizer_env = 'DURCAN' + '_' + args.data_train + '_' + args.loss
if args.load_model != '.': visualizer_env += '_load'
visualizer = visual_loss.Visualizer(env=visualizer_env)
train_total_step = len(loader_train)
test_total_step = len(loader_test)
SRloss_list = []
PSNR_list = []
for epoch in range(args.epochs):
loss.start_log()
loss.step()
timer_data, timer_model = utils.timer(), utils.timer()
schedulerSR.step()
learning_rateSR = schedulerSR.get_lr()[0]
checkpoint.write_log('[Epoch {}]\tLearning rateSR: {:.2e}'.format(epoch, Decimal(learning_rateSR)))
for batch, (lr, hr, idx_scale) in enumerate(loader_train):
netSR.train()
lr, hr = utils.tensor_prepare([lr, hr],args)
lr.hr = Variable(lr.cuda()),Variable(hr.cuda())
timer_data.hold()
timer_model.tic()
optimizerSR.zero_grad()
SR = netSR(lr)
# SR = utils.unnormalize(SR)
# update SR network
# netSR.zero_grad()
# tvloss = TVLoss(SR)*(2e-9)
# SR_loss = loss(SR, hr) + tvloss
SR_loss = loss(SR, hr)
error_last = loss.log[-1, -1]
if SR_loss.item() < args.skip_threshold * error_last:
SR_loss_item = SR_loss.item()
SRloss_list.append(SR_loss_item)
SR_loss.backward()
optimizerSR.step()
# tvloss.backward()
print('Epoch [{}/{}],Step[{}/{}]'.format(epoch + 1, args.epochs, batch + 1, train_total_step))
print('LossSR:{}'.format(SR_loss_item))
visualizer.plot_many_stack({visualizer_env + '_SRloss': SR_loss_item})
else:
print('Skip this batch {}! (Loss: {})'.format(
batch + 1, loss.item()
))
timer_model.hold()
# if batch == 20:
if batch == 1:
# and epoch%2 == 0:
checkpoint.write_log('\nEvaluation:')
# checkpoint.add_log(torch.zeros(1, len(args.scale)))
print('start evaluation')
netSR.eval()
timer_test = utils.timer()
with torch.no_grad():
eval_psnr = 0
eval_ssim = 0
for idx_img, (lr, hr, filename) in enumerate(tqdm(loader_test, ncols=80)):
filename = filename[0]
no_eval = (hr.nelement() == 1)
if not no_eval:
lr, hr = utils.tensor_prepare([lr, hr], args)
else:
lr = utils.tensor_prepare([lr], args)[0]
SR = netSR(lr)
SR = utils.unnormalize(SR)
hr = utils.unnormalize(hr)
# SR = utils.quantize(SR,args.rgb_range)
save_list = [SR]
# save_list = []
single_PSNR = cal_psnr_tensor(SR, hr, int(args.scale), args.rgb_range)
single_SSIM = cal_ssim_tensor(SR, hr, args.rgb_range)
print('\ntest img {} ssim: {}'.format(filename, single_SSIM))
print('\ntest img {} pnsr: {}'.format(filename, single_PSNR))
if not no_eval:
eval_psnr += single_PSNR
eval_ssim += single_SSIM
# save_list.extend([lr, hr])
if args.save_results:
# and epoch%20 == 0:
checkpoint.save_results(epoch, filename, save_list, args.scale,postfix=('DUR_SR', 'LR', 'HR'))
netSR.save(checkpoint.dir, epoch)
print('test step:[{}/{}]\n'.format(idx_img + 1, test_total_step))
ave_psnr = eval_psnr / len(loader_test)
ave_ssim = eval_ssim / len(loader_test)
# checkpoint.log = ave_psnr
PSNR_list.append(ave_psnr)
with open(checkpoint.dir + '/psnr_reford.txt', 'w') as f:
f.write(str(PSNR_list))
checkpoint.write_log(
'[{} x{}]\tPSNR\tSSIM: {:.3f}'.format(
args.data_test,
args.scale,
ave_psnr,
ave_ssim,
)
)
checkpoint.write_log(
'Total time: {:.2f}s\n'.format(timer_test.toc()), refresh=True
)
visualizer.plot_many_stack({'evaluate_psnr': ave_psnr})
visualizer.plot_many_stack({'evaluate_ssim': ave_ssim})
print('Evaluation psnr of the model is {}'.format(ave_psnr))
print('Evaluation ssim of the model is {}'.format(ave_ssim))
torch.cuda.empty_cache()
timer_data.tic()
loss.end_log(len(loader_train))
| en | 0.293974 | # TVLoss = loss.tv_loss.TVLoss() # TVLoss = TVLoss.to(torch.device('cuda:'+str(args.GPU_ID))) # netSR = utils.load_GPU(netSR,args.load_model + '/model/DURCAN_best.pt') # netSR = utils.load_GPU(netSR,args.load_model + '/model/Important_DURCAN_latest.pt') # netSR = utils.load_GPU(netSR,args.load_model + '/model/DURCAN_12_comp_latest.pt') # netSR = utils.load_GPU(netSR,args.load_model + '/model/DURCAN_12_comp_tanh_latest.pt') # netSR = utils.load_GPU(netSR,args.load_model + '/model/DURCAN_latestX4.pt') # netSR = utils.load_GPUS(netSR, args.load_model + '/model/DURCAN_latestX4.pt') # SR = utils.unnormalize(SR) # update SR network # netSR.zero_grad() # tvloss = TVLoss(SR)*(2e-9) # SR_loss = loss(SR, hr) + tvloss # tvloss.backward() # if batch == 20: # and epoch%2 == 0: # checkpoint.add_log(torch.zeros(1, len(args.scale))) # SR = utils.quantize(SR,args.rgb_range) # save_list = [] # save_list.extend([lr, hr]) # and epoch%20 == 0: # checkpoint.log = ave_psnr | 1.956442 | 2 |
standard_library/colorlog_ex.py | mcclayac/20PythonLibraries | 0 | 6613919 | """
created: mcclayac
Company Name : BigMAN Software
MyName: <NAME>
date: 11/23/18
day of month: 23
Project Name: 20PythonLibraries
filename:
package name:
IDE: PyCharm
"""
import colorlog
logger = colorlog.getLogger()
logger.setLevel(colorlog.colorlog.logging.DEBUG)
handler = colorlog.StreamHandler()
handler.setFormatter(colorlog.ColoredFormatter())
logger.addHandler(handler)
logger.debug("Debug message")
logger.info("Information message")
logger.warning("Warning message")
logger.error("Error message")
logger.critical("Critical message")
| """
created: mcclayac
Company Name : BigMAN Software
MyName: <NAME>
date: 11/23/18
day of month: 23
Project Name: 20PythonLibraries
filename:
package name:
IDE: PyCharm
"""
import colorlog
logger = colorlog.getLogger()
logger.setLevel(colorlog.colorlog.logging.DEBUG)
handler = colorlog.StreamHandler()
handler.setFormatter(colorlog.ColoredFormatter())
logger.addHandler(handler)
logger.debug("Debug message")
logger.info("Information message")
logger.warning("Warning message")
logger.error("Error message")
logger.critical("Critical message")
| en | 0.654996 | created: mcclayac Company Name : BigMAN Software MyName: <NAME> date: 11/23/18 day of month: 23 Project Name: 20PythonLibraries filename: package name: IDE: PyCharm | 2.094635 | 2 |
app/posts/routes.py | Lester016/Reverie | 3 | 6613920 | <filename>app/posts/routes.py
from datetime import timedelta

from flask import (Blueprint, abort, flash, redirect, render_template,
                   request, url_for)
from flask_login import current_user, login_required

from app import db
from app.models import Post
from app.posts.forms import NewPost
from app.posts.utils import save_post_image
# The first argument is the blueprint name; it namespaces every endpoint
# registered here (e.g. url_for('posts.post')). __name__ tells Flask where
# the blueprint's package lives so templates/static files can be located.
posts = Blueprint('posts', __name__)
@posts.route("/create_post", methods=['GET', 'POST'])
@login_required
def create_post():
form = NewPost()
if form.validate_on_submit():
postImage = ""
if form.postImage.data:
postImage = save_post_image(form.postImage.data)
post = Post(
Title=form.title.data,
Content=form.content.data,
ImageFile=postImage,
Author=current_user
)
db.session.add(post)
db.session.commit()
flash("Succesfully Posted!", category='success')
return redirect(url_for('users.profile'))
return render_template('posts/create-post.html', form=form, title="Make a story")
@posts.route("/posts/<int:postID>", methods=['GET', 'POST'])
def post(postID):
post = Post.query.get_or_404(postID)
return render_template('posts/post.html', title="Posts", post=post)
@posts.route("/posts/<int:postID>/update", methods=['GET', 'POST'])
@login_required
def update_post(postID):
post = Post.query.get_or_404(postID)
if post.Author != current_user:
abort(403)
form = NewPost()
if form.validate_on_submit():
if form.postImage.data:
postImage = save_post_image(form.postImage.data)
post.ImageFile = postImage
post.Title = form.title.data
post.Content = form.content.data
db.session.commit()
return redirect(url_for('posts.post', postID=post.id))
elif request.method == 'GET':
form.title.data = post.Title
form.content.data = post.Content
form.postImage.data = post.ImageFile
return render_template('posts/update-post.html', title="Update Post", form=form, post=post)
@posts.route("/posts/<int:postID>/delete", methods=['POST'])
@login_required
def delete_post(postID):
post = Post.query.get_or_404(postID)
if post.Author != current_user:
abort(403)
db.session.delete(post)
db.session.commit()
flash("Post has been deleted", category='danger')
return redirect(url_for('users.profile'))
| <filename>app/posts/routes.py
from flask import (render_template, url_for, flash,
redirect, request, Blueprint)
from flask_login import current_user, login_required
from app import db
from app.models import Post
from app.posts.forms import NewPost
from app.posts.utils import save_post_image
from datetime import timedelta
# The first argument is use to navigate different routes using that Blueprint
posts = Blueprint('posts', __name__)
@posts.route("/create_post", methods=['GET', 'POST'])
@login_required
def create_post():
form = NewPost()
if form.validate_on_submit():
postImage = ""
if form.postImage.data:
postImage = save_post_image(form.postImage.data)
post = Post(
Title=form.title.data,
Content=form.content.data,
ImageFile=postImage,
Author=current_user
)
db.session.add(post)
db.session.commit()
flash("Succesfully Posted!", category='success')
return redirect(url_for('users.profile'))
return render_template('posts/create-post.html', form=form, title="Make a story")
@posts.route("/posts/<int:postID>", methods=['GET', 'POST'])
def post(postID):
post = Post.query.get_or_404(postID)
return render_template('posts/post.html', title="Posts", post=post)
@posts.route("/posts/<int:postID>/update", methods=['GET', 'POST'])
@login_required
def update_post(postID):
post = Post.query.get_or_404(postID)
if post.Author != current_user:
abort(403)
form = NewPost()
if form.validate_on_submit():
if form.postImage.data:
postImage = save_post_image(form.postImage.data)
post.ImageFile = postImage
post.Title = form.title.data
post.Content = form.content.data
db.session.commit()
return redirect(url_for('posts.post', postID=post.id))
elif request.method == 'GET':
form.title.data = post.Title
form.content.data = post.Content
form.postImage.data = post.ImageFile
return render_template('posts/update-post.html', title="Update Post", form=form, post=post)
@posts.route("/posts/<int:postID>/delete", methods=['POST'])
@login_required
def delete_post(postID):
post = Post.query.get_or_404(postID)
if post.Author != current_user:
abort(403)
db.session.delete(post)
db.session.commit()
flash("Post has been deleted", category='danger')
return redirect(url_for('users.profile'))
| en | 0.824976 | # The first argument is use to navigate different routes using that Blueprint | 2.485398 | 2 |
crawler/sitting_info/transform.py | bangyuwen/2020voting-guide | 5 | 6613921 | import json
from typing import List, Dict, Optional
def transform(sitting_info: List[str]) -> str:
sittings: Dict[str, Dict[str, List[Dict[str, Optional[str]]]]] = {}
sitting_pages = [json.loads(page)['jsonList'] for page in sitting_info]
for partial_sittings in sitting_pages:
for sitting in partial_sittings:
term = sitting['term'] + '屆'
meeting_unit = sitting['meetingUnit']
if term not in sittings:
sittings[term] = {}
if meeting_unit not in sittings[term]:
sittings[term][meeting_unit] = []
sittings[term][meeting_unit].append(sitting)
return json.dumps(sittings, ensure_ascii=False)
| import json
from typing import List, Dict, Optional
def transform(sitting_info: List[str]) -> str:
sittings: Dict[str, Dict[str, List[Dict[str, Optional[str]]]]] = {}
sitting_pages = [json.loads(page)['jsonList'] for page in sitting_info]
for partial_sittings in sitting_pages:
for sitting in partial_sittings:
term = sitting['term'] + '屆'
meeting_unit = sitting['meetingUnit']
if term not in sittings:
sittings[term] = {}
if meeting_unit not in sittings[term]:
sittings[term][meeting_unit] = []
sittings[term][meeting_unit].append(sitting)
return json.dumps(sittings, ensure_ascii=False)
| none | 1 | 3.114224 | 3 | |
app/__init__.py | TiMMaTTiee/bramsplanter | 0 | 6613922 | <reponame>TiMMaTTiee/bramsplanter
import os
from flask import Flask, current_app, send_file
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS, cross_origin
from flask_migrate import Migrate
from dotenv import load_dotenv

from .api import api_bp
from .client import client_bp

load_dotenv()  # take environment variables from .env.

# Serve the pre-built Vue/SPA assets from dist/static.
app = Flask(__name__, static_folder='../dist/static')
app.register_blueprint(api_bp)
# app.register_blueprint(client_bp)

from .config import Config
app.logger.info('>>> {}'.format(Config.FLASK_ENV))

# Heroku-style DATABASE_URL uses the legacy 'postgres://' scheme, which
# SQLAlchemy 1.4+ rejects; rewrite it to 'postgresql://'.
uri = os.getenv("DATABASE_URL")  # or other relevant config var
if uri and uri.startswith("postgres://"):
    uri = uri.replace("postgres://", "postgresql://", 1)
Config.SQLALCHEMY_DATABASE_URI=uri

# NOTE(review): Config is mutated above but never applied to the app
# (no app.config.from_object(Config)); confirm SQLAlchemy picks up the
# URI some other way before relying on this.
db = SQLAlchemy()
migrate = Migrate()
db.init_app(app)
migrate.init_app(app, db)

app.logger.info('>>> {}'.format(Config.SQLALCHEMY_DATABASE_URI))

# Imported for its side effect of registering models with SQLAlchemy.
from .models import Users

# enable CORS
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'


@app.route('/')
def index_client():
    # Hand the SPA entry point (built index.html) to the browser; the
    # client-side router takes over from there.
    dist_dir = current_app.config['DIST_DIR']
    entry = os.path.join(dist_dir, 'index.html')
    return send_file(entry)
| import os
from flask import Flask, current_app, send_file
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS, cross_origin
from flask_migrate import Migrate
from dotenv import load_dotenv
from .api import api_bp
from .client import client_bp
load_dotenv() # take environment variables from .env.
app = Flask(__name__, static_folder='../dist/static')
app.register_blueprint(api_bp)
# app.register_blueprint(client_bp)
from .config import Config
app.logger.info('>>> {}'.format(Config.FLASK_ENV))
uri = os.getenv("DATABASE_URL") # or other relevant config var
if uri and uri.startswith("postgres://"):
uri = uri.replace("postgres://", "postgresql://", 1)
Config.SQLALCHEMY_DATABASE_URI=uri
db = SQLAlchemy()
migrate = Migrate()
db.init_app(app)
migrate.init_app(app, db)
app.logger.info('>>> {}'.format(Config.SQLALCHEMY_DATABASE_URI))
from .models import Users
# enable CORS
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
@app.route('/')
def index_client():
dist_dir = current_app.config['DIST_DIR']
entry = os.path.join(dist_dir, 'index.html')
return send_file(entry) | en | 0.43512 | # take environment variables from .env. # app.register_blueprint(client_bp) # or other relevant config var # enable CORS | 2.119409 | 2 |
pkg/__init__.py | consbio/simple-lpk | 0 | 6613923 | <filename>pkg/__init__.py
import glob
import os
import click
import pkg_resources
import struct
import tempfile
import shutil
import uuid
from zipfile import ZipFile, ZIP_DEFLATED, ZIP_STORED
import warnings
try:
import zlib
compression = ZIP_DEFLATED
except:
warnings.warn('Unable to load zlib, unable to compress files into LPK (stored instead)')
compression = ZIP_STORED
GEOM_TYPES = {
1: 'point',
3: 'line',
5: 'polygon',
8: 'point', # MultiPoint
11: 'point', # PointZ
13: 'line', # PolylineZ
15: 'polygon' # PolygonZ
}
# other types not yet supported
LYR = '0000data.lyr'
PKINFO = """<?xml version="1.0" encoding="utf-8" ?>
<?xml-stylesheet type="text/xsl" href="http://www.arcgisonline.com/home/pkinfostylesheet.xsl"?>
<pkinfo Culture='en-US'>
<ID>{0}</ID>
<name>Data</name>
<version>10.3</version>
<size>-1</size>
<created></created>
<type>Layer Package</type>
<servable>false</servable>
<packagelocation></packagelocation>
<pkinfolocation></pkinfolocation>
</pkinfo>"""
# Note: UUID needs to be updated in pkinfo
def create_lpk(shpfilename, outfilename):
    """Package a shapefile into a minimal ArcGIS layer package (.lpk).

    Reads the geometry type from the .shp header, copies a matching template
    .lyr plus all shapefile sidecar files into a temp directory laid out like
    an LPK, writes the pkinfo metadata, and zips it all into *outfilename*.

    Raises ValueError for geometry types not in GEOM_TYPES.
    """
    # open shapefile in binary mode
    # seek to byte 32: https://www.esri.com/library/whitepapers/pdfs/shapefile.pdf page 4
    geom_type_code = None
    with open(shpfilename, 'rb') as f:
        f.seek(32)
        # Geometry type is a little-endian 32-bit int at offset 32.
        geom_type_code = struct.unpack('i', f.read(4))[0]

    if not geom_type_code in GEOM_TYPES:
        raise ValueError('Geometry type of shapefile not supported: {0}'.format(geom_type_code))

    geom_type = GEOM_TYPES[geom_type_code]
    tmpdir = tempfile.mkdtemp()
    try:
        # v10/ holds the template layer file matching the geometry type.
        lyr_dir = os.path.join(tmpdir, 'v10')
        os.mkdir(lyr_dir)
        shutil.copy(
            os.path.join(pkg_resources.resource_filename(__name__, 'lyr/{0}.lyr'.format(geom_type))),
            os.path.join(lyr_dir, LYR)
        )

        # esriinfo/item.pkinfo carries the package metadata with a new UUID.
        esri_dir = os.path.join(tmpdir, 'esriinfo')
        os.mkdir(esri_dir)
        with open(os.path.join(esri_dir, 'item.pkinfo'), 'w') as outfile:
            outfile.write(PKINFO.format(str(uuid.uuid4())))

        # commondata/data/ receives every shapefile sidecar (.shp, .dbf, .prj,
        # ...), renamed to the canonical basename 'data'; lock files skipped.
        shp_dir = os.path.join(tmpdir, 'commondata', 'data')
        os.makedirs(shp_dir)
        in_shp_dir, shp = os.path.split(shpfilename)
        shp_root = os.path.splitext(shp)[0]
        for filename in glob.glob(os.path.join(in_shp_dir, shp_root + '.*')):
            if not filename.endswith('.lock'):
                shutil.copy(filename, os.path.join(shp_dir, os.path.split(filename)[1].replace(shp_root, 'data')))

        # Zip the staged tree with paths relative to the temp root.
        with ZipFile(outfilename, 'w', compression=compression) as zf:
            for root, dirs, filenames in os.walk(tmpdir):
                for filename in filenames:
                    zf.write(os.path.join(root, filename), os.path.join(os.path.relpath(root, tmpdir), filename))
    finally:
        shutil.rmtree(tmpdir)
@click.command(short_help="Create a simple ArcGIS layer package from a shapefile")
@click.argument('shp')
def pkg_lpk(shp):
    """Example:

    \b
    > pkg_lpk in.shp
    creates file in.lpk

    wildcards allowed:

    \b
    > pkg_lpk *.shp
    """
    for match in glob.glob(shp):
        # BUG FIX: the original used match.replace('.shp', '.lpk'), which also
        # rewrites a '.shp' substring anywhere earlier in the path (e.g. a
        # directory named 'x.shp'); splitext only swaps the real extension.
        create_lpk(match, os.path.splitext(match)[0] + '.lpk')
| <filename>pkg/__init__.py
import glob
import os
import click
import pkg_resources
import struct
import tempfile
import shutil
import uuid
from zipfile import ZipFile, ZIP_DEFLATED, ZIP_STORED
import warnings
try:
import zlib
compression = ZIP_DEFLATED
except:
warnings.warn('Unable to load zlib, unable to compress files into LPK (stored instead)')
compression = ZIP_STORED
GEOM_TYPES = {
1: 'point',
3: 'line',
5: 'polygon',
8: 'point', # MultiPoint
11: 'point', # PointZ
13: 'line', # PolylineZ
15: 'polygon' # PolygonZ
}
# other types not yet supported
LYR = '0000data.lyr'
PKINFO = """<?xml version="1.0" encoding="utf-8" ?>
<?xml-stylesheet type="text/xsl" href="http://www.arcgisonline.com/home/pkinfostylesheet.xsl"?>
<pkinfo Culture='en-US'>
<ID>{0}</ID>
<name>Data</name>
<version>10.3</version>
<size>-1</size>
<created></created>
<type>Layer Package</type>
<servable>false</servable>
<packagelocation></packagelocation>
<pkinfolocation></pkinfolocation>
</pkinfo>"""
# Note: UUID needs to be updated in pkinfo
def create_lpk(shpfilename, outfilename):
# open shapefile in binary mode
# seek to byte 32: https://www.esri.com/library/whitepapers/pdfs/shapefile.pdf page 4
geom_type_code = None
with open(shpfilename, 'rb') as f:
f.seek(32)
geom_type_code = struct.unpack('i', f.read(4))[0]
if not geom_type_code in GEOM_TYPES:
raise ValueError('Geometry type of shapefile not supported: {0}'.format(geom_type_code))
geom_type = GEOM_TYPES[geom_type_code]
tmpdir = tempfile.mkdtemp()
try:
lyr_dir = os.path.join(tmpdir, 'v10')
os.mkdir(lyr_dir)
shutil.copy(
os.path.join(pkg_resources.resource_filename(__name__, 'lyr/{0}.lyr'.format(geom_type))),
os.path.join(lyr_dir, LYR)
)
esri_dir = os.path.join(tmpdir, 'esriinfo')
os.mkdir(esri_dir)
with open(os.path.join(esri_dir, 'item.pkinfo'), 'w') as outfile:
outfile.write(PKINFO.format(str(uuid.uuid4())))
shp_dir = os.path.join(tmpdir, 'commondata', 'data')
os.makedirs(shp_dir)
in_shp_dir, shp = os.path.split(shpfilename)
shp_root = os.path.splitext(shp)[0]
for filename in glob.glob(os.path.join(in_shp_dir, shp_root + '.*')):
if not filename.endswith('.lock'):
shutil.copy(filename, os.path.join(shp_dir, os.path.split(filename)[1].replace(shp_root, 'data')))
with ZipFile(outfilename, 'w', compression=compression) as zf:
for root, dirs, filenames in os.walk(tmpdir):
for filename in filenames:
zf.write(os.path.join(root, filename), os.path.join(os.path.relpath(root, tmpdir), filename))
finally:
shutil.rmtree(tmpdir)
@click.command(short_help="Create a simple ArcGIS layer package from a shapefile")
@click.argument('shp')
def pkg_lpk(shp):
"""Example:
\b
> pkg_lpk in.shp
creates file in.lpk
wildcards allowed:
\b
> pkg_lpk *.shp
"""
for shp in glob.glob(shp):
create_lpk(shp, shp.replace('.shp', '.lpk'))
| en | 0.421061 | # MultiPoint # PointZ # PolylineZ # PolygonZ # other types not yet supported <?xml version="1.0" encoding="utf-8" ?> <?xml-stylesheet type="text/xsl" href="http://www.arcgisonline.com/home/pkinfostylesheet.xsl"?> <pkinfo Culture='en-US'> <ID>{0}</ID> <name>Data</name> <version>10.3</version> <size>-1</size> <created></created> <type>Layer Package</type> <servable>false</servable> <packagelocation></packagelocation> <pkinfolocation></pkinfolocation> </pkinfo> # Note: UUID needs to be updated in pkinfo # open shapefile in binary mode # seek to byte 32: https://www.esri.com/library/whitepapers/pdfs/shapefile.pdf page 4 Example: \b > pkg_lpk in.shp creates file in.lpk wildcards allowed: \b > pkg_lpk *.shp | 2.344214 | 2 |
bookmarksync/__main__.py | jontwo/bookmark-sync | 0 | 6613924 | <reponame>jontwo/bookmark-sync
"""Bookmark Sync main module."""
import argparse
import os
from xmldiff.main import patch_tree
from bookmarksync.helpers import load_bookmarks, save_bookmarks, save_tree, diff_trees
def main():
    """Run the bookmark-sync command-line entry point.

    Loads the bookmark file named on the command line, diffs it against the
    stored sync file, and either prints the diff or writes the patched result
    back out, depending on the flags.

    TODO
    options
     - create new merged.html
     - compare existing merged.html with new bookmarks.html
     - compare and save
     - store all intermediate diffs (by datestamp? diff time or modified time?)
     - apply each diff since last sync
    """
    args = parse_args()
    if args.bookmark_file:
        new_bkm = load_bookmarks(args.bookmark_file)
        sync_file = args.sync_file
        # A bare sync filename (no directory part) is resolved next to the
        # bookmark file rather than the current working directory.
        if not os.path.dirname(sync_file):
            sync_file = os.path.join(os.path.dirname(os.path.abspath(args.bookmark_file)),
                                     sync_file)
        try:
            old_bkm = load_bookmarks(sync_file)
        except IOError:
            # First run: no sync file exists yet, so seed it with the current
            # bookmarks and stop -- there is nothing to diff against.
            save_tree(new_bkm, sync_file)
            return
        diff = diff_trees(new_bkm, old_bkm)
        output_file = None
        if args.save_file:
            output_file = args.save_file
        elif args.overwrite_file:
            # --overwrite_file writes the merged result over the input file.
            output_file = args.bookmark_file
        if output_file:
            # Apply the computed diff to the previously synced tree and save
            # the merged bookmarks to the chosen destination.
            updated_bkm = patch_tree(diff, old_bkm)
            save_bookmarks(updated_bkm, output_file)
        else:
            # No output requested: just show the diff on stdout.
            print(diff)
def parse_args():
    """Build the command-line parser and return the parsed arguments."""
    cli = argparse.ArgumentParser(description='Utility to sync exported bookmark files')
    # Paths are expanded (~) at parse time via the type callback.
    cli.add_argument('--bookmark_file', '-b', type=os.path.expanduser,
                     help='Compare bookmark file to the current sync file.')
    cli.add_argument('--save_file', '-s', type=os.path.expanduser,
                     help='Save the compared bookmarks to a new bookmark file.')
    cli.add_argument('--overwrite_file', '-o', action='store_true',
                     help='Save the compared bookmarks to the input bookmark file.')
    cli.add_argument('--sync_file', '-y', type=os.path.expanduser, default='merged.html',
                     help='Sync file to store current bookmarks. (default: %(default)s in '
                          'same directory as bookmark file). This will be created if it does not '
                          'exist.')
    return cli.parse_args()
import argparse
import os
from xmldiff.main import patch_tree
from bookmarksync.helpers import load_bookmarks, save_bookmarks, save_tree, diff_trees
def main():
"""Runs bookmark sync main method.
TODO
options
- create new merged.html
- compare existing merged.html with new bookmarks.html
- compare and save
- store all intermediate diffs (by datestamp? diff time or modified time?)
- apply each diff since last sync
"""
args = parse_args()
if args.bookmark_file:
new_bkm = load_bookmarks(args.bookmark_file)
sync_file = args.sync_file
if not os.path.dirname(sync_file):
sync_file = os.path.join(os.path.dirname(os.path.abspath(args.bookmark_file)),
sync_file)
try:
old_bkm = load_bookmarks(sync_file)
except IOError:
save_tree(new_bkm, sync_file)
return
diff = diff_trees(new_bkm, old_bkm)
output_file = None
if args.save_file:
output_file = args.save_file
elif args.overwrite_file:
output_file = args.bookmark_file
if output_file:
updated_bkm = patch_tree(diff, old_bkm)
save_bookmarks(updated_bkm, output_file)
else:
print(diff)
def parse_args():
"""Creates parser and reads args."""
parser = argparse.ArgumentParser(description='Utility to sync exported bookmark files')
parser.add_argument('--bookmark_file', '-b', type=os.path.expanduser,
help='Compare bookmark file to the current sync file.')
parser.add_argument('--save_file', '-s', type=os.path.expanduser,
help='Save the compared bookmarks to a new bookmark file.')
parser.add_argument('--overwrite_file', '-o', action='store_true',
help='Save the compared bookmarks to the input bookmark file.')
parser.add_argument('--sync_file', '-y', type=os.path.expanduser, default='merged.html',
help='Sync file to store current bookmarks. (default: %(default)s in '
'same directory as bookmark file). This will be created if it does not '
'exist.')
return parser.parse_args() | en | 0.85799 | Bookmark Sync main module. Runs bookmark sync main method. TODO options - create new merged.html - compare existing merged.html with new bookmarks.html - compare and save - store all intermediate diffs (by datestamp? diff time or modified time?) - apply each diff since last sync Creates parser and reads args. | 3.034844 | 3 |
apps/core/models.py | geekodour-old-hackathons/skillenza-iitg | 0 | 6613925 | <gh_stars>0
from django.db import models
class Category(models.Model):
    """A flat category record identified by a short display name."""

    # Required display name, capped at 80 characters.
    name = models.CharField(max_length=80)
| from django.db import models
class Category(models.Model):
name = models.CharField(max_length=80) | none | 1 | 2.094785 | 2 | |
tamara/goodnight.py | hydrius/Tamara2 | 0 | 6613926 | from tamara import Tamara
import datetime
class GoodMorningAndGoodNight():
    """Busy-polls the clock and has Tamara announce at 21:00 and 09:00.

    ``self.status`` is a one-shot latch: 1 means an announcement is armed,
    0 means it already fired and must be re-armed (during the 07:xx or
    12:xx hours) before it can fire again.
    """

    def __init__(self):
        self.Tamara = Tamara()
        # Armed flag; start armed so the first matching minute fires.
        self.status = 1

    def main(self):
        """Run the announcement poll loop forever."""
        while True:
            now = datetime.datetime.now()
            # BUG FIX: the original read/wrote the bare local name `status`,
            # which raised UnboundLocalError on the first iteration; the
            # latch lives on the instance as `self.status`.
            if self.status == 1:
                if now.hour == 21 and now.minute == 0:
                    self.Tamara.say("Good. night. sluts", override=True)
                    self.status = 0
                elif now.hour == 9 and now.minute == 0:
                    self.Tamara.say("Good. Morning. Where is my Coffee?", override=True)
                    self.status = 0
            # Re-arm during the 07:xx and 12:xx hours.
            if now.hour == 12 or now.hour == 7:
                self.status = 1
# Script entry point: build the scheduler and run its (infinite) poll loop.
if __name__ == "__main__":
    x = GoodMorningAndGoodNight()
    x.main()
| from tamara import Tamara
import datetime
class GoodMorningAndGoodNight():
def __init__(self):
self.Tamara = Tamara()
self.status = 1
def main(self):
while True:
now = datetime.datetime.now()
if status == 1:
if now.hour == 21 and now.minute == 0:
self.Tamara.say("Good. night. sluts", override=True)
status = 0
elif now.hour == 9 and now.minute == 0:
self.Tamara.say("Good. Morning. Where is my Coffee?", override=True)
status = 0
if now.hour == 12 or now.hour == 7:
status = 1
if __name__ == "__main__":
x = GoodMorningAndGoodNight()
x.main()
| none | 1 | 3.23303 | 3 | |
bcs-ui/backend/web_console/auth.py | laodiu/bk-bcs | 599 | 6613927 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
from functools import wraps
import tornado.web
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from backend.components.utils import http_get
from .session import session_mgr
logger = logging.getLogger(__name__)
def authenticated(view_func):
    """Decorator enforcing a valid web-console session on a handler method.

    Reads the ``session_id`` query argument, resolves it in the session
    store for the requested project/cluster, and injects the stored
    context into the wrapped view as ``kwargs["context"]``. A missing,
    unknown, or expired session id results in HTTP 401.
    """

    @wraps(view_func)
    def _wrapped_view(self, *args, **kwargs):
        sid = self.get_argument("session_id", None)
        if not sid:
            raise tornado.web.HTTPError(401, log_message=_("session_id为空"))
        session = session_mgr.create(kwargs.get("project_id", ""),
                                     kwargs.get("cluster_id", ""))
        ctx = session.get(sid)
        if not ctx:
            raise tornado.web.HTTPError(401, log_message=_("获取ctx为空, session_id不正确或者已经过期"))
        kwargs["context"] = ctx
        return view_func(self, *args, **kwargs)

    return _wrapped_view
| # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
from functools import wraps
import tornado.web
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from backend.components.utils import http_get
from .session import session_mgr
logger = logging.getLogger(__name__)
def authenticated(view_func):
"""权限认证装饰器"""
@wraps(view_func)
def _wrapped_view(self, *args, **kwargs):
session_id = self.get_argument("session_id", None)
if not session_id:
raise tornado.web.HTTPError(401, log_message=_("session_id为空"))
project_id = kwargs.get("project_id", "")
cluster_id = kwargs.get("cluster_id", "")
session = session_mgr.create(project_id, cluster_id)
ctx = session.get(session_id)
if not ctx:
raise tornado.web.HTTPError(401, log_message=_("获取ctx为空, session_id不正确或者已经过期"))
kwargs["context"] = ctx
return view_func(self, *args, **kwargs)
return _wrapped_view
| en | 0.86264 | # -*- coding: utf-8 -*- Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available. Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 权限认证装饰器 | 1.925623 | 2 |
rdsupdater-deva.py | WMTU/rdsupdater | 0 | 6613928 | # import system libraries
import urllib, urllib.parse, urllib.request, json
# import telnet to communicate with DEVA RDS
from telnetlib import Telnet
#### CONFIGURATION ####
# DEVA RDS injector telnet endpoint (Telnet() accepts the port as a string).
rds_ip = '10.10.100.73'
rds_port = '1024'
# Station play-log API; request the single most recent (delayed) entry.
log_url = "https://log.wmtu.fm/api/2.0/log"
log_args = { 'n': '1', 'delay': 'true' }
#### helper functions ####
# fetch the current song information
def fetchSong(log_url, log_args):
    """Fetch the most recently played song from the log API.

    Returns a dict with 'title', 'artist' and 'album' keys. Any failure
    (network error, HTTP status, bad URL, malformed JSON) falls back to a
    static station-ID entry so the updater never crashes mid-broadcast.
    """
    fallback = [{'song': '91.9FM', 'artist': 'WMTU', 'album': 'WMTU'}]
    # do the request for song data
    try:
        url = log_url + '?' + urllib.parse.urlencode(log_args)
        request = urllib.request.Request(url, headers={'User-Agent': 'RDS-Updater'})
        data = urllib.request.urlopen(request).read()
        data = json.loads(data.decode('utf-8'))
    # BUG FIX: the original caught (Exception, HTTPError) first, so the
    # URLError handler below was unreachable and every failure was labelled
    # "HTTP Error". HTTPError must be tested before URLError (its subclass).
    except urllib.error.HTTPError as e:
        print("HTTP Error => ", e)
        data = fallback
    except urllib.error.URLError as e:
        print("URL Error => ", e)
        data = fallback
    except Exception as e:
        # Bad URL, undecodable body, malformed JSON, etc. -- keep the
        # original never-crash behaviour (and its message) for these.
        print("HTTP Error => ", e)
        data = fallback
    # set current song
    c_title = data[0]['song']
    c_artist = data[0]['artist']
    c_album = data[0]['album']
    print("=> CURRENT SONG: {} by {}".format(c_title, c_artist))
    return { 'title': c_title, 'artist': c_artist, 'album': c_album }
def updateRDS(telnet_conn, title, artist):
    """Push the current track text to the DEVA RDS injector.

    The injector expects an ASCII ``TEXT=...`` command terminated by
    CR plus a trailing newline byte. Always returns True.
    """
    payload = "TEXT={} by {}\n\r".format(title, artist)
    print("=> " + payload)
    # Telnet writes bytes, so the command string must be ASCII-encoded.
    telnet_conn.write(payload.encode('ascii') + b"\n")
    return True
#### main program ####
print("MANUAL RDS UPDATE PROGRAM")
print("-------------------------\n")
# open up a new telnet connection
with Telnet(rds_ip, rds_port) as tn:
    run = True
    while run == True:
        # Ask the operator before each push; any answer other than "y" quits.
        prompt = input("Update RDS? (y/n) => ")
        if prompt != "y":
            run = False
        else:
            run = True
            # Pull the latest song from the log API and push it to the injector.
            song_data = fetchSong(log_url, log_args)
            updateRDS(tn, song_data['title'], song_data['artist'])
exit(0)
import urllib, urllib.parse, urllib.request, json
# import telnet to communicate with DEVA RDS
from telnetlib import Telnet
#### CONFIGURATION ####
rds_ip = '10.10.100.73'
rds_port = '1024'
log_url = "https://log.wmtu.fm/api/2.0/log"
log_args = { 'n': '1', 'delay': 'true' }
#### helper functions ####
# fetch the current song information
def fetchSong(log_url, log_args):
# do the request for song data
try:
url = log_url + '?' + urllib.parse.urlencode(log_args)
request = urllib.request.Request(url, headers={'User-Agent': 'RDS-Updater'})
data = urllib.request.urlopen(request).read()
data = json.loads(data.decode('utf-8'))
except (Exception, urllib.error.HTTPError) as e:
print("HTTP Error => ", e)
data = [{'song': '91.9FM', 'artist': 'WMTU', 'album': 'WMTU'}]
except (Exception, urllib.error.URLError) as e:
print("URL Error => ", e)
data = [{'song': '91.9FM', 'artist': 'WMTU', 'album': 'WMTU'}]
# set current song
c_title = data[0]['song']
c_artist = data[0]['artist']
c_album = data[0]['album']
print("=> CURRENT SONG: {} by {}".format(c_title, c_artist))
return { 'title': c_title, 'artist': c_artist, 'album': c_album }
def updateRDS(telnet_conn, title, artist):
# format string for the RDS injector
rdstext = "TEXT={} by {}\n\r".format(title, artist)
print("=> " + rdstext)
# send the string to RDS injector
# note: strings must be byte endoded, so use encode() when sending
telnet_conn.write(rdstext.encode('ascii') + b"\n")
return True
#### main program ####
print("MANUAL RDS UPDATE PROGRAM")
print("-------------------------\n")
# open up a new telnet connection
with Telnet(rds_ip, rds_port) as tn:
run = True
while run == True:
prompt = input("Update RDS? (y/n) => ")
if prompt != "y":
run = False
else:
run = True
song_data = fetchSong(log_url, log_args)
updateRDS(tn, song_data['title'], song_data['artist'])
exit(0) | en | 0.591882 | # import system libraries # import telnet to communicate with DEVA RDS #### CONFIGURATION #### #### helper functions #### # fetch the current song information # do the request for song data # set current song # format string for the RDS injector # send the string to RDS injector # note: strings must be byte endoded, so use encode() when sending #### main program #### # open up a new telnet connection | 2.509591 | 3 |
tests/qgrid.py | Optimox/docker-python | 1 | 6613929 | import unittest
import pandas as pd
from qgrid import QgridWidget
class TestQgrid(unittest.TestCase):
    """Regression tests for qgrid widget message handling."""

    def test_nans(self):
        """Sorting a mixed-type column that contains NaN must not raise."""
        # BUG FIX: the method was missing `self` (TypeError when unittest
        # invoked it) and referenced `np` without importing numpy.
        import numpy as np
        df = pd.DataFrame([(pd.Timestamp('2017-02-02'), np.nan),
                           (4, 2),
                           ('foo', 'bar')])
        view = QgridWidget(df=df)
        view._handle_qgrid_msg_helper({
            'type': 'change_sort',
            'sort_field': 1,
            'sort_ascending': True
        })
| import unittest
import pandas as pd
from qgrid import QgridWidget
class TestQgrid(unittest.TestCase):
def test_nans():
df = pd.DataFrame([(pd.Timestamp('2017-02-02'), np.nan),
(4, 2),
('foo', 'bar')])
view = QgridWidget(df=df)
view._handle_qgrid_msg_helper({
'type': 'change_sort',
'sort_field': 1,
'sort_ascending': True
})
| none | 1 | 2.750258 | 3 | |
clothes/tests/test_clothes_views.py | daikikuchi/Personal-closet | 0 | 6613930 | <gh_stars>0
from django.test import TestCase, Client
from django.urls import reverse
from django.utils import timezone
from django.contrib.auth import get_user_model
from clothes import models
from clothes.tests.test_clothes_models import (sample_brand, sample_category,
sample_subcategory, sample_shop)
def create_other_user():
    """Create and return a second account used to verify per-user isolation."""
    return get_user_model().objects.create_user(
        email='<EMAIL>',
        password='<PASSWORD>',
    )
class Cloths_Model_Test(TestCase):
    """View tests for the clothes app.

    Covers list/detail/search views for brands, categories, shops and
    clothes, checking (a) logged-out users are redirected to login and
    (b) logged-in users only ever see their own objects, never another
    user's.

    NOTE(review): several fixture strings look anonymised ('<NAME>',
    '<EMAIL>', '<PASSWORD>') -- presumably scrubbed placeholders; confirm
    against the original fixtures before relying on their exact values.
    """
    def setUp(self):
        # Fresh test client plus the "logged in" user under test.
        self.client = Client()
        self.user = get_user_model().objects.create_user(
            email='<EMAIL>',
            password='password'
        )
    def test_brand_listing_for_logged_in_user(self):
        """Test list view of Brand objects for logged in user"""
        self.client.force_login(self.user)
        other_user = create_other_user()
        # Create brand object with logged_in user
        sample_brand(self.user, name='Lardini')
        # Create brand object with other_user
        sample_brand(user=other_user, name='Gransasso')
        response = self.client.get(reverse('clothes:brand_list'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Lardini')
        self.assertTemplateUsed(response, 'brand/brand_list.html')
        # Only the logged-in user's brand should be listed.
        self.assertEqual(len(response.context['brand_list']), 1)
    def test_brand_listing_for_logged_out_user(self):
        """Test accessing to list view of Brand objects with logged out user"""
        response = self.client.get(reverse('clothes:brand_list'))
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(
            response, '%s?next=/clothes/brand/' % (reverse('account_login')))
        response = self.client.get(
            '%s?next=/clothes/brand/' % (reverse('account_login')))
        self.assertContains(response, 'ログイン')
    def test_brand_clothes_listing_for_logged_in_user(self):
        """Test listing of clothes of a brand for logged in user"""
        category = sample_category(self.user)
        brand = sample_brand(self.user, name='Lardini')
        self.client.force_login(self.user)
        other_user = create_other_user()
        # Create clothe object with logged_in user
        models.Clothes.objects.create(
            user=self.user,
            name='Lardini shirt Jacket',
            price=35000,
            description='ラルディーニのシャツジャケット',
            brand=brand,
            sub_category=sample_subcategory(category=category),
            shop=sample_shop(self.user),
            purchased=timezone.now(),
        )
        # Create clothe object with other_user
        models.Clothes.objects.create(
            user=other_user,
            name='<NAME>',
            price=22000,
            description='Gransassoのシャツジャケット',
            brand=sample_brand(user=other_user, name='Gransasso'),
            sub_category=sample_subcategory(category=category),
            shop=sample_shop(other_user, name='ModernBlue'),
            purchased=timezone.now(),
        )
        response = self.client.get(reverse('clothes:brand_clothes',
                                   kwargs={'id': brand.id,
                                           'slug': brand.slug}))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Lardini shirt Jacket')
        self.assertTemplateUsed(response, 'brand/brand_clothes_list.html')
        self.assertEqual(len(response.context['brand_clothes_list']), 1)
    def test_brand_clothes_listing_for_logged_out_user(self):
        """Test for logged_out_user to access to brand clotes list view"""
        brand = sample_brand(user=self.user)
        response = self.client.get(reverse('clothes:brand_clothes',
                                   kwargs={'id': brand.id,
                                           'slug': brand.slug}))
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(
            response, '%s?next=/clothes/brand/%s/%s/'
            % (reverse('account_login'), brand.id, brand.slug))
        response = self.client.get(
            '%s?next=/clothes/brand/%s/%s/'
            % (reverse('account_login'), brand.id, brand.slug))
        self.assertContains(response, 'ログイン')
    def test_category_listing_for_logged_in_user(self):
        """Test list view of Category objects for logged in user"""
        self.client.force_login(self.user)
        other_user = create_other_user()
        # Create category object with logged_in user
        sample_category(user=self.user)
        # Create category object with other_user
        sample_category(user=other_user, name='Knit')
        response = self.client.get(reverse('clothes:category_list'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Jacket')
        self.assertTemplateUsed(response, 'category/category_list.html')
        self.assertEqual(len(response.context['category_list']), 1)
    def test_category_listing_for_logged_out_user(self):
        """
        Test accessing to list view of category objects with logged out user
        """
        response = self.client.get(reverse('clothes:category_list'))
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(
            response, '%s?next=/clothes/category/'
            % (reverse('account_login')))
        response = self.client.get(
            '%s?next=/clothes/category/' % (reverse('account_login')))
        self.assertContains(response, 'ログイン')
    def test_category_clothes_listing_for_logged_in_user(self):
        """Test listing of clothes of a Category for logged in user"""
        self.client.force_login(self.user)
        category = sample_category(self.user)
        # Create clothe object with logged_in user
        models.Clothes.objects.create(
            user=self.user,
            name='<NAME>',
            price=35000,
            description='ラルディーニのシャツジャケット',
            brand=sample_brand(self.user),
            sub_category=sample_subcategory(category=category),
            shop=sample_shop(self.user),
            purchased=timezone.now(),
        )
        other_user = create_other_user()
        # Create clothe object with other_user
        models.Clothes.objects.create(
            user=other_user,
            name='<NAME>',
            price=22000,
            description='Gransassoのシャツジャケット',
            brand=sample_brand(user=other_user, name='Gransasso'),
            sub_category=sample_subcategory(category=category),
            shop=sample_shop(other_user, name='ModernBlue'),
            purchased=timezone.now(),
        )
        response = self.client.get(reverse('clothes:category_clothes',
                                   kwargs={'slug': category.slug}))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Lardini shirt Jacket')
        self.assertTemplateUsed(response,
                                'category/category_clothes_list.html')
        self.assertEqual(len(response.context['category_clothes_list']), 1)
    def test_category_clothes_listing_for_logged_out_user(self):
        """Test for logged_out_user to access to category clothes list view"""
        category = sample_category(self.user)
        response = self.client.get(reverse('clothes:category_clothes',
                                   kwargs={'slug': category.slug}))
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(
            response, '%s?next=/clothes/category/%s/'
            % (reverse('account_login'), category.slug))
        response = self.client.get(
            '%s?next=/clothes/category/%s/'
            % (reverse('account_login'), category.slug))
        self.assertContains(response, 'ログイン')
    def test_shop_listing_for_logged_in_user(self):
        """Test list view of Shop objects for logged in user"""
        self.client.force_login(self.user)
        other_user = create_other_user()
        # Create shop object with logged_in user
        sample_shop(self.user)
        # Create shop object with other_user
        sample_shop(user=other_user, name='Ships')
        response = self.client.get(reverse('clothes:shop_list'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Beams')
        self.assertTemplateUsed(response, 'shop/shop_list.html')
        # Only shop created by logged_in_user is in the list
        self.assertEqual(len(response.context['shop_list']), 1)
    def test_shop_listing_for_logged_out_user(self):
        """Test accessing to list view of shop objects with logged out user"""
        response = self.client.get(reverse('clothes:shop_list'))
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(
            response, '%s?next=/clothes/shop/'
            % (reverse('account_login')))
        response = self.client.get(
            '%s?next=/clothes/shop/' % (reverse('account_login')))
        self.assertContains(response, 'ログイン')
    def test_shop_clothes_listing_for_logged_in_user(self):
        """Test listing of clothes of a shop for logged in user"""
        category = sample_category(self.user)
        shop = sample_shop(self.user)
        self.client.force_login(self.user)
        other_user = create_other_user()
        # Create clothes object with logged_in user
        models.Clothes.objects.create(
            user=self.user,
            name='<NAME>',
            price=35000,
            description='ラルディーニのシャツジャケット',
            brand=sample_brand(self.user),
            sub_category=sample_subcategory(category=category),
            shop=shop,
            purchased=timezone.now(),
        )
        # Create clothes object with other_user
        models.Clothes.objects.create(
            user=other_user,
            name='<NAME>',
            price=22000,
            description='Gransassoのシャツジャケット',
            brand=sample_brand(user=other_user, name='Gransasso'),
            sub_category=sample_subcategory(category=category),
            shop=shop,
            purchased=timezone.now(),
        )
        response = self.client.get(reverse('clothes:shop_clothes',
                                   kwargs={'slug': shop.slug}))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Lardini shirt Jacket')
        self.assertTemplateUsed(response, 'shop/shop_clothes_list.html')
        # Only clothes of shop created by logged_in_user is in the list
        self.assertEqual(len(response.context['shop_clothes_list']), 1)
    def test_shop_clothes_listing_for_logged_out_user(self):
        """Test for logged_out_user to access to shop's clothes list view"""
        shop = sample_shop(self.user)
        response = self.client.get(reverse('clothes:shop_clothes',
                                   kwargs={'slug': shop.slug}))
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(
            response, '%s?next=/clothes/shop/%s/'
            % (reverse('account_login'), shop.slug))
        response = self.client.get(
            '%s?next=/clothes/shop/%s/'
            % (reverse('account_login'), shop.slug))
        self.assertContains(response, 'ログイン')
    def test_clothes_listing_for_logged_in_user(self):
        """Test list view of clothe objects for logged in user"""
        self.client.force_login(self.user)
        other_user = create_other_user()
        category = sample_category(self.user)
        # Create clothes object with logged_in user
        models.Clothes.objects.create(
            user=self.user,
            name='<NAME>',
            price=35000,
            description='ラルディーニのシャツジャケット',
            brand=sample_brand(self.user),
            sub_category=sample_subcategory(category=category),
            shop=sample_shop(user=self.user),
            purchased=timezone.now(),
        )
        # Create clothes object with other_user
        models.Clothes.objects.create(
            user=other_user,
            name='<NAME>',
            price=22000,
            description='Gransassoのシャツジャケット',
            brand=sample_brand(user=other_user, name='Gransasso'),
            sub_category=sample_subcategory(category=category),
            shop=sample_shop(user=other_user, name='Ships'),
            purchased=timezone.now(),
        )
        response = self.client.get(reverse('clothes_list_home'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Lardini')
        self.assertTemplateUsed(response, 'home.html')
        self.assertEqual(len(response.context['clothes_list']), 1)
    def test_clothes_listing_for_logged_out_user(self):
        """
        Test accessing to list view of Clothes objects with logged out user
        """
        response = self.client.get(reverse('clothes_list_home'))
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(
            response, '%s?next=/' % (reverse('account_login')))
        response = self.client.get(
            '%s?next=/' % (reverse('account_login')))
        self.assertContains(response, 'ログイン')
    def test_clothes_detail_view(self):
        """Test detail view for clothes"""
        self.client.force_login(self.user)
        category = sample_category(self.user)
        # Create clothes object with logged_in user
        self.clothes = models.Clothes.objects.create(
            user=self.user,
            name='<NAME>',
            price=35000,
            description='ラルディーニのシャツジャケット',
            brand=sample_brand(self.user),
            sub_category=sample_subcategory(category=category),
            shop=sample_shop(user=self.user),
            purchased=timezone.now(),
        )
        response = self.client.get(self.clothes.get_absolute_url())
        # An id with no matching object must 404.
        no_response = self.client.get('/clothes/19/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(no_response.status_code, 404)
        self.assertContains(response, 'Lardini shirt Jacket')
        self.assertContains(response, 'ラルディーニのシャツジャケット')
        self.assertTemplateUsed(response, 'clothes/clothes_detail.html')
    def test_search_clothes_by_brand_name(self):
        """Test returning clothes with specific brand name"""
        self.client.force_login(self.user)
        category = sample_category(self.user)
        # Create clothes object to be included in search result
        clothes1 = models.Clothes.objects.create(
            user=self.user,
            name='Lardini shirt Jacket',
            price=35000,
            description='ラルディーニのシャツジャケット',
            brand=sample_brand(self.user, name='LARDINI'),
            sub_category=sample_subcategory(category=category),
            shop=sample_shop(user=self.user),
            purchased=timezone.now(),
        )
        # Create another clothes object not to be included in search result
        clothes2 = models.Clothes.objects.create(
            user=self.user,
            name='Zanoneタートルネック',
            price=35000,
            description='Zanoneのタートルネック',
            brand=sample_brand(self.user, name='ZANONE'),
            sub_category=sample_subcategory(category=category),
            shop=sample_shop(user=self.user, name='Ships'),
            purchased=timezone.now(),
        )
        response = (self.client.get("%s?q=%s" %
                    (reverse('clothes:search_results'), clothes1.brand.name)))
        self.assertContains(response, clothes1.brand)
        self.assertContains(response, clothes1.name)
        self.assertNotContains(response, clothes2.name)
        self.assertNotContains(response, clothes2.brand)
        self.assertEqual(len(response.context['clothes_list']), 1)
        self.assertTemplateUsed(response,
                                'clothes/clothes_search_results.html')
    def test_search_clothes_by_clothes_name(self):
        """Test returning clothes with a part of clothes name"""
        self.client.force_login(self.user)
        category = sample_category(self.user)
        # Create clothes object to be included in search result
        clothes1 = models.Clothes.objects.create(
            user=self.user,
            name='YCHAI ワンウォッシュコットンストレッチデニムジーンズ「ROBUSTO',
            price=35000,
            description='YPU004-2DS0001A サイズ31 ウエスト82 股上28 股下73 ワタリ31 裾幅18.8',
            brand=sample_brand(self.user, name='YCHAI'),
            sub_category=sample_subcategory(category=category),
            shop=sample_shop(user=self.user),
            purchased=timezone.now(),
        )
        # Create another clothes object not to be included in search result
        clothes2 = models.Clothes.objects.create(
            user=self.user,
            name='Zanoneタートルネック',
            price=35000,
            description='Zanoneのタートルネック',
            brand=sample_brand(self.user, name='ZANONE'),
            sub_category=sample_subcategory(category=category),
            shop=sample_shop(user=self.user, name='Ships'),
            purchased=timezone.now(),
        )
        response = self.client.get("%s?q=%s" %
                                   (reverse('clothes:search_results'), 'デニム'))
        self.assertContains(response, clothes1.brand)
        self.assertContains(response, clothes1.name)
        self.assertNotContains(response, clothes2.name)
        self.assertNotContains(response, clothes2.brand)
        self.assertEqual(len(response.context['clothes_list']), 1)
        self.assertTemplateUsed(response,
                                'clothes/clothes_search_results.html')
    def test_search_clothes_by_shop_name(self):
        """Test returning clothes with specific shop name"""
        self.client.force_login(self.user)
        category = sample_category(self.user)
        # Create clothes object to be included in search result
        clothes1 = models.Clothes.objects.create(
            user=self.user,
            name='YCHAI ワンウォッシュコットンストレッチデニムジーンズ「ROBUSTO',
            price=35000,
            description='YPU004-2DS0001A サイズ31 ウエスト82 股上28 股下73 ワタリ31 裾幅18.8',
            brand=sample_brand(self.user, name='YCHAI'),
            sub_category=sample_subcategory(category=category),
            shop=sample_shop(user=self.user, name='Beams'),
            purchased=timezone.now(),
        )
        # Create another clothes object not to be included in search result
        clothes2 = models.Clothes.objects.create(
            user=self.user,
            name='Zanoneタートルネック',
            price=35000,
            description='Zanoneのタートルネック',
            brand=sample_brand(self.user, name='ZANONE'),
            sub_category=sample_subcategory(category=category),
            shop=sample_shop(user=self.user, name='Ships'),
            purchased=timezone.now(),
        )
        response = (self.client.get("%s?q=%s"
                    % (reverse('clothes:search_results'), clothes1.shop.name)))
        self.assertContains(response, clothes1.brand)
        self.assertContains(response, clothes1.name)
        self.assertNotContains(response, clothes2.name)
        self.assertNotContains(response, clothes2.brand)
        self.assertEqual(len(response.context['clothes_list']), 1)
        self.assertTemplateUsed(response,
                                'clothes/clothes_search_results.html')
| from django.test import TestCase, Client
from django.urls import reverse
from django.utils import timezone
from django.contrib.auth import get_user_model
from clothes import models
from clothes.tests.test_clothes_models import (sample_brand, sample_category,
sample_subcategory, sample_shop)
def create_other_user():
other_user = get_user_model().objects.create_user(
email='<EMAIL>',
password='<PASSWORD>'
)
return other_user
class Cloths_Model_Test(TestCase):
def setUp(self):
self.client = Client()
self.user = get_user_model().objects.create_user(
email='<EMAIL>',
password='password'
)
def test_brand_listing_for_logged_in_user(self):
"""Test list view of Brand objects for logged in user"""
self.client.force_login(self.user)
other_user = create_other_user()
# Create brand object with logged_in user
sample_brand(self.user, name='Lardini')
# Create brand object with other_user
sample_brand(user=other_user, name='Gransasso')
response = self.client.get(reverse('clothes:brand_list'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Lardini')
self.assertTemplateUsed(response, 'brand/brand_list.html')
self.assertEqual(len(response.context['brand_list']), 1)
def test_brand_listing_for_logged_out_user(self):
"""Test accessing to list view of Brand objects with logged out user"""
response = self.client.get(reverse('clothes:brand_list'))
self.assertEqual(response.status_code, 302)
self.assertRedirects(
response, '%s?next=/clothes/brand/' % (reverse('account_login')))
response = self.client.get(
'%s?next=/clothes/brand/' % (reverse('account_login')))
self.assertContains(response, 'ログイン')
def test_brand_clothes_listing_for_logged_in_user(self):
"""Test listing of clothes of a brand for logged in user"""
category = sample_category(self.user)
brand = sample_brand(self.user, name='Lardini')
self.client.force_login(self.user)
other_user = create_other_user()
# Create clothe object with logged_in user
models.Clothes.objects.create(
user=self.user,
name='Lardini shirt Jacket',
price=35000,
description='ラルディーニのシャツジャケット',
brand=brand,
sub_category=sample_subcategory(category=category),
shop=sample_shop(self.user),
purchased=timezone.now(),
)
# Create clothe object with other_user
models.Clothes.objects.create(
user=other_user,
name='<NAME>',
price=22000,
description='Gransassoのシャツジャケット',
brand=sample_brand(user=other_user, name='Gransasso'),
sub_category=sample_subcategory(category=category),
shop=sample_shop(other_user, name='ModernBlue'),
purchased=timezone.now(),
)
response = self.client.get(reverse('clothes:brand_clothes',
kwargs={'id': brand.id,
'slug': brand.slug}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Lardini shirt Jacket')
self.assertTemplateUsed(response, 'brand/brand_clothes_list.html')
self.assertEqual(len(response.context['brand_clothes_list']), 1)
def test_brand_clothes_listing_for_logged_out_user(self):
"""Test for logged_out_user to access to brand clotes list view"""
brand = sample_brand(user=self.user)
response = self.client.get(reverse('clothes:brand_clothes',
kwargs={'id': brand.id,
'slug': brand.slug}))
self.assertEqual(response.status_code, 302)
self.assertRedirects(
response, '%s?next=/clothes/brand/%s/%s/'
% (reverse('account_login'), brand.id, brand.slug))
response = self.client.get(
'%s?next=/clothes/brand/%s/%s/'
% (reverse('account_login'), brand.id, brand.slug))
self.assertContains(response, 'ログイン')
def test_category_listing_for_logged_in_user(self):
"""Test list view of Category objects for logged in user"""
self.client.force_login(self.user)
other_user = create_other_user()
# Create category object with logged_in user
sample_category(user=self.user)
# Create category object with other_user
sample_category(user=other_user, name='Knit')
response = self.client.get(reverse('clothes:category_list'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Jacket')
self.assertTemplateUsed(response, 'category/category_list.html')
self.assertEqual(len(response.context['category_list']), 1)
def test_category_listing_for_logged_out_user(self):
"""
Test accessing to list view of category objects with logged out user
"""
response = self.client.get(reverse('clothes:category_list'))
self.assertEqual(response.status_code, 302)
self.assertRedirects(
response, '%s?next=/clothes/category/'
% (reverse('account_login')))
response = self.client.get(
'%s?next=/clothes/category/' % (reverse('account_login')))
self.assertContains(response, 'ログイン')
def test_category_clothes_listing_for_logged_in_user(self):
"""Test listing of clothes of a Category for logged in user"""
self.client.force_login(self.user)
category = sample_category(self.user)
# Create clothe object with logged_in user
models.Clothes.objects.create(
user=self.user,
name='<NAME>',
price=35000,
description='ラルディーニのシャツジャケット',
brand=sample_brand(self.user),
sub_category=sample_subcategory(category=category),
shop=sample_shop(self.user),
purchased=timezone.now(),
)
other_user = create_other_user()
# Create clothe object with other_user
models.Clothes.objects.create(
user=other_user,
name='<NAME>',
price=22000,
description='Gransassoのシャツジャケット',
brand=sample_brand(user=other_user, name='Gransasso'),
sub_category=sample_subcategory(category=category),
shop=sample_shop(other_user, name='ModernBlue'),
purchased=timezone.now(),
)
response = self.client.get(reverse('clothes:category_clothes',
kwargs={'slug': category.slug}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Lardini shirt Jacket')
self.assertTemplateUsed(response,
'category/category_clothes_list.html')
self.assertEqual(len(response.context['category_clothes_list']), 1)
def test_category_clothes_listing_for_logged_out_user(self):
"""Test for logged_out_user to access to category clothes list view"""
category = sample_category(self.user)
response = self.client.get(reverse('clothes:category_clothes',
kwargs={'slug': category.slug}))
self.assertEqual(response.status_code, 302)
self.assertRedirects(
response, '%s?next=/clothes/category/%s/'
% (reverse('account_login'), category.slug))
response = self.client.get(
'%s?next=/clothes/category/%s/'
% (reverse('account_login'), category.slug))
self.assertContains(response, 'ログイン')
def test_shop_listing_for_logged_in_user(self):
"""Test list view of Shop objects for logged in user"""
self.client.force_login(self.user)
other_user = create_other_user()
# Create shop object with logged_in user
sample_shop(self.user)
# Create shop object with other_user
sample_shop(user=other_user, name='Ships')
response = self.client.get(reverse('clothes:shop_list'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Beams')
self.assertTemplateUsed(response, 'shop/shop_list.html')
# Only shop created by logged_in_user is in the list
self.assertEqual(len(response.context['shop_list']), 1)
def test_shop_listing_for_logged_out_user(self):
"""Test accessing to list view of shop objects with logged out user"""
response = self.client.get(reverse('clothes:shop_list'))
self.assertEqual(response.status_code, 302)
self.assertRedirects(
response, '%s?next=/clothes/shop/'
% (reverse('account_login')))
response = self.client.get(
'%s?next=/clothes/shop/' % (reverse('account_login')))
self.assertContains(response, 'ログイン')
def test_shop_clothes_listing_for_logged_in_user(self):
"""Test listing of clothes of a shop for logged in user"""
category = sample_category(self.user)
shop = sample_shop(self.user)
self.client.force_login(self.user)
other_user = create_other_user()
# Create clothes object with logged_in user
models.Clothes.objects.create(
user=self.user,
name='<NAME>',
price=35000,
description='ラルディーニのシャツジャケット',
brand=sample_brand(self.user),
sub_category=sample_subcategory(category=category),
shop=shop,
purchased=timezone.now(),
)
# Create clothes object with other_user
models.Clothes.objects.create(
user=other_user,
name='<NAME>',
price=22000,
description='Gransassoのシャツジャケット',
brand=sample_brand(user=other_user, name='Gransasso'),
sub_category=sample_subcategory(category=category),
shop=shop,
purchased=timezone.now(),
)
response = self.client.get(reverse('clothes:shop_clothes',
kwargs={'slug': shop.slug}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Lardini shirt Jacket')
self.assertTemplateUsed(response, 'shop/shop_clothes_list.html')
# Only clothes of shop created by logged_in_user is in the list
self.assertEqual(len(response.context['shop_clothes_list']), 1)
def test_shop_clothes_listing_for_logged_out_user(self):
"""Test for logged_out_user to access to shop's clothes list view"""
shop = sample_shop(self.user)
response = self.client.get(reverse('clothes:shop_clothes',
kwargs={'slug': shop.slug}))
self.assertEqual(response.status_code, 302)
self.assertRedirects(
response, '%s?next=/clothes/shop/%s/'
% (reverse('account_login'), shop.slug))
response = self.client.get(
'%s?next=/clothes/shop/%s/'
% (reverse('account_login'), shop.slug))
self.assertContains(response, 'ログイン')
def test_clothes_listing_for_logged_in_user(self):
"""Test list view of clothe objects for logged in user"""
self.client.force_login(self.user)
other_user = create_other_user()
category = sample_category(self.user)
# Create clothes object with logged_in user
models.Clothes.objects.create(
user=self.user,
name='<NAME>',
price=35000,
description='ラルディーニのシャツジャケット',
brand=sample_brand(self.user),
sub_category=sample_subcategory(category=category),
shop=sample_shop(user=self.user),
purchased=timezone.now(),
)
# Create clothes object with other_user
models.Clothes.objects.create(
user=other_user,
name='<NAME>',
price=22000,
description='Gransassoのシャツジャケット',
brand=sample_brand(user=other_user, name='Gransasso'),
sub_category=sample_subcategory(category=category),
shop=sample_shop(user=other_user, name='Ships'),
purchased=timezone.now(),
)
response = self.client.get(reverse('clothes_list_home'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Lardini')
self.assertTemplateUsed(response, 'home.html')
self.assertEqual(len(response.context['clothes_list']), 1)
def test_clothes_listing_for_logged_out_user(self):
"""
Test accessing to list view of Clothes objects with logged out user
"""
response = self.client.get(reverse('clothes_list_home'))
self.assertEqual(response.status_code, 302)
self.assertRedirects(
response, '%s?next=/' % (reverse('account_login')))
response = self.client.get(
'%s?next=/' % (reverse('account_login')))
self.assertContains(response, 'ログイン')
def test_clothes_detail_view(self):
"""Test detail view for clothes"""
self.client.force_login(self.user)
category = sample_category(self.user)
# Create clothes object with logged_in user
self.clothes = models.Clothes.objects.create(
user=self.user,
name='<NAME>',
price=35000,
description='ラルディーニのシャツジャケット',
brand=sample_brand(self.user),
sub_category=sample_subcategory(category=category),
shop=sample_shop(user=self.user),
purchased=timezone.now(),
)
response = self.client.get(self.clothes.get_absolute_url())
no_response = self.client.get('/clothes/19/')
self.assertEqual(response.status_code, 200)
self.assertEqual(no_response.status_code, 404)
self.assertContains(response, 'Lardini shirt Jacket')
self.assertContains(response, 'ラルディーニのシャツジャケット')
self.assertTemplateUsed(response, 'clothes/clothes_detail.html')
def test_search_clothes_by_brand_name(self):
"""Test returning clothes with specific brand name"""
self.client.force_login(self.user)
category = sample_category(self.user)
# Create clothes object to be included in search result
clothes1 = models.Clothes.objects.create(
user=self.user,
name='Lardini shirt Jacket',
price=35000,
description='ラルディーニのシャツジャケット',
brand=sample_brand(self.user, name='LARDINI'),
sub_category=sample_subcategory(category=category),
shop=sample_shop(user=self.user),
purchased=timezone.now(),
)
# Create another clothes object not to be included in search result
clothes2 = models.Clothes.objects.create(
user=self.user,
name='Zanoneタートルネック',
price=35000,
description='Zanoneのタートルネック',
brand=sample_brand(self.user, name='ZANONE'),
sub_category=sample_subcategory(category=category),
shop=sample_shop(user=self.user, name='Ships'),
purchased=timezone.now(),
)
response = (self.client.get("%s?q=%s" %
(reverse('clothes:search_results'), clothes1.brand.name)))
self.assertContains(response, clothes1.brand)
self.assertContains(response, clothes1.name)
self.assertNotContains(response, clothes2.name)
self.assertNotContains(response, clothes2.brand)
self.assertEqual(len(response.context['clothes_list']), 1)
self.assertTemplateUsed(response,
'clothes/clothes_search_results.html')
def test_search_clothes_by_clothes_name(self):
"""Test returning clothes with a part of clothes name"""
self.client.force_login(self.user)
category = sample_category(self.user)
# Create clothes object to be included in search result
clothes1 = models.Clothes.objects.create(
user=self.user,
name='YCHAI ワンウォッシュコットンストレッチデニムジーンズ「ROBUSTO',
price=35000,
description='YPU004-2DS0001A サイズ31 ウエスト82 股上28 股下73 ワタリ31 裾幅18.8',
brand=sample_brand(self.user, name='YCHAI'),
sub_category=sample_subcategory(category=category),
shop=sample_shop(user=self.user),
purchased=timezone.now(),
)
# Create another clothes object not to be included in search result
clothes2 = models.Clothes.objects.create(
user=self.user,
name='Zanoneタートルネック',
price=35000,
description='Zanoneのタートルネック',
brand=sample_brand(self.user, name='ZANONE'),
sub_category=sample_subcategory(category=category),
shop=sample_shop(user=self.user, name='Ships'),
purchased=timezone.now(),
)
response = self.client.get("%s?q=%s" %
(reverse('clothes:search_results'), 'デニム'))
self.assertContains(response, clothes1.brand)
self.assertContains(response, clothes1.name)
self.assertNotContains(response, clothes2.name)
self.assertNotContains(response, clothes2.brand)
self.assertEqual(len(response.context['clothes_list']), 1)
self.assertTemplateUsed(response,
'clothes/clothes_search_results.html')
def test_search_clothes_by_shop_name(self):
"""Test returning clothes with specific shop name"""
self.client.force_login(self.user)
category = sample_category(self.user)
# Create clothes object to be included in search result
clothes1 = models.Clothes.objects.create(
user=self.user,
name='YCHAI ワンウォッシュコットンストレッチデニムジーンズ「ROBUSTO',
price=35000,
description='YPU004-2DS0001A サイズ31 ウエスト82 股上28 股下73 ワタリ31 裾幅18.8',
brand=sample_brand(self.user, name='YCHAI'),
sub_category=sample_subcategory(category=category),
shop=sample_shop(user=self.user, name='Beams'),
purchased=timezone.now(),
)
# Create another clothes object not to be included in search result
clothes2 = models.Clothes.objects.create(
user=self.user,
name='Zanoneタートルネック',
price=35000,
description='Zanoneのタートルネック',
brand=sample_brand(self.user, name='ZANONE'),
sub_category=sample_subcategory(category=category),
shop=sample_shop(user=self.user, name='Ships'),
purchased=timezone.now(),
)
response = (self.client.get("%s?q=%s"
% (reverse('clothes:search_results'), clothes1.shop.name)))
self.assertContains(response, clothes1.brand)
self.assertContains(response, clothes1.name)
self.assertNotContains(response, clothes2.name)
self.assertNotContains(response, clothes2.brand)
self.assertEqual(len(response.context['clothes_list']), 1)
self.assertTemplateUsed(response,
'clothes/clothes_search_results.html') | en | 0.896209 | Test list view of Brand objects for logged in user # Create brand object with logged_in user # Create brand object with other_user Test accessing to list view of Brand objects with logged out user Test listing of clothes of a brand for logged in user # Create clothe object with logged_in user # Create clothe object with other_user Test for logged_out_user to access to brand clotes list view Test list view of Category objects for logged in user # Create category object with logged_in user # Create category object with other_user Test accessing to list view of category objects with logged out user Test listing of clothes of a Category for logged in user # Create clothe object with logged_in user # Create clothe object with other_user Test for logged_out_user to access to category clothes list view Test list view of Shop objects for logged in user # Create shop object with logged_in user # Create shop object with other_user # Only shop created by logged_in_user is in the list Test accessing to list view of shop objects with logged out user Test listing of clothes of a shop for logged in user # Create clothes object with logged_in user # Create clothes object with other_user # Only clothes of shop created by logged_in_user is in the list Test for logged_out_user to access to shop's clothes list view Test list view of clothe objects for logged in user # Create clothes object with logged_in user # Create clothes object with other_user Test accessing to list view of Clothes objects with logged out user Test detail view for clothes # Create clothes object with logged_in user Test returning clothes with specific brand name # Create clothes object to be included in search result # Create another clothes object not to be included in search result Test returning clothes with a part of clothes name # Create clothes object to be included in search result # Create another clothes object not to be included in search result 
Test returning clothes with specific shop name # Create clothes object to be included in search result # Create another clothes object not to be included in search result | 2.566102 | 3 |
miranda/deh_melcc/__init__.py | Ouranosinc/miranda | 4 | 6613931 | from .daily import open_txt
| from .daily import open_txt
| none | 1 | 1.036214 | 1 | |
tests/test_widgets.py | lachmanfrantisek/sen | 956 | 6613932 | import logging
import random
from itertools import chain
import pytest
from flexmock import flexmock
from urwid.listbox import SimpleListWalker
from sen.tui.widgets.list.base import WidgetBase
from sen.tui.widgets.list.common import ScrollableListBox, AsyncScrollableListBox
from sen.tui.widgets.list.util import ResponsiveRowWidget
from sen.tui.widgets.table import ResponsiveTable, assemble_rows
from .utils import get_random_text_widget
from .constants import SCREEN_WIDTH, SCREEN_HEIGHT
class MockUI:
buffers = []
def refresh(self):
pass
def set_alarm_in(self, *args, **kwargs):
pass
class DataGenerator:
@classmethod
def text(cls, prefix="line", lines_no=3, return_bytes=False):
s = "\n".join(["{}{}".format(prefix, x+1) for x in range(lines_no)])
if return_bytes:
return s.encode("utf-8")
return s
@classmethod
def stream(cls, prefix="line", lines_no=3, return_bytes=False):
text = []
for x in range(lines_no):
l = "{}{}\n".format(prefix, x+1)
if return_bytes:
l = l.encode("utf-8")
text.append(l)
s = chain(text)
return iter(s)
@classmethod
def render(cls, prefix="line", lines_no=3, return_bytes=True):
w = "{:%d}" % SCREEN_WIDTH
response = []
for x in range(SCREEN_HEIGHT):
if x >= lines_no:
l = w.format("")
else:
l = w.format("{}{}".format(prefix, x+1))
if return_bytes:
response.append(l.encode("utf-8"))
else:
response.append(l)
return response
@pytest.mark.parametrize("inp,expected", [
(DataGenerator.text(), DataGenerator.render()),
(DataGenerator.text(return_bytes=True), DataGenerator.render()),
(DataGenerator.text(prefix="liné"), DataGenerator.render(prefix="liné")),
(DataGenerator.text(prefix="liné", return_bytes=True), DataGenerator.render(prefix="liné")),
])
def test_scrollable_listbox(inp, expected):
lb = ScrollableListBox(MockUI(), inp)
canvas = lb.render((SCREEN_WIDTH, SCREEN_HEIGHT))
text = [bytes().join([t for at, cs, t in ln]) for ln in canvas.content()]
assert text == expected
@pytest.mark.parametrize("inp,expected", [
(DataGenerator.stream(), DataGenerator.render()),
(DataGenerator.stream(return_bytes=True), DataGenerator.render()),
(DataGenerator.stream(prefix="liné"), DataGenerator.render(prefix="liné")),
(DataGenerator.stream(prefix="liné", return_bytes=True), DataGenerator.render(prefix="liné")),
])
def test_async_scrollable_listbox(inp, expected):
ui = flexmock(refresh=lambda: None)
lb = AsyncScrollableListBox(inp, ui)
lb.thread.join()
canvas = lb.render((SCREEN_WIDTH, SCREEN_HEIGHT))
text = [bytes().join([t for at, cs, t in ln]) for ln in canvas.content()]
w = "{:%d}" % SCREEN_WIDTH
s = w.format("{}".format("No more logs."))
expected[4] = s.encode("utf-8")
assert text == expected
def test_table_random_data():
rows = [ResponsiveRowWidget([get_random_text_widget(random.randint(2, 9)) for _ in range(5)])
for _ in range(5)]
table = ResponsiveTable(MockUI(), SimpleListWalker(rows))
canvas = table.render((80, 20), focus=False)
text = [bytes().join([t for at, cs, t in ln]) for ln in canvas.content()]
logging.info("%r", text)
assert len(text) == 20
assert text[0].startswith(rows[0].original_widget.widget_list[0].text.encode("utf-8"))
def test_table_empty():
rows = []
table = ResponsiveTable(MockUI(), SimpleListWalker(rows))
canvas = table.render((80, 20), focus=False)
text = [bytes().join([t for at, cs, t in ln]) for ln in canvas.content()]
assert len(text) == 20
assert text[0] == b" " * 80
def test_assemble_rows_long_text():
rows = [[get_random_text_widget(10),
get_random_text_widget(300)] for _ in range(5)]
assembled_rows = assemble_rows(rows, ignore_columns=[1])
lb = WidgetBase(MockUI(), SimpleListWalker(assembled_rows))
canvas = lb.render((80, 20), focus=False)
text = [bytes().join([t for at, cs, t in ln]) for ln in canvas.content()]
logging.info("%r", text)
assert len(text) == 20
first_col, second_col = text[0].split(b" ", 1)
assert first_col == rows[0][0].text.encode("utf-8")
assert rows[0][1].text.encode("utf-8").startswith(second_col)
| import logging
import random
from itertools import chain
import pytest
from flexmock import flexmock
from urwid.listbox import SimpleListWalker
from sen.tui.widgets.list.base import WidgetBase
from sen.tui.widgets.list.common import ScrollableListBox, AsyncScrollableListBox
from sen.tui.widgets.list.util import ResponsiveRowWidget
from sen.tui.widgets.table import ResponsiveTable, assemble_rows
from .utils import get_random_text_widget
from .constants import SCREEN_WIDTH, SCREEN_HEIGHT
class MockUI:
buffers = []
def refresh(self):
pass
def set_alarm_in(self, *args, **kwargs):
pass
class DataGenerator:
@classmethod
def text(cls, prefix="line", lines_no=3, return_bytes=False):
s = "\n".join(["{}{}".format(prefix, x+1) for x in range(lines_no)])
if return_bytes:
return s.encode("utf-8")
return s
@classmethod
def stream(cls, prefix="line", lines_no=3, return_bytes=False):
text = []
for x in range(lines_no):
l = "{}{}\n".format(prefix, x+1)
if return_bytes:
l = l.encode("utf-8")
text.append(l)
s = chain(text)
return iter(s)
@classmethod
def render(cls, prefix="line", lines_no=3, return_bytes=True):
w = "{:%d}" % SCREEN_WIDTH
response = []
for x in range(SCREEN_HEIGHT):
if x >= lines_no:
l = w.format("")
else:
l = w.format("{}{}".format(prefix, x+1))
if return_bytes:
response.append(l.encode("utf-8"))
else:
response.append(l)
return response
@pytest.mark.parametrize("inp,expected", [
(DataGenerator.text(), DataGenerator.render()),
(DataGenerator.text(return_bytes=True), DataGenerator.render()),
(DataGenerator.text(prefix="liné"), DataGenerator.render(prefix="liné")),
(DataGenerator.text(prefix="liné", return_bytes=True), DataGenerator.render(prefix="liné")),
])
def test_scrollable_listbox(inp, expected):
lb = ScrollableListBox(MockUI(), inp)
canvas = lb.render((SCREEN_WIDTH, SCREEN_HEIGHT))
text = [bytes().join([t for at, cs, t in ln]) for ln in canvas.content()]
assert text == expected
@pytest.mark.parametrize("inp,expected", [
(DataGenerator.stream(), DataGenerator.render()),
(DataGenerator.stream(return_bytes=True), DataGenerator.render()),
(DataGenerator.stream(prefix="liné"), DataGenerator.render(prefix="liné")),
(DataGenerator.stream(prefix="liné", return_bytes=True), DataGenerator.render(prefix="liné")),
])
def test_async_scrollable_listbox(inp, expected):
ui = flexmock(refresh=lambda: None)
lb = AsyncScrollableListBox(inp, ui)
lb.thread.join()
canvas = lb.render((SCREEN_WIDTH, SCREEN_HEIGHT))
text = [bytes().join([t for at, cs, t in ln]) for ln in canvas.content()]
w = "{:%d}" % SCREEN_WIDTH
s = w.format("{}".format("No more logs."))
expected[4] = s.encode("utf-8")
assert text == expected
def test_table_random_data():
rows = [ResponsiveRowWidget([get_random_text_widget(random.randint(2, 9)) for _ in range(5)])
for _ in range(5)]
table = ResponsiveTable(MockUI(), SimpleListWalker(rows))
canvas = table.render((80, 20), focus=False)
text = [bytes().join([t for at, cs, t in ln]) for ln in canvas.content()]
logging.info("%r", text)
assert len(text) == 20
assert text[0].startswith(rows[0].original_widget.widget_list[0].text.encode("utf-8"))
def test_table_empty():
rows = []
table = ResponsiveTable(MockUI(), SimpleListWalker(rows))
canvas = table.render((80, 20), focus=False)
text = [bytes().join([t for at, cs, t in ln]) for ln in canvas.content()]
assert len(text) == 20
assert text[0] == b" " * 80
def test_assemble_rows_long_text():
rows = [[get_random_text_widget(10),
get_random_text_widget(300)] for _ in range(5)]
assembled_rows = assemble_rows(rows, ignore_columns=[1])
lb = WidgetBase(MockUI(), SimpleListWalker(assembled_rows))
canvas = lb.render((80, 20), focus=False)
text = [bytes().join([t for at, cs, t in ln]) for ln in canvas.content()]
logging.info("%r", text)
assert len(text) == 20
first_col, second_col = text[0].split(b" ", 1)
assert first_col == rows[0][0].text.encode("utf-8")
assert rows[0][1].text.encode("utf-8").startswith(second_col)
| none | 1 | 2.056313 | 2 | |
CreateModel.py | wc596520206/web-gender-recognition | 0 | 6613933 | <gh_stars>0
import tensorflow as tf
class CreateModel(object):
def __init__(self, config):
self.model_type = config["global"]["model_type"]
self.im_size = config["global"]["im_size"]
self.class_num = config["global"]["class_num"]
def create(self):
# 搭建模型
if self.model_type == "MobileNet":
conv_base = tf.keras.applications.MobileNet(input_shape=(self.im_size, self.im_size, 3),
weights='imagenet',
include_top=False)
elif self.model_type == "Xception":
conv_base = tf.keras.applications.Xception(
weights='imagenet',
include_top=False)
elif self.model_type == "ResNet50":
conv_base = tf.keras.applications.ResNet50(
weights='imagenet',
include_top=False)
conv_base.trainable = True
# 构建模型
model = tf.keras.models.Sequential()
model.add(conv_base)
model.add(tf.keras.layers.GlobalAveragePooling2D())
model.add(tf.keras.layers.Dense(self.class_num, activation='relu')) # 两个类别
model.summary() # 每层参数信息
return model
| import tensorflow as tf
class CreateModel(object):
def __init__(self, config):
self.model_type = config["global"]["model_type"]
self.im_size = config["global"]["im_size"]
self.class_num = config["global"]["class_num"]
def create(self):
# 搭建模型
if self.model_type == "MobileNet":
conv_base = tf.keras.applications.MobileNet(input_shape=(self.im_size, self.im_size, 3),
weights='imagenet',
include_top=False)
elif self.model_type == "Xception":
conv_base = tf.keras.applications.Xception(
weights='imagenet',
include_top=False)
elif self.model_type == "ResNet50":
conv_base = tf.keras.applications.ResNet50(
weights='imagenet',
include_top=False)
conv_base.trainable = True
# 构建模型
model = tf.keras.models.Sequential()
model.add(conv_base)
model.add(tf.keras.layers.GlobalAveragePooling2D())
model.add(tf.keras.layers.Dense(self.class_num, activation='relu')) # 两个类别
model.summary() # 每层参数信息
return model | zh | 0.990379 | # 搭建模型 # 构建模型 # 两个类别 # 每层参数信息 | 2.748662 | 3 |
WiiRemoteGestures/WiiBase.py | romanchom/GestureRecognitionVR | 0 | 6613934 | <filename>WiiRemoteGestures/WiiBase.py
import random
import sqlite3
import numpy as np
from SQLBase import SQLBase
from Gesture import Gesture
class WiiBase(SQLBase):
def __init__(self, file, feature_set_extractor = None):
super(WiiBase, self).__init__()
db = sqlite3.connect(file)
c = db.cursor()
command = 'SELECT name, tester, trial, righthand, data FROM GestureTable WHERE trial <= 10 ORDER BY name '
raw_list = c.execute(command).fetchall()
for g in raw_list:
name = g[0]
self.testers.add(g[1])
self.users[g[1]] = 'left' if g[3] == 0 else 'right'
if name not in self.gesture_id:
identifier = self.class_count
self.class_count += 1
self.gesture_name.append(name)
self.gesture_id[name] = identifier
for g in raw_list:
data = np.frombuffer(g[4], dtype='float32')
data = np.reshape(data, [-1, 14])
if feature_set_extractor:
#data = np.apply_along_axis(feature_set_extractor, 1, data)
data = feature_set_extractor(data)
self.max_length = max(self.max_length, data.shape[0])
gesture = Gesture(
label_id = self.gesture_id[g[0]],
tester = g[1],
trial = g[2] - 1,
user_hand = 'left' if g[3] == 0 else 'right',
gesture_hand = 'left' if g[3] == 0 else 'right',
data = data
)
self.gesture_list.append(gesture)
self.feature_count = self.gesture_list[0].data.shape[1]
for g in self.gesture_list:
g.pad(self.max_length)
#print(self.gesture_list[0].data[:, 1])
# 14 floats per time point
# [0] timestamp
# [1:4] xyz position in meters
# [4:8] quaternion orientation
# [8:11] acceleration in local (controller) space
# [11:14] angular speed (yaw, pitch, roll)
def feature_set_P(data):
return np.apply_along_axis(lambda row: row[1:4], 1, data)
def feature_set_V(data):
shape = list(data.shape)
shape[1] = 3
vel = np.zeros(shape, dtype='float32')
count = shape[0]
for i in range(1, count):
vel[i] = (data[i, 1:4] - data[i - 1, 1:4]) * 60
return vel
def feature_set_PV(data):
pos = WiiBase.feature_set_P(data)
vel = WiiBase.feature_set_V(data)
return np.concatenate((pos, vel), 1)
def feature_set_PO(data):
return np.apply_along_axis(lambda row: row[1:8], 1, data)
def feature_set_O(data):
return np.apply_along_axis(lambda row: row[4:8], 1, data)
def feature_set_W(data):
return np.apply_along_axis(lambda row: row[4:8], 1, data)
def feature_set_A(data):
return np.apply_along_axis(lambda row: row[8:11], 1, data)
def feature_set_AW(data):
return np.apply_along_axis(lambda row: row[8:14], 1, data)
def feature_set_AWO(data):
return np.apply_along_axis(lambda row: row[4:14], 1, data)
def feature_set_PVO(data):
po = WiiBase.feature_set_PO(data)
vel = WiiBase.feature_set_V(data)
return np.concatenate((po, vel), 1)
def feature_set_PVOW(data):
po = WiiBase.feature_set_PO(data)
w = np.apply_along_axis(lambda row: row[11:14], 1, data)
vel = WiiBase.feature_set_V(data)
return np.concatenate((po, w, vel), 1)
def feature_set_PVOWA(data):
poaw = WiiBase.feature_set_POAW(data)
vel = WiiBase.feature_set_V(data)
return np.concatenate((poaw, vel), 1)
def feature_set_POAW(data):
return np.apply_along_axis(lambda row: row[1:14], 1, data)
| <filename>WiiRemoteGestures/WiiBase.py
import random
import sqlite3
import numpy as np
from SQLBase import SQLBase
from Gesture import Gesture
class WiiBase(SQLBase):
def __init__(self, file, feature_set_extractor = None):
super(WiiBase, self).__init__()
db = sqlite3.connect(file)
c = db.cursor()
command = 'SELECT name, tester, trial, righthand, data FROM GestureTable WHERE trial <= 10 ORDER BY name '
raw_list = c.execute(command).fetchall()
for g in raw_list:
name = g[0]
self.testers.add(g[1])
self.users[g[1]] = 'left' if g[3] == 0 else 'right'
if name not in self.gesture_id:
identifier = self.class_count
self.class_count += 1
self.gesture_name.append(name)
self.gesture_id[name] = identifier
for g in raw_list:
data = np.frombuffer(g[4], dtype='float32')
data = np.reshape(data, [-1, 14])
if feature_set_extractor:
#data = np.apply_along_axis(feature_set_extractor, 1, data)
data = feature_set_extractor(data)
self.max_length = max(self.max_length, data.shape[0])
gesture = Gesture(
label_id = self.gesture_id[g[0]],
tester = g[1],
trial = g[2] - 1,
user_hand = 'left' if g[3] == 0 else 'right',
gesture_hand = 'left' if g[3] == 0 else 'right',
data = data
)
self.gesture_list.append(gesture)
self.feature_count = self.gesture_list[0].data.shape[1]
for g in self.gesture_list:
g.pad(self.max_length)
#print(self.gesture_list[0].data[:, 1])
# 14 floats per time point
# [0] timestamp
# [1:4] xyz position in meters
# [4:8] quaternion orientation
# [8:11] acceleration in local (controller) space
# [11:14] angular speed (yaw, pitch, roll)
def feature_set_P(data):
return np.apply_along_axis(lambda row: row[1:4], 1, data)
def feature_set_V(data):
shape = list(data.shape)
shape[1] = 3
vel = np.zeros(shape, dtype='float32')
count = shape[0]
for i in range(1, count):
vel[i] = (data[i, 1:4] - data[i - 1, 1:4]) * 60
return vel
def feature_set_PV(data):
pos = WiiBase.feature_set_P(data)
vel = WiiBase.feature_set_V(data)
return np.concatenate((pos, vel), 1)
def feature_set_PO(data):
return np.apply_along_axis(lambda row: row[1:8], 1, data)
def feature_set_O(data):
return np.apply_along_axis(lambda row: row[4:8], 1, data)
def feature_set_W(data):
return np.apply_along_axis(lambda row: row[4:8], 1, data)
def feature_set_A(data):
return np.apply_along_axis(lambda row: row[8:11], 1, data)
def feature_set_AW(data):
return np.apply_along_axis(lambda row: row[8:14], 1, data)
def feature_set_AWO(data):
return np.apply_along_axis(lambda row: row[4:14], 1, data)
def feature_set_PVO(data):
po = WiiBase.feature_set_PO(data)
vel = WiiBase.feature_set_V(data)
return np.concatenate((po, vel), 1)
def feature_set_PVOW(data):
po = WiiBase.feature_set_PO(data)
w = np.apply_along_axis(lambda row: row[11:14], 1, data)
vel = WiiBase.feature_set_V(data)
return np.concatenate((po, w, vel), 1)
def feature_set_PVOWA(data):
poaw = WiiBase.feature_set_POAW(data)
vel = WiiBase.feature_set_V(data)
return np.concatenate((poaw, vel), 1)
def feature_set_POAW(data):
return np.apply_along_axis(lambda row: row[1:14], 1, data)
| en | 0.474113 | #data = np.apply_along_axis(feature_set_extractor, 1, data) #print(self.gesture_list[0].data[:, 1]) # 14 floats per time point # [0] timestamp # [1:4] xyz position in meters # [4:8] quaternion orientation # [8:11] acceleration in local (controller) space # [11:14] angular speed (yaw, pitch, roll) | 2.63364 | 3 |
train.py | HikariNoMJ14/jazz_transformer | 56 | 6613935 | import argparse
import sys, pickle , os
parser = argparse.ArgumentParser()
parser.add_argument('ckpt_dir' , help="the folder to save checkpoints")
parser.add_argument('log_file' , help="the file path to save log file")
args = parser.parse_args()
sys.path.append('./transformer_xl/')
sys.path.append('./src/')
import numpy as np
import pandas as pd
from glob import glob
from build_vocab import Vocab
# which gpu to use
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from model_aug import TransformerXL
if __name__ == '__main__':
# load dictionary
# generated from build_vocab.py
vocab = pickle.load(open('pickles/remi_wstruct_vocab.pkl', 'rb'))
event2word, word2event = vocab.event2idx, vocab.idx2event
# load train data
# training_seqs_final.pkl : all songs' remi format
training_data_file = "data/training_seqs_struct_new_final.pkl"
print("loading training data from {}".format(training_data_file))
training_seqs = pickle.load( open(training_data_file, 'rb') )
# show size of trqaining data
print("Training data count: {}".format(len(training_seqs)))
# declare model
model = TransformerXL(
event2word=event2word,
word2event=word2event,
checkpoint=None,
is_training=True,
training_seqs=training_seqs)
# train
model.train_augment(output_checkpoint_folder=args.ckpt_dir, logfile=args.log_file)
# close
model.close()
| import argparse
import sys, pickle , os
parser = argparse.ArgumentParser()
parser.add_argument('ckpt_dir' , help="the folder to save checkpoints")
parser.add_argument('log_file' , help="the file path to save log file")
args = parser.parse_args()
sys.path.append('./transformer_xl/')
sys.path.append('./src/')
import numpy as np
import pandas as pd
from glob import glob
from build_vocab import Vocab
# which gpu to use
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from model_aug import TransformerXL
if __name__ == '__main__':
# load dictionary
# generated from build_vocab.py
vocab = pickle.load(open('pickles/remi_wstruct_vocab.pkl', 'rb'))
event2word, word2event = vocab.event2idx, vocab.idx2event
# load train data
# training_seqs_final.pkl : all songs' remi format
training_data_file = "data/training_seqs_struct_new_final.pkl"
print("loading training data from {}".format(training_data_file))
training_seqs = pickle.load( open(training_data_file, 'rb') )
# show size of trqaining data
print("Training data count: {}".format(len(training_seqs)))
# declare model
model = TransformerXL(
event2word=event2word,
word2event=word2event,
checkpoint=None,
is_training=True,
training_seqs=training_seqs)
# train
model.train_augment(output_checkpoint_folder=args.ckpt_dir, logfile=args.log_file)
# close
model.close()
| en | 0.664792 | # which gpu to use # load dictionary # generated from build_vocab.py # load train data # training_seqs_final.pkl : all songs' remi format # show size of trqaining data # declare model # train # close | 2.282257 | 2 |
tests/test_repeat.py | barthaniki/conduit | 0 | 6613936 | <filename>tests/test_repeat.py<gh_stars>0
import time
import csv
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
# Module-level fixture: a single headless Chrome instance shared by the test(s)
# below. ChromeDriverManager downloads a matching chromedriver on first run.
options = Options()
options.headless = True
driver = webdriver.Chrome(executable_path=ChromeDriverManager().install(), options=options)
# test repeat function
def test_new_articles():
    """End-to-end test against a local Conduit app (http://localhost:1667).

    Logs in as an existing test user, creates one article per data row in
    tests/new_articles.csv, verifies the article count grew by exactly that
    many, deletes the created articles again, verifies the count is restored,
    then logs out and tears down the shared driver.
    """
    url = "http://localhost:1667/#/"
    driver.get(url)
    time.sleep(2)
    # prerequisite: existing test user login
    user_name = "testuser1"
    user_email = (user_name + "@<EMAIL>")
    password = "<PASSWORD>$"
    driver.find_element(By.XPATH, '//a[@href="#/login"]').click()
    driver.find_element(By.XPATH, '//*[@type="text"]').send_keys(user_email)
    driver.find_element(By.XPATH, '//*[@type="password"]').send_keys(password)
    driver.find_element(By.TAG_NAME, "button").click()
    time.sleep(1)
    # The profile link doubles as our "go home" handle throughout the test.
    user_home = driver.find_element(By.XPATH, f'//a[@href="#/@{user_name}/"]')
    user_home.click()
    time.sleep(1)
    # Baseline: number of articles already on the user's profile page.
    user_articles = len(driver.find_elements(By.XPATH, '//a//h1'))
    time.sleep(1)
    # test repeat function - create new articles from outer file
    count = 0
    with open("tests/new_articles.csv", "r") as data_file:
        data_file = csv.reader(data_file, delimiter=",")
        next(data_file)  # skip the CSV header row
        for row in data_file:
            # row layout: [0]=title, [1]=about, [2]=body (markdown), [3]=tags
            driver.find_element(By.XPATH, '//a[@href="#/editor"]').click()
            time.sleep(1)
            driver.find_element(By.XPATH, '//input[@placeholder="Article Title"]').send_keys(row[0])
            driver.find_element(By.XPATH, '//input[@placeholder="What\'s this article about?"]').send_keys(row[1])
            driver.find_element(By.XPATH, '//textarea[@placeholder="Write your article (in markdown)"]') \
                .send_keys(row[2])
            driver.find_element(By.XPATH, '//input[@placeholder="Enter tags"]').send_keys(row[3])
            driver.find_element(By.XPATH, '//button[@type="submit"]').click()
            time.sleep(1)
            count += 1
    # check new articles in list
    user_home.click()
    time.sleep(1)
    user_articles_extended = len(driver.find_elements(By.XPATH, '//a//h1'))
    time.sleep(1)
    assert user_articles_extended == user_articles + count
    time.sleep(1)
    # delete new articles to clear (new articles are listed last, so delete
    # from the tail `count` times to restore the original state)
    for _ in range(count):
        user_home.click()
        time.sleep(2)
        all_articles = driver.find_elements(By.XPATH, '//a//h1')
        last_article = all_articles[-1]
        last_article.click()
        time.sleep(1)
        del_btn = driver.find_element(By.XPATH, '//button[@class="btn btn-outline-danger btn-sm"]')
        del_btn.click()
        time.sleep(2)
    # check after clear
    user_home.click()
    time.sleep(1)
    user_articles_cleared = len(driver.find_elements(By.XPATH, '//a//h1'))
    time.sleep(1)
    assert user_articles_cleared == user_articles
    # logout
    driver.find_element(By.XPATH, '//a[@active-class="active"]').click()
    driver.close()
    driver.quit()
| <filename>tests/test_repeat.py<gh_stars>0
import time
import csv
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
options = Options()
options.headless = True
driver = webdriver.Chrome(executable_path=ChromeDriverManager().install(), options=options)
# test repeat function
def test_new_articles():
url = "http://localhost:1667/#/"
driver.get(url)
time.sleep(2)
# prerequisite: existing test user login
user_name = "testuser1"
user_email = (user_name + "@<EMAIL>")
password = "<PASSWORD>$"
driver.find_element(By.XPATH, '//a[@href="#/login"]').click()
driver.find_element(By.XPATH, '//*[@type="text"]').send_keys(user_email)
driver.find_element(By.XPATH, '//*[@type="password"]').send_keys(password)
driver.find_element(By.TAG_NAME, "button").click()
time.sleep(1)
user_home = driver.find_element(By.XPATH, f'//a[@href="#/@{user_name}/"]')
user_home.click()
time.sleep(1)
user_articles = len(driver.find_elements(By.XPATH, '//a//h1'))
time.sleep(1)
# test repeat function - create new articles from outer file
count = 0
with open("tests/new_articles.csv", "r") as data_file:
data_file = csv.reader(data_file, delimiter=",")
next(data_file)
for row in data_file:
driver.find_element(By.XPATH, '//a[@href="#/editor"]').click()
time.sleep(1)
driver.find_element(By.XPATH, '//input[@placeholder="Article Title"]').send_keys(row[0])
driver.find_element(By.XPATH, '//input[@placeholder="What\'s this article about?"]').send_keys(row[1])
driver.find_element(By.XPATH, '//textarea[@placeholder="Write your article (in markdown)"]') \
.send_keys(row[2])
driver.find_element(By.XPATH, '//input[@placeholder="Enter tags"]').send_keys(row[3])
driver.find_element(By.XPATH, '//button[@type="submit"]').click()
time.sleep(1)
count += 1
# check new articles in list
user_home.click()
time.sleep(1)
user_articles_extended = len(driver.find_elements(By.XPATH, '//a//h1'))
time.sleep(1)
assert user_articles_extended == user_articles + count
time.sleep(1)
# delete new articles to clear
for _ in range(count):
user_home.click()
time.sleep(2)
all_articles = driver.find_elements(By.XPATH, '//a//h1')
last_article = all_articles[-1]
last_article.click()
time.sleep(1)
del_btn = driver.find_element(By.XPATH, '//button[@class="btn btn-outline-danger btn-sm"]')
del_btn.click()
time.sleep(2)
# check after clear
user_home.click()
time.sleep(1)
user_articles_cleared = len(driver.find_elements(By.XPATH, '//a//h1'))
time.sleep(1)
assert user_articles_cleared == user_articles
# logout
driver.find_element(By.XPATH, '//a[@active-class="active"]').click()
driver.close()
driver.quit()
| en | 0.675273 | # test repeat function #/" # prerequisite: existing test user login # test repeat function - create new articles from outer file # check new articles in list # delete new articles to clear # check after clear # logout | 3.115104 | 3 |
utilities/vis_functions.py | openworm/behavioral_syntax | 3 | 6613937 | <filename>utilities/vis_functions.py
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 1 12:41:16 2015
@author: macbook
"""
#from behavioral_syntax.utilities.angle_and_skel import MA2skel
import matplotlib.pyplot as plt
#from behavioral_syntax.utilities.numericalc import largest_factors
from scipy.stats import itemfreq
import numpy as np
from scipy import io
#for bokeh charts:
"""
from bokeh.plotting import show, output_file
from bokeh.charts import Bar
"""
#visualizing postures is probably one of the most important tasks:
# NOTE(review): module-level side effect — loads a hard-coded, user-specific
# .mat file at import time; importing this module fails on other machines.
postures = '/Users/cyrilrocke/Documents/c_elegans/data/postures'
g = io.loadmat(postures)
postures = g.get('postures')  # rebinds `postures` from path string to the array
plt.style.use('ggplot')
"""
def bokeh_bars(liszt,name):
z = itemfreq(liszt)
z = z[np.argsort(z[:,1])]
data = {"y": list(z[:,1])}
#it would be great if there was a way to automatically fit the bokeh plot to the screen:
bar = Bar(data, list(map(str,z[:,0])), title="bars",width=1000,height=500)
output_file(name+".html", title=name)
show(bar)
plt.style.use('ggplot')"""
def grid_plot(list, kind, image_loc, image_name):
    """Plot every entry of *list* as its own subplot on a roughly square grid.

    Parameters
    ----------
    list : sequence
        One dataset per subplot. For ``kind='histogram'`` an entry may be a
        space-separated string, which is split into tokens in place before
        its item frequencies are plotted. For ``kind='CDF'`` each entry is
        plotted directly. (Note: the parameter name shadows the builtin
        ``list``; kept for backward compatibility with existing callers.)
    kind : str
        Either ``'histogram'`` or ``'CDF'``; anything else draws an empty grid.
    image_loc, image_name : str
        Directory prefix and file stem; the figure is saved to
        ``image_loc + image_name + '.png'``.
    """
    N = len(list)
    # An n x (n+1) grid with n = round(sqrt(N)) always has at least N cells,
    # so every dataset gets an axis (surplus axes are simply left empty).
    n = round(np.sqrt(N))
    fig, axes = plt.subplots(ncols=n, nrows=n + 1)
    fig.set_size_inches(30, 30)
    ax = axes.ravel()
    fig.suptitle(image_name, fontsize=40, weight='bold')
    # Shared styling for every subplot title.
    title_style = dict(size='medium', weight='bold', color='steelblue',
                       backgroundcolor=(1, 0.85490196, 0.7254902))
    if kind == 'histogram':
        for i in range(N):
            if isinstance(list[i], str):
                # Mutates the caller's sequence: string entries become token lists.
                list[i] = list[i].split(' ')
            # Item frequencies, sorted ascending by count.
            z = itemfreq(list[i])
            z = z[np.argsort(z[:, 1])]
            ax[i].plot(z[:, 1], 'o')
            ax[i].set_xticks(z[:, 1])
            ax[i].set_title(str(i), **title_style)
    elif kind == 'CDF':
        for i in range(N):
            ax[i].plot(list[i], 'o')
            ax[i].set_title(str(i), **title_style)
    if isinstance(image_loc + image_name, str):
        fig.savefig(image_loc + image_name + '.png', dpi=fig.dpi)
| <filename>utilities/vis_functions.py
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 1 12:41:16 2015
@author: macbook
"""
#from behavioral_syntax.utilities.angle_and_skel import MA2skel
import matplotlib.pyplot as plt
#from behavioral_syntax.utilities.numericalc import largest_factors
from scipy.stats import itemfreq
import numpy as np
from scipy import io
#for bokeh charts:
"""
from bokeh.plotting import show, output_file
from bokeh.charts import Bar
"""
#visualizing postures is probably one of the most important tasks:
postures = '/Users/cyrilrocke/Documents/c_elegans/data/postures'
g = io.loadmat(postures)
postures = g.get('postures')
plt.style.use('ggplot')
"""
def bokeh_bars(liszt,name):
z = itemfreq(liszt)
z = z[np.argsort(z[:,1])]
data = {"y": list(z[:,1])}
#it would be great if there was a way to automatically fit the bokeh plot to the screen:
bar = Bar(data, list(map(str,z[:,0])), title="bars",width=1000,height=500)
output_file(name+".html", title=name)
show(bar)
plt.style.use('ggplot')"""
def grid_plot(list,kind,image_loc,image_name):
N = len(list)
#we select an n*n subset of the multi_array of length N which for n>=16
#is always greater than 99% of the members of the multi_array.
n = round(np.sqrt(N))
fig, axes = plt.subplots(ncols=n, nrows=n+1)
fig.set_size_inches(30, 30)
ax = axes.ravel()
fig.suptitle(image_name,fontsize=40,weight='bold')
if kind == 'histogram':
j = 0
for i in range(N):
if type(list[j]) == str:
list[j] = list[j].split(' ')
z = itemfreq(list[j])
z = z[np.argsort(z[:,1])]
ax[i].plot(z[:,1],'o')
ax[i].set_xticks(z[:,1])
#ax[i].plot(z[:,0],z[:,1],'o')
ax[i].set_title(str(j),size='medium',weight='bold',color='steelblue',backgroundcolor=(1, 0.85490196, 0.7254902))
j+=1
elif kind == 'CDF':
j = 0
for i in range(N):
ax[i].plot(list[j],'o')
ax[i].set_title(str(j),size='medium',weight='bold',color='steelblue',backgroundcolor=(1, 0.85490196, 0.7254902))
j+=1
if isinstance(image_loc+image_name,str):
fig.savefig(image_loc+image_name+'.png',dpi=fig.dpi)
| en | 0.634939 | # -*- coding: utf-8 -*- Created on Tue Sep 1 12:41:16 2015 @author: macbook #from behavioral_syntax.utilities.angle_and_skel import MA2skel #from behavioral_syntax.utilities.numericalc import largest_factors #for bokeh charts: from bokeh.plotting import show, output_file from bokeh.charts import Bar #visualizing postures is probably one of the most important tasks: def bokeh_bars(liszt,name): z = itemfreq(liszt) z = z[np.argsort(z[:,1])] data = {"y": list(z[:,1])} #it would be great if there was a way to automatically fit the bokeh plot to the screen: bar = Bar(data, list(map(str,z[:,0])), title="bars",width=1000,height=500) output_file(name+".html", title=name) show(bar) plt.style.use('ggplot') #we select an n*n subset of the multi_array of length N which for n>=16 #is always greater than 99% of the members of the multi_array. #ax[i].plot(z[:,0],z[:,1],'o') | 2.535583 | 3 |
src/z3c/testsetup/tests/othercave/doctest08.py | zopefoundation/z3c.testsetup | 1 | 6613938 | <reponame>zopefoundation/z3c.testsetup
"""
Doctests in a Python module
===========================
We can place doctests also in Python modules.
:doctest:
Here the Cave class is defined::
>>> from z3c.testsetup.tests.othercave.doctest08 import Cave
>>> Cave
<class 'z3c.testsetup...doctest08.Cave'>
"""
# NOTE: this module is a z3c.testsetup fixture — the docstrings below are
# collected and executed as doctests. Do not edit docstring contents casually.
class Cave(object):
    """A Cave.

    A cave has a number::

        >>> hasattr(Cave, 'number')
        True
    """
    # Class-level default; shadowed by the per-instance value set in __init__.
    number = None

    def __init__(self, number):
        """Create a Cave.

        We have to give a number if we create a cave::

            >>> c = Cave(12)
            >>> c.number
            12
        """
        self.number = number
| """
Doctests in a Python module
===========================
We can place doctests also in Python modules.
:doctest:
Here the Cave class is defined::
>>> from z3c.testsetup.tests.othercave.doctest08 import Cave
>>> Cave
<class 'z3c.testsetup...doctest08.Cave'>
"""
class Cave(object):
"""A Cave.
A cave has a number::
>>> hasattr(Cave, 'number')
True
"""
number = None
def __init__(self, number):
"""Create a Cave.
We have to give a number if we create a cave::
>>> c = Cave(12)
>>> c.number
12
"""
self.number = number | en | 0.665659 | Doctests in a Python module =========================== We can place doctests also in Python modules. :doctest: Here the Cave class is defined:: >>> from z3c.testsetup.tests.othercave.doctest08 import Cave >>> Cave <class 'z3c.testsetup...doctest08.Cave'> A Cave. A cave has a number:: >>> hasattr(Cave, 'number') True Create a Cave. We have to give a number if we create a cave:: >>> c = Cave(12) >>> c.number 12 | 3.360507 | 3 |
scripts/test_cases.py | isb-cgc/ISB-CGC-API | 2 | 6613939 | from datetime import datetime
TEST_CASES_BY_PATH = {
'/cohorts': {
'POST': {
'TCGA-disease-code': {
'name': 'Test cohort TCGA-disease-code for {}'.format(datetime.now()),
'filters': {
'TCGA': {'disease_code': ['BRCA']}
}
},
'TARGET-case-barcode': {
'name': 'Test cohort TARGET-case-barcode for {}'.format(datetime.now()),
'filters': {
'TARGET': {'case_barcode': ['TARGET-52-PAKHTL']}
}
},
'CCLE-sample-barcode': {
'name': 'Test cohort CCLE-sample-barcode for {}'.format(datetime.now()),
'filters': {
'CCLE': {'sample_barcode': ['CCLE-22Rv1','CCLE-42-MG-BA','CCLE-769-P','CCLE-A-253']}
}
},
'TCGA-bmi-btw': {
'name': 'Test cohort TCGA-bmi-btw for {}'.format(datetime.now()),
'filters': {
'TCGA': {'bmi_btw': ['15','25']}
}
},
'TARGET-wbc-at-diagnosis-gte': {
'name': 'Test cohort TARGET-wbc-at-diagnosis-gte for {}'.format(datetime.now()),
'filters': {
'TARGET': {'wbc_at_diagnosis': ['500']}
}
}
},
'PATCH': {
'rename': {
'name': 'Renamed Cohort'
},
'filter_expansion': {
'filters': {
'disease_code': ['LUAD']
}
}
},
},
'/cohorts/preview': {
'POST': {
'multi-program': {
"filters": {
"TCGA": {
"bmi_btw": ['15', '25']
},
"TARGET": {
"disease_code": "LAML"
},
"CCLE": {}
}
}
}
},
'/samples/{sample_barcode}': {
'GET': {
'TCGA-single-sample': {'sample_barcode': 'TCGA-DX-A23U-10A'},
'CCLE-single-sample': {'sample_barcode': 'CCLE-253J'},
'TARGET-single-sample': {'sample_barcode': 'TARGET-52-PAREWI-01A'}
},
},
'/samples': {
'POST': {
'TCGA-multi-sample': {
'sample_barcodes': ['TCGA-DX-A23U-10A', 'TCGA-WK-A8XQ-10A']
},
'CCLE-multi-sample': {
'sample_barcodes': ['CCLE-8-MG-BA', 'CCLE-253J', 'CCLE-A3/KAW']
},
'TARGET-multi-sample': {
'sample_barcodes': ['TARGET-52-PAREWI-01A', 'TARGET-52-PATBLF-10A', 'TARGET-52-PATDVL-01A']
}
}
},
'/cases/{case_barcode}': {
'GET': {
'TCGA-single-case': {'case_barcode': 'TCGA-DX-A23U'},
'CCLE-single-case': {'case_barcode': 'A-204'},
'TARGET-single-case': {'case_barcode': 'TARGET-52-PAREWI'}
},
},
'/cases': {
'POST': {
'TCGA-multi-sample': {
'case_barcodes': ['TCGA-DX-A23U', 'TCGA-WK-A8XQ']
},
'CCLE-multi-sample': {
'case_barcodes': ['A-204', '769-P', 'A3/KAW']
},
'TARGET-multi-sample': {
'case_barcodes': ['TARGET-52-PAREWI', 'TARGET-52-PATBLF', 'TARGET-52-PATDVL']
}
}
},
'/cohorts/{cohort_id}/file_manifest': {
'POST': {
'TCGA-file-size-lte': {
'filters': {
'program_name': ['TCGA'],
'file_size_lte': ['5000000']
}
},
'disease-code': {
'filters': {
'disease_code': ['BRCA']
}
},
}
},
'/files/paths/{file_uuid}': {
'GET': {
'TARGET-file-uuid': {
'file_uuid': '20f1cdc2-2900-4f48-9bd4-66a406bf7a61'
},
'TCGA-file-uuid': {
'file_uuid': 'f7863ca3-3297-40c5-8690-f1cedb32f577'
},
'CCLE-file-uuid': {
'file_uuid': '3aa3c169-7945-4ff3-9787-48270f776aa2'
}
}
},
'/files/paths': {
'POST': {
'TARGET-file-uuids': {
'uuids': ['20f1cdc2-2900-4f48-9bd4-66a406bf7a61', '27e8a6c4-2ca7-4b7c-8f41-ec53fb4faa66', 'e3e6154c-ac76-4d0d-bd40-8dc213c35197']
},
'TCGA-file-uuids': {
'uuids': ['f7863ca3-3297-40c5-8690-f1cedb32f577', '3cfdd784-2aae-4e59-9eb6-733736b7ac37', '78b382a4-1d0e-4946-8c22-510d05dccc09']
},
'CCLE-file-uuids': {
'uuids': ['3aa3c169-7945-4ff3-9787-48270f776aa2', '6daaeb4d-d66f-4efa-b1fe-ada91f1236ba', 'e6f3246e-59f7-4c6b-90fb-88e441b48522']
}
}
},
'/users/gcp/validate/{gcp_id}': {
'GET': {
'should-fail': {
'gcp_id': 'gibberish_nonsense_id_sfgdfgertergdvg34t'
},
'should-pass': {
'gcp_id': 'cgc-05-0016'
}
}
}
} | from datetime import datetime
TEST_CASES_BY_PATH = {
'/cohorts': {
'POST': {
'TCGA-disease-code': {
'name': 'Test cohort TCGA-disease-code for {}'.format(datetime.now()),
'filters': {
'TCGA': {'disease_code': ['BRCA']}
}
},
'TARGET-case-barcode': {
'name': 'Test cohort TARGET-case-barcode for {}'.format(datetime.now()),
'filters': {
'TARGET': {'case_barcode': ['TARGET-52-PAKHTL']}
}
},
'CCLE-sample-barcode': {
'name': 'Test cohort CCLE-sample-barcode for {}'.format(datetime.now()),
'filters': {
'CCLE': {'sample_barcode': ['CCLE-22Rv1','CCLE-42-MG-BA','CCLE-769-P','CCLE-A-253']}
}
},
'TCGA-bmi-btw': {
'name': 'Test cohort TCGA-bmi-btw for {}'.format(datetime.now()),
'filters': {
'TCGA': {'bmi_btw': ['15','25']}
}
},
'TARGET-wbc-at-diagnosis-gte': {
'name': 'Test cohort TARGET-wbc-at-diagnosis-gte for {}'.format(datetime.now()),
'filters': {
'TARGET': {'wbc_at_diagnosis': ['500']}
}
}
},
'PATCH': {
'rename': {
'name': 'Renamed Cohort'
},
'filter_expansion': {
'filters': {
'disease_code': ['LUAD']
}
}
},
},
'/cohorts/preview': {
'POST': {
'multi-program': {
"filters": {
"TCGA": {
"bmi_btw": ['15', '25']
},
"TARGET": {
"disease_code": "LAML"
},
"CCLE": {}
}
}
}
},
'/samples/{sample_barcode}': {
'GET': {
'TCGA-single-sample': {'sample_barcode': 'TCGA-DX-A23U-10A'},
'CCLE-single-sample': {'sample_barcode': 'CCLE-253J'},
'TARGET-single-sample': {'sample_barcode': 'TARGET-52-PAREWI-01A'}
},
},
'/samples': {
'POST': {
'TCGA-multi-sample': {
'sample_barcodes': ['TCGA-DX-A23U-10A', 'TCGA-WK-A8XQ-10A']
},
'CCLE-multi-sample': {
'sample_barcodes': ['CCLE-8-MG-BA', 'CCLE-253J', 'CCLE-A3/KAW']
},
'TARGET-multi-sample': {
'sample_barcodes': ['TARGET-52-PAREWI-01A', 'TARGET-52-PATBLF-10A', 'TARGET-52-PATDVL-01A']
}
}
},
'/cases/{case_barcode}': {
'GET': {
'TCGA-single-case': {'case_barcode': 'TCGA-DX-A23U'},
'CCLE-single-case': {'case_barcode': 'A-204'},
'TARGET-single-case': {'case_barcode': 'TARGET-52-PAREWI'}
},
},
'/cases': {
'POST': {
'TCGA-multi-sample': {
'case_barcodes': ['TCGA-DX-A23U', 'TCGA-WK-A8XQ']
},
'CCLE-multi-sample': {
'case_barcodes': ['A-204', '769-P', 'A3/KAW']
},
'TARGET-multi-sample': {
'case_barcodes': ['TARGET-52-PAREWI', 'TARGET-52-PATBLF', 'TARGET-52-PATDVL']
}
}
},
'/cohorts/{cohort_id}/file_manifest': {
'POST': {
'TCGA-file-size-lte': {
'filters': {
'program_name': ['TCGA'],
'file_size_lte': ['5000000']
}
},
'disease-code': {
'filters': {
'disease_code': ['BRCA']
}
},
}
},
'/files/paths/{file_uuid}': {
'GET': {
'TARGET-file-uuid': {
'file_uuid': '20f1cdc2-2900-4f48-9bd4-66a406bf7a61'
},
'TCGA-file-uuid': {
'file_uuid': 'f7863ca3-3297-40c5-8690-f1cedb32f577'
},
'CCLE-file-uuid': {
'file_uuid': '3aa3c169-7945-4ff3-9787-48270f776aa2'
}
}
},
'/files/paths': {
'POST': {
'TARGET-file-uuids': {
'uuids': ['20f1cdc2-2900-4f48-9bd4-66a406bf7a61', '27e8a6c4-2ca7-4b7c-8f41-ec53fb4faa66', 'e3e6154c-ac76-4d0d-bd40-8dc213c35197']
},
'TCGA-file-uuids': {
'uuids': ['f7863ca3-3297-40c5-8690-f1cedb32f577', '3cfdd784-2aae-4e59-9eb6-733736b7ac37', '78b382a4-1d0e-4946-8c22-510d05dccc09']
},
'CCLE-file-uuids': {
'uuids': ['3aa3c169-7945-4ff3-9787-48270f776aa2', '6daaeb4d-d66f-4efa-b1fe-ada91f1236ba', 'e6f3246e-59f7-4c6b-90fb-88e441b48522']
}
}
},
'/users/gcp/validate/{gcp_id}': {
'GET': {
'should-fail': {
'gcp_id': 'gibberish_nonsense_id_sfgdfgertergdvg34t'
},
'should-pass': {
'gcp_id': 'cgc-05-0016'
}
}
}
} | none | 1 | 1.982915 | 2 | |
scripts/stopping/eval_model.py | SBUNetSys/DeQA | 1 | 6613940 | <reponame>SBUNetSys/DeQA
#!/usr/bin/env python3
import json
import argparse
import os
from utils import normalize
from utils import exact_match_score, regex_match_score, get_rank
from utils import slugify, aggregate, aggregate_ans
from utils import Tokenizer
from StoppingModel import EarlyStoppingModel
import torch
import time
from multiprocessing import Pool as ProcessPool
import sys
import numpy
ENCODING = "utf-8"
DOC_MEAN = 8.5142
DOC_STD = 2.8324
I_STD = 28.56
I_MEAN = 14.08
# Z_STD = 54659
# Z_MEAN = 669.91
Z_STD = 241297
Z_MEAN = 3164
# ANS_MEAN=86486
# ANS_STD=256258
ANS_MEAN = 11588614
ANS_STD = 98865053
# def batch_predict(data_line_, prediction_line_, model, feature_dir_, match_fn_):
# print("DEFUNCT BATCH_PREDICT")
# data = json.loads(data_line_)
# question = data['question']
# q_id = slugify(question)
# q_path = os.path.join(feature_dir_, '%s.json' % q_id)
# n_q = [0 for _ in Tokenizer.FEAT]
# if os.path.exists(q_path):
# q_data = open(q_path, encoding=ENCODING).read()
# record = json.loads(q_data)
# q_ner = record['ner']
# q_pos = record['pos']
# for feat in q_ner + q_pos:
# n_q[Tokenizer.FEAT_DICT[feat]] += 1
#
# answer = [normalize(a) for a in data['answer']]
# prediction = json.loads(prediction_line_)
# # ranked_prediction = sorted(prediction, key=lambda k: k['doc_score'])
# ranked_prediction = sorted(prediction, key=lambda k: k['doc_score'], reverse=True)
# correct_rank = get_rank(ranked_prediction, answer, match_fn_)
# total_count_ = 0
# correct_count_ = 0
#
# if correct_rank > 150:
# print("BAD")
# return 0, 0
# all_n_p = []
# all_n_a = []
#
# all_p_scores = []
# all_a_scores = []
# for i, entry in enumerate(ranked_prediction):
#
# if i + 1 > correct_rank:
# break
#
# doc_id = entry['doc_id']
# start = int(entry['start'])
# end = int(entry['end'])
# doc_score = entry['doc_score']
# ans_score = entry['span_score']
#
# p_pos = dict()
# p_ner = dict()
# feat_file = os.path.join(feature_dir_, '%s.json' % doc_id)
# if os.path.exists(feat_file):
# record = json.load(open(feat_file))
# p_ner[doc_id] = record['ner']
# p_pos[doc_id] = record['pos']
# n_p = [0 for _ in Tokenizer.FEAT]
# n_a = [0 for _ in Tokenizer.FEAT]
# for feat in p_ner[doc_id] + p_pos[doc_id]:
# n_p[Tokenizer.FEAT_DICT[feat]] += 1
# for feat in p_ner[doc_id][start:end + 1] + p_pos[doc_id][start:end + 1]:
# n_a[Tokenizer.FEAT_DICT[feat]] += 1
#
# ################Calculate sample z score (t statistic) for answer score
# if all_a_scores == [] or len(all_a_scores) == 1: # dont use a_zscore feature at the beginning
# a_zscore = 0
# else:
# # sample_mean = numpy.mean(all_a_scores + [ans_score])
# sample_mean = numpy.mean(all_a_scores)
# # sample_std = numpy.std(all_a_scores + [ans_score])
# sample_std = numpy.std(all_a_scores)
# a_zscore = (ans_score - sample_mean) / sample_std
#
# corr_doc_score = (doc_score - DOC_MEAN) / DOC_STD
# a_zscore_t = torch.FloatTensor(list([a_zscore])) # 1
#
# corr_doc_score_t = torch.FloatTensor(list([corr_doc_score])) # 1
# ###############
#
# all_n_p.append(n_p)
# all_n_a.append(n_a)
#
# all_p_scores.append(doc_score)
# all_a_scores.append(ans_score)
#
# f_np = aggregate(all_n_p)
# f_na = aggregate(all_n_a)
# f_sp = aggregate(all_p_scores)
# f_sa = aggregate_ans(all_a_scores)
#
# # sp, nq, np, na, ha
# sp = torch.FloatTensor(f_sp) # 4x1
# sa = torch.FloatTensor(f_sa) # 2x1
#
# i_ft = torch.FloatTensor([i])
# i_std = (i - I_MEAN) / I_STD
# i_std = torch.FloatTensor([i_std])
#
# # OLD ONES NO GOOD
# # np = torch.FloatTensor(list(map(float, n_q))) # 4x58
# # na = torch.FloatTensor(f_np) # 4x58
# # nq = torch.FloatTensor(f_na) # 1x58
#
# np = torch.FloatTensor(f_np)
#
# # inputs = torch.cat([sp, sa, nq, np, na])
# # Uncomment this one
# # inputs = torch.cat([sp, nq, np, na, a_zscore_t])
# # inputs = torch.cat([sp, a_zscore_t])
# # inputs = torch.cat([sp, a_zscore_t])
#
# # inputs = torch.cat([corr_doc_score_t, a_zscore_t, i_ft])
# inputs = torch.cat([corr_doc_score_t, a_zscore_t])
#
# prob = model.predict(inputs, prob=True)
# print("Prob of STOP = {}, Correct Rank = {}, i = {}".format(prob, correct_rank, i))
# if prob > 0.5:
# # if prob > 0.75:
# if i + 1 >= correct_rank:
# correct_count_ += 1
# break
# total_count_ += 1
# return correct_count_, total_count_
def batch_predict_test(data_line_, prediction_line_, model, feature_dir_, match_fn_, stop_at=-1):
    """Replay ranked answer predictions for one question through the early-stopping model.

    Walks the predictions in descending doc_score order, feeding the stopping
    model two features per step (standardized doc score and the running
    z-score of the answer score), and stops when the model's stop-probability
    exceeds 0.95, when 40 paragraphs have been seen, or — if ``stop_at`` > 0 —
    at exactly that fixed rank. A stop counts as correct when it happens at or
    after ``correct_rank`` (the first rank whose span matches a gold answer).

    Returns ``(correct_count_, total_count_, diff, es_preds)`` where ``diff``
    is (stop rank - correct rank) and ``es_preds`` is the list of prediction
    entries consumed up to the stop. Questions whose correct rank exceeds 150
    are discarded with ``(0, 0, 0, ranked_prediction)``.
    NOTE(review): if the loop exhausts all predictions without triggering any
    stop condition, the ``assert stop_loc == len(es_preds)`` below fires —
    presumably every prediction file here has >= 40 entries; verify upstream.
    """
    data = json.loads(data_line_)
    # question = data['question']
    # q_id = slugify(question)
    # q_path = os.path.join(feature_dir_, '%s.json' % q_id)
    # n_q = [0 for _ in Tokenizer.FEAT]
    # if os.path.exists(q_path):
    #     q_data = open(q_path, encoding=ENCODING).read()
    #     record = json.loads(q_data)
    #     q_ner = record['ner']
    #     q_pos = record['pos']
    #     for feat in q_ner + q_pos:
    #         n_q[Tokenizer.FEAT_DICT[feat]] += 1
    answer = [normalize(a) for a in data['answer']]
    prediction = json.loads(prediction_line_)
    # Highest doc_score first; rank i+1 corresponds to index i.
    ranked_prediction = sorted(prediction, key=lambda k: k['doc_score'], reverse=True)
    correct_rank = get_rank(ranked_prediction, answer, match_fn_)
    total_count_ = 0
    correct_count_ = 0
    if correct_rank > 150:
        # Unanswerable within the candidate list — exclude from the stats.
        print("BAD")
        return 0, 0, 0, ranked_prediction
    # all_n_p = []
    # all_n_a = []
    all_p_scores = []
    all_a_scores = []
    all_probs = []
    diff = 0
    repeats = 0
    all_spans = []
    es_preds = []
    stop_loc = 0
    for i, entry in enumerate(ranked_prediction):
        es_preds.append(entry)
        # doc_id = entry['doc_id']
        # start = int(entry['start'])
        # end = int(entry['end'])
        doc_score = entry['doc_score']
        ans_score = entry['span_score']
        prob = entry['prob']
        span = entry['span']
        # Track how often the same answer span text recurs across paragraphs.
        if span in all_spans:
            repeats += 1
        all_spans.append(span)
        all_probs.append(prob)
        # print("Threshold 1000000")
        # ans_score=min(ans_score, 1000000) #restrict to max of million
        # p_pos = dict()
        # p_ner = dict()
        # feat_file = os.path.join(feature_dir_, '%s.json' % doc_id)
        # if os.path.exists(feat_file):
        #     record = json.load(open(feat_file))
        #     p_ner[doc_id] = record['ner']
        #     p_pos[doc_id] = record['pos']
        # n_p = [0 for _ in Tokenizer.FEAT]
        # n_a = [0 for _ in Tokenizer.FEAT]
        # for feat in p_ner[doc_id] + p_pos[doc_id]:
        #     n_p[Tokenizer.FEAT_DICT[feat]] += 1
        # for feat in p_ner[doc_id][start:end + 1] + p_pos[doc_id][start:end + 1]:
        #     n_a[Tokenizer.FEAT_DICT[feat]] += 1
        ################Calculate sample z score (t statistic) for answer score
        # z-score of the current answer score against the scores seen so far
        # (note: the current score is excluded from the sample statistics).
        if all_a_scores == [] or len(all_a_scores) == 1:  # dont use a_zscore feature at the beginning
            a_zscore = 0
        else:
            # sample_mean = numpy.mean(all_a_scores + [ans_score])
            sample_mean = numpy.mean(all_a_scores)
            # sample_std = numpy.std(all_a_scores + [ans_score])
            sample_std = numpy.std(all_a_scores)
            # if sample_std != 0:
            a_zscore = (ans_score - sample_mean) / sample_std
            # else:
            # a_zscore = 0
        # if a_zscore != 0:
        #     az_norm = (a_zscore - Z_MEAN) / Z_STD
        # else:
        #     az_norm = 0
        # a_zscore_norm = torch.FloatTensor(list([az_norm]))  # 1
        # Standardize the doc score with precomputed corpus statistics.
        corr_doc_score = (doc_score - DOC_MEAN) / DOC_STD
        # ans_avg = (numpy.mean(all_a_scores + [ans_score]) - ANS_MEAN) / ANS_STD
        a_zscore_t = torch.FloatTensor(list([a_zscore]))  # 1
        # ans_avg = torch.FloatTensor(list([ans_avg]))  # 1
        corr_doc_score_t = torch.FloatTensor(list([corr_doc_score]))  # 1
        # prob_avg = sum(all_probs) / len(all_probs)
        # prob_avg = torch.FloatTensor([prob_avg])
        # repeats_t = torch.FloatTensor([repeats])
        ###############
        # all_n_p.append(n_p)
        # all_n_a.append(n_a)
        all_p_scores.append(doc_score)
        all_a_scores.append(ans_score)
        # f_np = aggregate(all_n_p)
        # f_na = aggregate(all_n_a)
        # f_sp = aggregate(all_p_scores)
        # f_sa = aggregate_ans(all_a_scores)
        # sp, nq, np, na, ha
        # sp = torch.FloatTensor(f_sp)  # 4x1
        # sa = torch.FloatTensor(f_sa)  # 2x1
        # i_ft = torch.FloatTensor([i])
        # i_std = (i - I_MEAN) / I_STD
        # i_std = torch.FloatTensor([i_std])
        # OLD ONES NO GOOD
        # np = torch.FloatTensor(list(map(float, n_q)))  # 4x58
        # na = torch.FloatTensor(f_np)  # 4x58
        # nq = torch.FloatTensor(f_na)  # 1x58
        # na = torch.FloatTensor(f_na)  # 4x58
        # np = torch.FloatTensor(f_np)
        # nq = torch.FloatTensor(list(map(float, n_q)))  # 4x58
        # inputs = torch.cat([sp, sa, nq, np, na])
        # Uncomment this one
        # inputs = torch.cat([sp, nq, np, na, a_zscore_t])
        # inputs = torch.cat([sp, a_zscore_t])
        # inputs = torch.cat([sp, a_zscore_t])
        # inputs = torch.cat([corr_doc_score_t, a_zscore_t, i_ft])
        # Current feature vector: [standardized doc score, answer z-score].
        inputs = torch.cat([corr_doc_score_t, a_zscore_t])
        prob = model.predict(inputs, prob=True)
        # print(list(model.network.parameters()))
        if stop_at <= 0:
            # Model-driven stopping: stop at prob > 0.95, or force-stop at 40.
            print("Prob of STOP = {}, Correct Rank = {}, i = {}, answer_score = {}, REPEATS = {}".format(prob,
                                                                                                         correct_rank,
                                                                                                         i, ans_score,
                                                                                                         repeats))
            # if prob > 0.5:
            if prob > 0.95:
                if i + 1 >= correct_rank:
                    correct_count_ += 1
                    diff = i + 1 - correct_rank
                    print("stop_at <=0 prob > 0.45 CORRECT")
                print("AVG ANS SCORE {}".format(numpy.mean(all_probs)))
                print("STD ANS SCORE {}".format(numpy.std(all_probs)))
                stop_loc = i + 1
                break
            elif i + 1 >= 40:
                print("AVG ANS SCORE {}".format(numpy.mean(all_probs)))
                print("STD ANS SCORE {}".format(numpy.std(all_probs)))
                if i + 1 >= correct_rank:
                    correct_count_ += 1
                    print("stop_at <=0 prob <= 0.45 CORRECT")
                    diff = i + 1 - correct_rank
                stop_loc = i + 1
                break
        else:
            # Fixed-rank baseline: always stop at exactly rank `stop_at`.
            if i + 1 == stop_at:
                # if prob > 0.75:
                if i + 1 >= correct_rank:
                    correct_count_ += 1
                    diff = i + 1 - correct_rank
                    print("stop_at > 0, CORRECT")
                stop_loc = i + 1
                break
    print("stop at: ", stop_loc)
    # Sanity check: es_preds must contain exactly the entries up to the stop.
    assert stop_loc == len(es_preds)
    total_count_ += 1
    return correct_count_, total_count_, diff, es_preds
if __name__ == '__main__':
    # Evaluate the early-stopping model over a predictions file, reporting
    # stop accuracy and the mean/std gap between stop rank and correct rank.
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--prediction_file',
                        help='prediction file, e.g. CuratedTrec-test-lstm.preds.txt')
    parser.add_argument('-a', '--answer_file', help='data set with labels, e.g. CuratedTrec-test.txt')
    parser.add_argument('-f', '--feature_dir', default=None,
                        help='dir that contains json features files, unzip squad.tgz or trec.tgz to get that dir')
    parser.add_argument('-rg', '--regex', action='store_true', help='default to use exact match')
    parser.add_argument('-m', '--model_file', default=None, help='stopping model')
    parser.add_argument('-nm', '--no_multiprocess', action='store_true', help='default to use multiprocessing')
    parser.add_argument('--stop_at', default=-1, type=int)
    args = parser.parse_args()
    # Datasets with regex-style answers (e.g. CuratedTrec) need regex matching.
    match_func = regex_match_score if args.regex else exact_match_score
    answer_file = args.answer_file
    prediction_file = args.prediction_file
    diffs = []
    feature_dir = args.feature_dir
    # if not os.path.exists(feature_dir):
    #     print('feature_dir does not exist!')
    #     exit(-1)
    s = time.time()
    # Run the stopping model on CPU for evaluation.
    eval_model = EarlyStoppingModel.load(args.model_file)
    eval_model.network.cpu()
    total_count = 0
    correct_count = 0
    # print('using multiprocessing...')
    result_handles = []
    # async_pool = ProcessPool()
    # Answer and prediction files are line-aligned: line k of each describes
    # the same question.
    for data_line, prediction_line in zip(open(answer_file, encoding=ENCODING),
                                          open(prediction_file, encoding=ENCODING)):
        param = (data_line, prediction_line, eval_model, feature_dir, match_func, args.stop_at)
        # handle = async_pool.apply_async(batch_predict, param)
        handle = batch_predict_test(*param)
        result_handles.append(handle)
    # Dump the consumed (early-stopped) predictions alongside the input file.
    with open(prediction_file + '.es.txt', 'w') as f:
        for result in result_handles:
            # correct, total = result.get()
            correct, total, dif, es_prediction = result
            f.write(json.dumps(es_prediction) + '\n')
            correct_count += correct
            total_count += total
            if total > 0:
                diffs.append(dif)
            # if total_count % 100 ==0:
            #     print('processed %d/%d, %2.4f' % (correct_count, total_count, correct_count / total_count))
            #     sys.stdout.flush()
    e = time.time()
    print('correct_count:', correct_count, 'total_count:', total_count, 'acc:', correct_count / total_count)
    print('Diff Mean: ', numpy.mean(diffs), 'diff std:', numpy.std(diffs))
    print('took %.4f s' % (e - s))
| #!/usr/bin/env python3
import json
import argparse
import os
from utils import normalize
from utils import exact_match_score, regex_match_score, get_rank
from utils import slugify, aggregate, aggregate_ans
from utils import Tokenizer
from StoppingModel import EarlyStoppingModel
import torch
import time
from multiprocessing import Pool as ProcessPool
import sys
import numpy
ENCODING = "utf-8"
# Normalization statistics (mean/std) used to standardize features fed to the
# early-stopping model. Presumably precomputed on a training corpus -- TODO
# confirm their provenance before reuse.
DOC_MEAN = 8.5142  # retriever document-score mean
DOC_STD = 2.8324   # retriever document-score std
I_STD = 28.56      # rank-index std (only used by commented-out feature code)
I_MEAN = 14.08     # rank-index mean (only used by commented-out feature code)
# Z_STD = 54659
# Z_MEAN = 669.91
Z_STD = 241297     # answer-score z-score normalization (currently unused below)
Z_MEAN = 3164      # answer-score z-score normalization (currently unused below)
# ANS_MEAN=86486
# ANS_STD=256258
ANS_MEAN = 11588614  # raw span-score normalization (currently unused below)
ANS_STD = 98865053   # raw span-score normalization (currently unused below)
# def batch_predict(data_line_, prediction_line_, model, feature_dir_, match_fn_):
# print("DEFUNCT BATCH_PREDICT")
# data = json.loads(data_line_)
# question = data['question']
# q_id = slugify(question)
# q_path = os.path.join(feature_dir_, '%s.json' % q_id)
# n_q = [0 for _ in Tokenizer.FEAT]
# if os.path.exists(q_path):
# q_data = open(q_path, encoding=ENCODING).read()
# record = json.loads(q_data)
# q_ner = record['ner']
# q_pos = record['pos']
# for feat in q_ner + q_pos:
# n_q[Tokenizer.FEAT_DICT[feat]] += 1
#
# answer = [normalize(a) for a in data['answer']]
# prediction = json.loads(prediction_line_)
# # ranked_prediction = sorted(prediction, key=lambda k: k['doc_score'])
# ranked_prediction = sorted(prediction, key=lambda k: k['doc_score'], reverse=True)
# correct_rank = get_rank(ranked_prediction, answer, match_fn_)
# total_count_ = 0
# correct_count_ = 0
#
# if correct_rank > 150:
# print("BAD")
# return 0, 0
# all_n_p = []
# all_n_a = []
#
# all_p_scores = []
# all_a_scores = []
# for i, entry in enumerate(ranked_prediction):
#
# if i + 1 > correct_rank:
# break
#
# doc_id = entry['doc_id']
# start = int(entry['start'])
# end = int(entry['end'])
# doc_score = entry['doc_score']
# ans_score = entry['span_score']
#
# p_pos = dict()
# p_ner = dict()
# feat_file = os.path.join(feature_dir_, '%s.json' % doc_id)
# if os.path.exists(feat_file):
# record = json.load(open(feat_file))
# p_ner[doc_id] = record['ner']
# p_pos[doc_id] = record['pos']
# n_p = [0 for _ in Tokenizer.FEAT]
# n_a = [0 for _ in Tokenizer.FEAT]
# for feat in p_ner[doc_id] + p_pos[doc_id]:
# n_p[Tokenizer.FEAT_DICT[feat]] += 1
# for feat in p_ner[doc_id][start:end + 1] + p_pos[doc_id][start:end + 1]:
# n_a[Tokenizer.FEAT_DICT[feat]] += 1
#
# ################Calculate sample z score (t statistic) for answer score
# if all_a_scores == [] or len(all_a_scores) == 1: # dont use a_zscore feature at the beginning
# a_zscore = 0
# else:
# # sample_mean = numpy.mean(all_a_scores + [ans_score])
# sample_mean = numpy.mean(all_a_scores)
# # sample_std = numpy.std(all_a_scores + [ans_score])
# sample_std = numpy.std(all_a_scores)
# a_zscore = (ans_score - sample_mean) / sample_std
#
# corr_doc_score = (doc_score - DOC_MEAN) / DOC_STD
# a_zscore_t = torch.FloatTensor(list([a_zscore])) # 1
#
# corr_doc_score_t = torch.FloatTensor(list([corr_doc_score])) # 1
# ###############
#
# all_n_p.append(n_p)
# all_n_a.append(n_a)
#
# all_p_scores.append(doc_score)
# all_a_scores.append(ans_score)
#
# f_np = aggregate(all_n_p)
# f_na = aggregate(all_n_a)
# f_sp = aggregate(all_p_scores)
# f_sa = aggregate_ans(all_a_scores)
#
# # sp, nq, np, na, ha
# sp = torch.FloatTensor(f_sp) # 4x1
# sa = torch.FloatTensor(f_sa) # 2x1
#
# i_ft = torch.FloatTensor([i])
# i_std = (i - I_MEAN) / I_STD
# i_std = torch.FloatTensor([i_std])
#
# # OLD ONES NO GOOD
# # np = torch.FloatTensor(list(map(float, n_q))) # 4x58
# # na = torch.FloatTensor(f_np) # 4x58
# # nq = torch.FloatTensor(f_na) # 1x58
#
# np = torch.FloatTensor(f_np)
#
# # inputs = torch.cat([sp, sa, nq, np, na])
# # Uncomment this one
# # inputs = torch.cat([sp, nq, np, na, a_zscore_t])
# # inputs = torch.cat([sp, a_zscore_t])
# # inputs = torch.cat([sp, a_zscore_t])
#
# # inputs = torch.cat([corr_doc_score_t, a_zscore_t, i_ft])
# inputs = torch.cat([corr_doc_score_t, a_zscore_t])
#
# prob = model.predict(inputs, prob=True)
# print("Prob of STOP = {}, Correct Rank = {}, i = {}".format(prob, correct_rank, i))
# if prob > 0.5:
# # if prob > 0.75:
# if i + 1 >= correct_rank:
# correct_count_ += 1
# break
# total_count_ += 1
# return correct_count_, total_count_
def batch_predict_test(data_line_, prediction_line_, model, feature_dir_, match_fn_, stop_at=-1):
    """Replay one question's ranked predictions and decide where to stop reading.

    Args:
        data_line_: JSON line holding the question and its gold 'answer' list.
        prediction_line_: JSON line with candidate entries, each carrying
            'doc_score', 'span_score', 'prob' and 'span'.
        model: EarlyStoppingModel; predict() maps a feature vector to P(stop).
        feature_dir_: directory of per-doc NER/POS features -- only used by the
            commented-out feature code, unused in the live path.
        match_fn_: answer matcher (exact_match_score or regex_match_score).
        stop_at: if > 0, ignore the model and stop at exactly this rank.

    Returns:
        (correct_count, total_count, diff, es_preds): correct_count/total_count
        are 0/1 flags for this question, diff is (stop rank - first correct
        rank), and es_preds are the prediction entries consumed before stopping.
    """
    data = json.loads(data_line_)
    # Question-side NER/POS feature extraction (disabled).
    # question = data['question']
    # q_id = slugify(question)
    # q_path = os.path.join(feature_dir_, '%s.json' % q_id)
    # n_q = [0 for _ in Tokenizer.FEAT]
    # if os.path.exists(q_path):
    #     q_data = open(q_path, encoding=ENCODING).read()
    #     record = json.loads(q_data)
    #     q_ner = record['ner']
    #     q_pos = record['pos']
    #     for feat in q_ner + q_pos:
    #         n_q[Tokenizer.FEAT_DICT[feat]] += 1
    answer = [normalize(a) for a in data['answer']]
    prediction = json.loads(prediction_line_)
    # Highest retriever score first.
    ranked_prediction = sorted(prediction, key=lambda k: k['doc_score'], reverse=True)
    correct_rank = get_rank(ranked_prediction, answer, match_fn_)
    total_count_ = 0
    correct_count_ = 0
    if correct_rank > 150:
        # No correct answer anywhere near the top; skip this question entirely.
        print("BAD")
        return 0, 0, 0, ranked_prediction
    # all_n_p = []
    # all_n_a = []
    all_p_scores = []
    all_a_scores = []
    all_probs = []
    diff = 0
    repeats = 0
    all_spans = []
    es_preds = []
    stop_loc = 0
    for i, entry in enumerate(ranked_prediction):
        es_preds.append(entry)
        # doc_id = entry['doc_id']
        # start = int(entry['start'])
        # end = int(entry['end'])
        doc_score = entry['doc_score']
        ans_score = entry['span_score']
        prob = entry['prob']
        span = entry['span']
        # Track how often the same answer span re-appears down the ranking.
        if span in all_spans:
            repeats += 1
        all_spans.append(span)
        all_probs.append(prob)
        # print("Threshold 1000000")
        # ans_score = min(ans_score, 1000000)  # restrict to max of a million
        # Paragraph/answer NER-POS feature extraction (disabled).
        # p_pos = dict()
        # p_ner = dict()
        # feat_file = os.path.join(feature_dir_, '%s.json' % doc_id)
        # if os.path.exists(feat_file):
        #     record = json.load(open(feat_file))
        #     p_ner[doc_id] = record['ner']
        #     p_pos[doc_id] = record['pos']
        # n_p = [0 for _ in Tokenizer.FEAT]
        # n_a = [0 for _ in Tokenizer.FEAT]
        # for feat in p_ner[doc_id] + p_pos[doc_id]:
        #     n_p[Tokenizer.FEAT_DICT[feat]] += 1
        # for feat in p_ner[doc_id][start:end + 1] + p_pos[doc_id][start:end + 1]:
        #     n_a[Tokenizer.FEAT_DICT[feat]] += 1
        ################Calculate sample z score (t statistic) for answer score
        if all_a_scores == [] or len(all_a_scores) == 1:  # dont use a_zscore feature at the beginning
            a_zscore = 0
        else:
            # sample_mean = numpy.mean(all_a_scores + [ans_score])
            sample_mean = numpy.mean(all_a_scores)
            # sample_std = numpy.std(all_a_scores + [ans_score])
            sample_std = numpy.std(all_a_scores)
            # NOTE(review): divides by the sample std of the scores seen so
            # far; raises ZeroDivisionError if they are all identical (a
            # guard exists below but is commented out) -- confirm intended.
            # if sample_std != 0:
            a_zscore = (ans_score - sample_mean) / sample_std
            # else:
            #     a_zscore = 0
        # if a_zscore != 0:
        #     az_norm = (a_zscore - Z_MEAN) / Z_STD
        # else:
        #     az_norm = 0
        # a_zscore_norm = torch.FloatTensor(list([az_norm]))  # 1
        # Standardize the retriever score with the module-level statistics.
        corr_doc_score = (doc_score - DOC_MEAN) / DOC_STD
        # ans_avg = (numpy.mean(all_a_scores + [ans_score]) - ANS_MEAN) / ANS_STD
        a_zscore_t = torch.FloatTensor(list([a_zscore]))  # 1
        # ans_avg = torch.FloatTensor(list([ans_avg]))  # 1
        corr_doc_score_t = torch.FloatTensor(list([corr_doc_score]))  # 1
        # prob_avg = sum(all_probs) / len(all_probs)
        # prob_avg = torch.FloatTensor([prob_avg])
        # repeats_t = torch.FloatTensor([repeats])
        ###############
        # all_n_p.append(n_p)
        # all_n_a.append(n_a)
        all_p_scores.append(doc_score)
        all_a_scores.append(ans_score)
        # f_np = aggregate(all_n_p)
        # f_na = aggregate(all_n_a)
        # f_sp = aggregate(all_p_scores)
        # f_sa = aggregate_ans(all_a_scores)
        # sp, nq, np, na, ha
        # sp = torch.FloatTensor(f_sp)  # 4x1
        # sa = torch.FloatTensor(f_sa)  # 2x1
        # i_ft = torch.FloatTensor([i])
        # i_std = (i - I_MEAN) / I_STD
        # i_std = torch.FloatTensor([i_std])
        # OLD ONES NO GOOD
        # np = torch.FloatTensor(list(map(float, n_q)))  # 4x58
        # na = torch.FloatTensor(f_np)  # 4x58
        # nq = torch.FloatTensor(f_na)  # 1x58
        # na = torch.FloatTensor(f_na)  # 4x58
        # np = torch.FloatTensor(f_np)
        # nq = torch.FloatTensor(list(map(float, n_q)))  # 4x58
        # inputs = torch.cat([sp, sa, nq, np, na])
        # Uncomment this one
        # inputs = torch.cat([sp, nq, np, na, a_zscore_t])
        # inputs = torch.cat([sp, a_zscore_t])
        # inputs = torch.cat([sp, a_zscore_t])
        # inputs = torch.cat([corr_doc_score_t, a_zscore_t, i_ft])
        # Live feature vector: standardized doc score + answer-score z-score.
        inputs = torch.cat([corr_doc_score_t, a_zscore_t])
        prob = model.predict(inputs, prob=True)
        # print(list(model.network.parameters()))
        if stop_at <= 0:
            # Model-driven stopping: stop when P(stop) clears the threshold,
            # or force a stop after 40 entries.
            print("Prob of STOP = {}, Correct Rank = {}, i = {}, answer_score = {}, REPEATS = {}".format(prob,
                                                                                                         correct_rank,
                                                                                                         i, ans_score,
                                                                                                         repeats))
            # if prob > 0.5:
            if prob > 0.95:
                if i + 1 >= correct_rank:
                    correct_count_ += 1
                diff = i + 1 - correct_rank
                print("stop_at <=0 prob > 0.45 CORRECT")
                print("AVG ANS SCORE {}".format(numpy.mean(all_probs)))
                print("STD ANS SCORE {}".format(numpy.std(all_probs)))
                stop_loc = i + 1
                break
            elif i + 1 >= 40:
                print("AVG ANS SCORE {}".format(numpy.mean(all_probs)))
                print("STD ANS SCORE {}".format(numpy.std(all_probs)))
                if i + 1 >= correct_rank:
                    correct_count_ += 1
                    print("stop_at <=0 prob <= 0.45 CORRECT")
                diff = i + 1 - correct_rank
                stop_loc = i + 1
                break
        else:
            # Fixed-rank baseline: always stop at rank stop_at.
            if i + 1 == stop_at:
                # if prob > 0.75:
                if i + 1 >= correct_rank:
                    correct_count_ += 1
                diff = i + 1 - correct_rank
                print("stop_at > 0, CORRECT")
                stop_loc = i + 1
                break
    print("stop at: ", stop_loc)
    # es_preds must contain exactly the entries consumed before stopping.
    # NOTE(review): if the loop ever finishes without a break (e.g. stop_at
    # larger than the candidate list), stop_loc stays 0 and this fires.
    assert stop_loc == len(es_preds)
    total_count_ += 1
    return correct_count_, total_count_, diff, es_preds
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--prediction_file',
                        help='prediction file, e.g. CuratedTrec-test-lstm.preds.txt')
    parser.add_argument('-a', '--answer_file', help='data set with labels, e.g. CuratedTrec-test.txt')
    parser.add_argument('-f', '--feature_dir', default=None,
                        help='dir that contains json features files, unzip squad.tgz or trec.tgz to get that dir')
    parser.add_argument('-rg', '--regex', action='store_true', help='default to use exact match')
    parser.add_argument('-m', '--model_file', default=None, help='stopping model')
    # NOTE(review): --no_multiprocess is parsed but never consulted below;
    # the multiprocessing path is commented out regardless.
    parser.add_argument('-nm', '--no_multiprocess', action='store_true', help='default to use multiprocessing')
    parser.add_argument('--stop_at', default=-1, type=int)
    args = parser.parse_args()
    match_func = regex_match_score if args.regex else exact_match_score
    answer_file = args.answer_file
    prediction_file = args.prediction_file
    diffs = []  # per-question (stop rank - correct rank) overshoots
    feature_dir = args.feature_dir
    # if not os.path.exists(feature_dir):
    #     print('feature_dir does not exist!')
    #     exit(-1)
    s = time.time()
    eval_model = EarlyStoppingModel.load(args.model_file)
    # Evaluation runs on CPU.
    eval_model.network.cpu()
    total_count = 0
    correct_count = 0
    # print('using multiprocessing...')
    result_handles = []
    # async_pool = ProcessPool()
    # Pair each gold line with its prediction line and evaluate sequentially
    # (the async pool variant is disabled).
    for data_line, prediction_line in zip(open(answer_file, encoding=ENCODING),
                                          open(prediction_file, encoding=ENCODING)):
        param = (data_line, prediction_line, eval_model, feature_dir, match_func, args.stop_at)
        # handle = async_pool.apply_async(batch_predict, param)
        handle = batch_predict_test(*param)
        result_handles.append(handle)
    # Dump the entries consumed per question and accumulate accuracy stats.
    with open(prediction_file + '.es.txt', 'w') as f:
        for result in result_handles:
            # correct, total = result.get()
            correct, total, dif, es_prediction = result
            f.write(json.dumps(es_prediction) + '\n')
            correct_count += correct
            total_count += total
            if total > 0:
                diffs.append(dif)
            # if total_count % 100 ==0:
            #     print('processed %d/%d, %2.4f' % (correct_count, total_count, correct_count / total_count))
            #     sys.stdout.flush()
    e = time.time()
    print('correct_count:', correct_count, 'total_count:', total_count, 'acc:', correct_count / total_count)
    print('Diff Mean: ', numpy.mean(diffs), 'diff std:', numpy.std(diffs))
print('took %.4f s' % (e - s)) | en | 0.433887 | #!/usr/bin/env python3 # Z_STD = 54659 # Z_MEAN = 669.91 # ANS_MEAN=86486 # ANS_STD=256258 # def batch_predict(data_line_, prediction_line_, model, feature_dir_, match_fn_): # print("DEFUNCT BATCH_PREDICT") # data = json.loads(data_line_) # question = data['question'] # q_id = slugify(question) # q_path = os.path.join(feature_dir_, '%s.json' % q_id) # n_q = [0 for _ in Tokenizer.FEAT] # if os.path.exists(q_path): # q_data = open(q_path, encoding=ENCODING).read() # record = json.loads(q_data) # q_ner = record['ner'] # q_pos = record['pos'] # for feat in q_ner + q_pos: # n_q[Tokenizer.FEAT_DICT[feat]] += 1 # # answer = [normalize(a) for a in data['answer']] # prediction = json.loads(prediction_line_) # # ranked_prediction = sorted(prediction, key=lambda k: k['doc_score']) # ranked_prediction = sorted(prediction, key=lambda k: k['doc_score'], reverse=True) # correct_rank = get_rank(ranked_prediction, answer, match_fn_) # total_count_ = 0 # correct_count_ = 0 # # if correct_rank > 150: # print("BAD") # return 0, 0 # all_n_p = [] # all_n_a = [] # # all_p_scores = [] # all_a_scores = [] # for i, entry in enumerate(ranked_prediction): # # if i + 1 > correct_rank: # break # # doc_id = entry['doc_id'] # start = int(entry['start']) # end = int(entry['end']) # doc_score = entry['doc_score'] # ans_score = entry['span_score'] # # p_pos = dict() # p_ner = dict() # feat_file = os.path.join(feature_dir_, '%s.json' % doc_id) # if os.path.exists(feat_file): # record = json.load(open(feat_file)) # p_ner[doc_id] = record['ner'] # p_pos[doc_id] = record['pos'] # n_p = [0 for _ in Tokenizer.FEAT] # n_a = [0 for _ in Tokenizer.FEAT] # for feat in p_ner[doc_id] + p_pos[doc_id]: # n_p[Tokenizer.FEAT_DICT[feat]] += 1 # for feat in p_ner[doc_id][start:end + 1] + p_pos[doc_id][start:end + 1]: # n_a[Tokenizer.FEAT_DICT[feat]] += 1 # # ################Calculate sample z score (t statistic) for answer score # if all_a_scores == [] or 
len(all_a_scores) == 1: # dont use a_zscore feature at the beginning # a_zscore = 0 # else: # # sample_mean = numpy.mean(all_a_scores + [ans_score]) # sample_mean = numpy.mean(all_a_scores) # # sample_std = numpy.std(all_a_scores + [ans_score]) # sample_std = numpy.std(all_a_scores) # a_zscore = (ans_score - sample_mean) / sample_std # # corr_doc_score = (doc_score - DOC_MEAN) / DOC_STD # a_zscore_t = torch.FloatTensor(list([a_zscore])) # 1 # # corr_doc_score_t = torch.FloatTensor(list([corr_doc_score])) # 1 # ############### # # all_n_p.append(n_p) # all_n_a.append(n_a) # # all_p_scores.append(doc_score) # all_a_scores.append(ans_score) # # f_np = aggregate(all_n_p) # f_na = aggregate(all_n_a) # f_sp = aggregate(all_p_scores) # f_sa = aggregate_ans(all_a_scores) # # # sp, nq, np, na, ha # sp = torch.FloatTensor(f_sp) # 4x1 # sa = torch.FloatTensor(f_sa) # 2x1 # # i_ft = torch.FloatTensor([i]) # i_std = (i - I_MEAN) / I_STD # i_std = torch.FloatTensor([i_std]) # # # OLD ONES NO GOOD # # np = torch.FloatTensor(list(map(float, n_q))) # 4x58 # # na = torch.FloatTensor(f_np) # 4x58 # # nq = torch.FloatTensor(f_na) # 1x58 # # np = torch.FloatTensor(f_np) # # # inputs = torch.cat([sp, sa, nq, np, na]) # # Uncomment this one # # inputs = torch.cat([sp, nq, np, na, a_zscore_t]) # # inputs = torch.cat([sp, a_zscore_t]) # # inputs = torch.cat([sp, a_zscore_t]) # # # inputs = torch.cat([corr_doc_score_t, a_zscore_t, i_ft]) # inputs = torch.cat([corr_doc_score_t, a_zscore_t]) # # prob = model.predict(inputs, prob=True) # print("Prob of STOP = {}, Correct Rank = {}, i = {}".format(prob, correct_rank, i)) # if prob > 0.5: # # if prob > 0.75: # if i + 1 >= correct_rank: # correct_count_ += 1 # break # total_count_ += 1 # return correct_count_, total_count_ # question = data['question'] # q_id = slugify(question) # q_path = os.path.join(feature_dir_, '%s.json' % q_id) # n_q = [0 for _ in Tokenizer.FEAT] # if os.path.exists(q_path): # q_data = open(q_path, encoding=ENCODING).read() 
# record = json.loads(q_data) # q_ner = record['ner'] # q_pos = record['pos'] # for feat in q_ner + q_pos: # n_q[Tokenizer.FEAT_DICT[feat]] += 1 # all_n_p = [] # all_n_a = [] # doc_id = entry['doc_id'] # start = int(entry['start']) # end = int(entry['end']) # print("Threshold 1000000") # ans_score=min(ans_score, 1000000) #restrict to max of million # p_pos = dict() # p_ner = dict() # feat_file = os.path.join(feature_dir_, '%s.json' % doc_id) # if os.path.exists(feat_file): # record = json.load(open(feat_file)) # p_ner[doc_id] = record['ner'] # p_pos[doc_id] = record['pos'] # n_p = [0 for _ in Tokenizer.FEAT] # n_a = [0 for _ in Tokenizer.FEAT] # for feat in p_ner[doc_id] + p_pos[doc_id]: # n_p[Tokenizer.FEAT_DICT[feat]] += 1 # for feat in p_ner[doc_id][start:end + 1] + p_pos[doc_id][start:end + 1]: # n_a[Tokenizer.FEAT_DICT[feat]] += 1 ################Calculate sample z score (t statistic) for answer score # dont use a_zscore feature at the beginning # sample_mean = numpy.mean(all_a_scores + [ans_score]) # sample_std = numpy.std(all_a_scores + [ans_score]) # if sample_std != 0: # else: # a_zscore = 0 # if a_zscore != 0: # az_norm = (a_zscore - Z_MEAN) / Z_STD # else: # az_norm = 0 # a_zscore_norm = torch.FloatTensor(list([az_norm])) # 1 # ans_avg = (numpy.mean(all_a_scores + [ans_score]) - ANS_MEAN) / ANS_STD # 1 # ans_avg = torch.FloatTensor(list([ans_avg])) # 1 # 1 # prob_avg = sum(all_probs) / len(all_probs) # prob_avg = torch.FloatTensor([prob_avg]) # repeats_t = torch.FloatTensor([repeats]) ############### # all_n_p.append(n_p) # all_n_a.append(n_a) # f_np = aggregate(all_n_p) # f_na = aggregate(all_n_a) # f_sp = aggregate(all_p_scores) # f_sa = aggregate_ans(all_a_scores) # sp, nq, np, na, ha # sp = torch.FloatTensor(f_sp) # 4x1 # sa = torch.FloatTensor(f_sa) # 2x1 # i_ft = torch.FloatTensor([i]) # i_std = (i - I_MEAN) / I_STD # i_std = torch.FloatTensor([i_std]) # OLD ONES NO GOOD # np = torch.FloatTensor(list(map(float, n_q))) # 4x58 # na = 
torch.FloatTensor(f_np) # 4x58 # nq = torch.FloatTensor(f_na) # 1x58 # na = torch.FloatTensor(f_na) # 4x58 # np = torch.FloatTensor(f_np) # nq = torch.FloatTensor(list(map(float, n_q))) # 4x58 # inputs = torch.cat([sp, sa, nq, np, na]) # Uncomment this one # inputs = torch.cat([sp, nq, np, na, a_zscore_t]) # inputs = torch.cat([sp, a_zscore_t]) # inputs = torch.cat([sp, a_zscore_t]) # inputs = torch.cat([corr_doc_score_t, a_zscore_t, i_ft]) # print(list(model.network.parameters())) # if prob > 0.5: # if prob > 0.75: # if not os.path.exists(feature_dir): # print('feature_dir does not exist!') # exit(-1) # print('using multiprocessing...') # async_pool = ProcessPool() # handle = async_pool.apply_async(batch_predict, param) # correct, total = result.get() # if total_count % 100 ==0: # print('processed %d/%d, %2.4f' % (correct_count, total_count, correct_count / total_count)) # sys.stdout.flush() | 2.008425 | 2 |
homepage/portfolio/migrations/0056_auto_20200706_1737.py | FabianVolkers/portfolio | 0 | 6613941 | # Generated by Django 3.0.7 on 2020-07-06 17:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Consolidates link ownership: NavBarEdit is renamed to LinkEdit so a
    # single model can own both navbar and footer links. The old per-model
    # foreign keys (FooterLink.footer_edit, NavLink.navbar) are replaced by a
    # shared nullable FK named link_edit, and the now-unused FooterEdit model
    # is dropped.

    dependencies = [
        ('portfolio', '0055_auto_20200706_1729'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='NavBarEdit',
            new_name='LinkEdit',
        ),
        # Remove the old relation fields before adding their replacements.
        migrations.RemoveField(
            model_name='footerlink',
            name='footer_edit',
        ),
        migrations.RemoveField(
            model_name='navlink',
            name='navbar',
        ),
        # SET_NULL keeps existing links alive if their LinkEdit is deleted.
        migrations.AddField(
            model_name='footerlink',
            name='link_edit',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='footer_links', to='portfolio.LinkEdit'),
        ),
        migrations.AddField(
            model_name='navlink',
            name='link_edit',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='navbar_links', to='portfolio.LinkEdit'),
        ),
        migrations.DeleteModel(
            name='FooterEdit',
        ),
    ]
| # Generated by Django 3.0.7 on 2020-07-06 17:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('portfolio', '0055_auto_20200706_1729'),
]
operations = [
migrations.RenameModel(
old_name='NavBarEdit',
new_name='LinkEdit',
),
migrations.RemoveField(
model_name='footerlink',
name='footer_edit',
),
migrations.RemoveField(
model_name='navlink',
name='navbar',
),
migrations.AddField(
model_name='footerlink',
name='link_edit',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='footer_links', to='portfolio.LinkEdit'),
),
migrations.AddField(
model_name='navlink',
name='link_edit',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='navbar_links', to='portfolio.LinkEdit'),
),
migrations.DeleteModel(
name='FooterEdit',
),
]
| en | 0.819871 | # Generated by Django 3.0.7 on 2020-07-06 17:37 | 1.512685 | 2 |
decomponer.py | daniela2001-png/PYTHON-REVIEW-TOPICS | 0 | 6613942 | <filename>decomponer.py
#!/usr/bin/python3
class Auto:
    """A car described by its model, brand and plate, composed with a Motor."""

    def __init__(self, modelo: str, marca: str, placa: str):
        # Store the descriptive fields and precompute the engine summary text.
        self.modelo = modelo
        self.marca = marca
        self.placa = placa
        self.motor = Motor(4).info_motor()

    def encender(self, bool: bool) -> str:
        """Return a Spanish status string: engine on when truthy, off otherwise.

        NOTE(review): the parameter name shadows the built-in `bool`; it is
        kept unchanged for interface compatibility with existing callers.
        """
        if bool:
            return "El auto esta encendido"
        return "El auto esta apagado"
class Motor:
    """An engine with a cylinder count, a fuel type and a private temperature."""

    def __init__(self, num_cilindros: int, tipo="gasolina"):
        self.num_cilindros = num_cilindros
        self.tipo = tipo
        # Name-mangled to _Motor__temperatura; demonstrates "private" state.
        self.__temperatura = 20

    def info_motor(self) -> str:
        """Return a Spanish one-line description of the engine."""
        descripcion = f"Su motor es tipo {self.tipo} y tiene {self.num_cilindros} cilindros"
        return descripcion
if __name__ == "__main__":
    auto = Auto(modelo="hi", marca="bmw", placa="BTL357")
    print(auto.motor)
    motor = Motor(4)
    print(motor.info_motor())
    # To read the value held by the "private" variable "temperatura",
    # inspect the instance with dir(obj) to see how Python's name
    # mangling rewrote the attribute name.
    # print(dir(motor))  # _Motor__temperatura
    # Here we access the private variable through its mangled name :3
    print(motor._Motor__temperatura)
    print(auto.encender(True))  # prints "El auto esta encendido"
| <filename>decomponer.py
#!/usr/bin/python3
class Auto:
def __init__(self, modelo: str, marca: str, placa: str):
self.modelo = modelo
self.marca = marca
self.placa = placa
self.motor = Motor(4).info_motor()
def encender(self, bool: bool) -> str: # esta funcion retornara un 'str'
result = "El auto esta encendido" if bool else "El auto esta apagado"
return result
class Motor:
def __init__(self, num_cilindros: int, tipo="gasolina"):
self.num_cilindros = num_cilindros
self.tipo = tipo
self.__temperatura = 20
def info_motor(self) -> str: # esta funcion retornara un 'str'
return f"Su motor es tipo {self.tipo} y tiene {self.num_cilindros} cilindros"
if __name__ == "__main__":
auto = Auto(modelo="hi", marca="bmw", placa="BTL357")
print(auto.motor)
motor = Motor(4)
print(motor.info_motor())
# si queremos acceder al valor que tiene la varibale privada "temperatura"
# solo miramos con el metodo dir(objeto) de como esta reescrita esa
# instacia
# print(dir(motor)) # _Motor__temperatura
# aqui hemos accedido al valor de la variable privada :3
print(motor._Motor__temperatura)
print(auto.encender(True)) # El auto esta encendidoç
| es | 0.739524 | #!/usr/bin/python3 # esta funcion retornara un 'str' # esta funcion retornara un 'str' # si queremos acceder al valor que tiene la varibale privada "temperatura" # solo miramos con el metodo dir(objeto) de como esta reescrita esa # instacia # print(dir(motor)) # _Motor__temperatura # aqui hemos accedido al valor de la variable privada :3 # El auto esta encendidoç | 3.752742 | 4 |
data_processor.py | LukeStebbing/Ergometer | 0 | 6613943 | <gh_stars>0
import asyncio
import glob
import os.path as path
import os
import sys
# trim at startup
# mark irrecoverable data
class IntegrityError(Exception):
    """Raised when on-disk event data violates the 16-byte record invariant."""
# Read local events for "host", writing them to "broker"
# Read local events for "host", writing them to "broker".
async def publish_local_events(host, file_manager, broker, batch_size=64):
    """Forever stream this host's local event log to the broker.

    Resumes from the broker's last known position for the host, then loops:
    read the next batch of records locally and push them to the broker.

    host: name of this host's log.
    file_manager: FileManager-like object; read(host, position, batch_size).
    broker: remote endpoint; host_position() and write() are awaited.
    batch_size: max records per read (new, defaulted for compatibility).
    """
    position = await broker.host_position(host)
    while True:
        # FileManager.read requires (host, position, batch_size); the old
        # single-argument call raised TypeError.
        data = await file_manager.read(host, position, batch_size)
        # broker.write handles reconnects internally and returns the new offset.
        position = await broker.write(host, data, position)
def die(check, msg):
    """Assert-style guard: if check is falsy, print msg to stderr and exit(1).

    Every call site passes a success condition together with a failure
    message (e.g. die(bytes_written == n, "Failed to write...")), so the
    guard must fire when the condition is False. The original version had
    the test inverted and called the nonexistent os.exit (AttributeError).
    """
    if check:
        return
    print(msg, file=sys.stderr)
    sys.exit(1)
# Read local events from "event_queue" in groups of "batch_size"
# and write them using "file_manager"
# Read local events from "event_queue" in groups of up to "batch_size"
# and write them using "file_manager".
async def local_event_handler(host, event_queue, file_manager, batch_size):
    """Drain events from event_queue in batches and persist them for host.

    Blocks until at least one event arrives, then opportunistically grabs up
    to batch_size - 1 more without waiting, and writes the whole batch.
    event_queue is assumed to be an asyncio.Queue (get() is a coroutine,
    get_nowait() exists) -- TODO confirm against the caller.
    """
    while True:
        # Await the first event; the original unawaited get() would have
        # stored a coroutine object instead of an event.
        to_write = [await event_queue.get()]
        for _ in range(batch_size - 1):
            if event_queue.empty():
                break
            to_write.append(event_queue.get_nowait())
        # The original referenced an undefined name `data` here (NameError).
        file_manager.write(host, to_write)
# Read all changes from "broker" for other hosts and write them using
# "file_manager"
# Read all changes from "broker" for other hosts and write them using
# "file_manager".
async def change_subscriber(broker, file_manager, host=None):
    """Continuously apply other hosts' changes from the broker to local files.

    host: this host's own name, excluded from the subscription so we never
        re-apply events we published ourselves. New keyword with a None
        default (exclude nothing) for backward compatibility; the original
        referenced an undefined `host` name (NameError).
    On any broker/stream error the exception is logged and the subscription
    restarts from the current local positions.
    NOTE(review): FileManager.positions in this file is a property, not a
    coroutine -- confirm which manager implementation is passed here.
    """
    while True:
        # Snapshot current file offsets so the broker resumes from there.
        positions = await file_manager.positions()
        try:
            # Read messages forever; the loop variable no longer shadows the
            # excluded host name.
            async for src_host, data, position in broker.read(positions, exclude=host):
                file_manager.write(src_host, data, position=position)
        except Exception as e:
            file_manager.log(e)
# A single file. Does error handling around reads and writes as well as
# tracking the position. Writes are synchronous and reads are asynchronous.
class HostFile:
    """A single host's append-only log of fixed-size 16-byte records.

    Handles error logging around reads and writes and tracks the last safe
    (16-byte-aligned) position. Writes are synchronous; reads are
    asynchronous and wait for new data when the caller is caught up.
    """

    def __init__(self, host_path, error_event=None):
        # error_event is optional so callers that only read/write records
        # (e.g. FileManager, which constructs HostFile(path)) need not
        # supply one.
        self.path = host_path
        self.data_available = asyncio.Event()
        self.error_event = error_event

    @property
    def size(self):
        """Current size of the backing file in bytes."""
        # The original read the free variable `host_path` (NameError).
        return os.stat(self.path).st_size

    # REQUIRES: f is a file opened in "rb+" mode
    def safe_seek(self, f):
        """Seek f to the last 16-byte-aligned position and return it.

        If the file ends with a partial record, log its bytes as corruption;
        the original logged "Corruption" on every call because the read/log
        steps were outside the misalignment branch.
        """
        file_position = self.size
        if file_position % 16 != 0:
            file_position -= file_position % 16
            f.seek(file_position)
            corruption = f.read(16)
            self.log(f"Corruption in file {self.path}: '{corruption}'")
        f.seek(file_position)
        return file_position

    def write(self, data, position=None):
        """Append records; data is an iterable of bytes whose total length is
        a multiple of 16.

        If position is given it must equal the current safe end of file,
        otherwise IntegrityError is raised. Dies on short writes.
        """
        for d in data:
            die(isinstance(d, bytes), f"Got non-bytes data to write: {d}")
        # "rb+" requires an existing file; create an empty log on first use.
        if not os.path.exists(self.path):
            open(self.path, "wb").close()
        # Open for reading and writing without truncating.
        with open(self.path, "rb+") as f:
            file_position = self.safe_seek(f)
            to_write = b"".join(data)
            to_write_len = len(to_write)
            # position=None (the default) means "append wherever the safe
            # end is"; the original compared None != int and always raised.
            if position is not None and position != file_position:
                raise IntegrityError(
                    f"Tried to write at position {position} but the latest safe position is {file_position}"
                )
            if to_write_len % 16 != 0:
                raise IntegrityError(
                    f"Tried to write {to_write_len} bytes to {self.path}, which is not divisible by 16"
                )
            bytes_written = f.write(to_write)
            # Compare against the byte count, not len(data) (chunk count).
            die(bytes_written == to_write_len, f"Failed to write to file {self.path}")
        # Wake all readers waiting on new data, then arm a fresh event for
        # the next wait cycle.
        self.data_available.set()
        self.data_available = asyncio.Event()

    def log(self, msg):
        """Append msg (newline added) to error.log beside this host file.

        HostFile has no storage_root attribute, so the log lives in the same
        directory as the log file itself.
        """
        error_path = os.path.join(os.path.dirname(self.path), "error.log")
        with open(error_path, "a") as f:
            full_message = msg + "\n"
            bytes_written = f.write(full_message)
            die(
                bytes_written == len(full_message),
                f"Failed to write to error log {error_path}",
            )
        if self.error_event is not None:
            self.error_event.set()

    async def read(self, position, batch_size):
        """Return up to batch_size 16-byte records starting at position.

        Waits until data beyond position exists. A trailing partial record
        is excluded; its corruption is reported on the next write.
        """
        die(
            position % 16 == 0,
            f"Tried to read file {self.path} at position {position} which is not divisible by 16",
        )
        file_size = self.size
        die(
            position <= file_size,
            f"Tried to read file {self.path} at position {position} which > {file_size}",
        )
        # Wait if there is no new data.
        if file_size == position:
            await self.data_available.wait()
        data = ""
        # Re-read the size after waiting and drop any trailing partial entry.
        file_size = self.size
        if file_size % 16 != 0:
            file_size -= file_size % 16
        with open(self.path, "rb") as f:
            f.seek(position)
            desired_bytes = batch_size * 16
            available_bytes = file_size - position
            data = f.read(min(desired_bytes, available_bytes))
        return data
# Reads and writes data for particular hosts.
class FileManager:
    """Reads and writes per-host event logs stored under storage_root.

    Each host's records live in "<host>.hostlog"; HostFile objects are
    created lazily on first access and discovered at startup via glob.
    """

    def __init__(self, host, storage_root):
        self.storage_root = storage_root
        self.host = host
        # host name -> HostFile; the original never initialized this dict
        # (AttributeError on first use).
        self.hosts = {}
        for host_path in glob.glob(self.host_path("*")):
            # Key by the bare host name; rpartition on the full path kept the
            # directory prefix in the key, so lookups by host name missed.
            name = path.basename(host_path)
            name, _, _ = name.rpartition(".hostlog")
            self.hosts[name] = HostFile(host_path)

    def host_path(self, host):
        """Return the log-file path for host."""
        return os.path.join(self.storage_root, f"{host}.hostlog")

    @property
    def positions(self):
        """Map each known host to its last safe (16-byte-aligned) position."""
        positions = {name: host.size for name, host in self.hosts.items()}
        # If any files have an integrity error ignore the corrupt tail; it
        # will be logged on the next write. (.items() was missing, which made
        # the dict comprehension unpack keys and raise ValueError.)
        return {name: pos - (pos % 16) for name, pos in positions.items()}

    def write(self, host, data, position=None):
        """Append data (iterable of 16-byte bytes records) to host's log.

        NOTE(review): the original asserted data[-1] == "\\n" (marked TODO),
        which is always false for the bytes records this class handles and
        would kill the process under correct die() semantics; HostFile.write
        already validates record types and sizes, so the check was dropped.
        """
        if host not in self.hosts:
            self.hosts[host] = HostFile(self.host_path(host))
        self.hosts[host].write(data, position)

    async def read(self, host, position, batch_size):
        """Return up to batch_size records for host starting at position."""
        if host not in self.hosts:
            self.hosts[host] = HostFile(self.host_path(host))
        # Await the inner coroutine; returning it unawaited forced callers
        # to await twice.
        return await self.hosts[host].read(position, batch_size)
| import asyncio
import glob
import os.path as path
import os
import sys
# trim at startup
# mark irrecoverable data
class IntegrityError(Exception):
pass
# Read local events for "host", writing them to "broker"
async def publish_local_events(host, file_manager, broker):
position = await broker.host_position(host)
while True:
data = await file_manager.read(position)
# Handles reconnect internally.
position = await broker.write(host, data, position)
def die(check, msg):
if not check:
return
print(msg, file=sys.stderr)
os.exit(1)
# Read local events from "event_queue" in groups of "batch_size"
# and write them using "file_manager"
async def local_event_handler(host, event_queue, file_manager, batch_size):
while True:
to_write = [event_queue.get()]
for _ in range(batch_size - 1):
if event_queue.empty():
break
# Make this a list
to_write.append(event_queue.get_nowait())
file_manager.write(host, data)
# Read all changes from "broker" for other hosts and write them using
# "file_manager"
async def change_subscriber(broker, file_manager):
while True:
# Initialize.
positions = await file_manager.positions()
try:
# Read messages forever.
async for host, data, position in broker.read(positions, exclude=host):
file_manager.write(host, data, position=position)
except Exception as e:
file_manager.log(e)
# A single file. Does error handling around reads and writes as well as
# tracking the position. Writes are synchronous and reads are asynchronous.
class HostFile:
def __init__(self, host_path, error_event):
self.path = host_path
self.data_available = asyncio.Event()
self.error_event = error_event
@property
def size(self):
return os.stat(host_path).st_size
# REQUIRES: f is a file opened in "r+" mode
def safe_seek(self, f):
file_position = self.size
if file_position % 16 != 0:
file_position -= file_position % 16
f.seek(file_position)
corruption = f.read(16)
self.log(f"Corruption in file {self.path}: '{corruption}'")
f.seek(file_position)
return file_position
# data is an iterable of bytes objects.
def write(self, data, position=None):
for d in data:
die(type(d) == bytes, "Got non-bytes data to write: {d}")
# Open for reading and writing without truncating.
with open(self.path, "rb+") as f:
file_position = self.safe_seek(f)
to_write = b"".join(data)
to_write_len = len(to_write)
if position != file_position:
raise IntegrityError(
f"Tried to write at position {position} but the latest safe position is {file_position}"
)
if to_write_len % 16 != 0:
raise IntegrityError(
f"Tried to write {to_write_len} bytes to {self.path}, which is not divisible by 16"
)
bytes_written = f.write(to_write)
die(bytes_written == len(data), f"Failed to write to file {self.path}")
self.data_available.set()
self.data_available = asyncio.Event()
# Log an error message. A newline is automatically appended.
def log(self, msg):
error_path = os.path.join(self.storage_root, "error.log")
with open(error_path, "a") as f:
full_message = msg + "\n"
byte_written = f.write(full_message)
die(
bytes_written == len(full_message),
f"Failed to write to error log {error_path}",
)
self.error_event.set()
async def read(self, position, batch_size):
die(
position % 16 == 0,
f"Tried to read file {self.path} at position {position} which is not divisible by 16",
)
file_size = self.size
die(
position <= file_size,
f"Tried to read file {self.path} at position {position} which > {file_size}",
)
# Wait if there is no new data.
if file_size == position:
await self.data_available.wait()
data = ""
# Only read up to the file size, and do
# not include the last entry if it is corrupt.
# The corruption will be reported on the next
# write so this code does not do so.
file_size = self.size
if file_size % 16 != 0:
file_size -= file_size % 16
with open(self.path, "rb") as f:
f.seek(position)
desired_bytes = batch_size * 16
available_bytes = file_size - position
data = f.read(min(desired_bytes, available_bytes))
return data
# Reads and writes data for particular hosts.
class FileManager:
    """Maps host names to their HostFile logs under ``storage_root``."""

    def __init__(self, host, storage_root):
        self.storage_root = storage_root
        self.host = host
        # BUG FIX: ``self.hosts`` was never initialized, so the loop below
        # (and write()/read()) raised AttributeError.
        self.hosts = {}
        for host_path in glob.glob(self.host_path("*")):
            # BUG FIX: rpartition on the full path kept the directory prefix
            # in the host key; use the basename so keys match the plain host
            # names later passed to write()/read().
            name, _, _ = os.path.basename(host_path).rpartition(".hostlog")
            self.hosts[name] = HostFile(host_path)

    def host_path(self, host):
        """Return the on-disk path of *host*'s log file."""
        return os.path.join(self.storage_root, f"{host}.hostlog")

    @property
    def positions(self):
        """Current safe position per host (corrupt 16-byte tails excluded)."""
        positions = {name: host.size for name, host in self.hosts.items()}
        # If any files have an integrity error ignore the corrupt part. It
        # will be logged later.
        # BUG FIX: iterate .items(); iterating the dict alone yields only
        # keys and crashed while unpacking ``name, pos``.
        return {name: pos - (pos % 16) for name, pos in positions.items()}

    def write(self, host, data, position=None):
        # NOTE(review): ``data`` is an iterable of bytes chunks per
        # HostFile.write, so comparing ``data[-1]`` to the str "\n" looks
        # wrong — confirm intent before changing; message is a placeholder.
        die(data[-1] == "\n", "TODO")
        if host not in self.hosts:
            self.hosts[host] = HostFile(self.host_path(host))
        self.hosts[host].write(data, position)

    async def read(self, host, position, batch_size):
        if host not in self.hosts:
            self.hosts[host] = HostFile(self.host_path(host))
        # BUG FIX: await the coroutine; the original returned it unawaited,
        # handing callers a coroutine object instead of data.
        return await self.hosts[host].read(position, batch_size)
aps/test/svalbard_region_mask.py | kmunve/APS | 0 | 6613944 | import netCDF4
import numpy as np
import matplotlib.pyplot as plt
import georaster as gr
# data_nc = netCDF4.Dataset(r'Y:\tmp\kmu\meps\arome_arctic_pp_1km_latest.nc')
# la = data_nc.variables['land_area_fraction'][:]
#
# mask_nc = netCDF4.Dataset(r'N:\Prosjekter\APS\VarslinOmr2018Svalbard2.nc')
#
# m = mask_nc.variables['VarslingsOmr2018Land'][:]
#
# mask3003 = np.ma.masked_not_equal(m, 3003)
#
# la3003 = np.where(m==3003, 2, np.flipud(la))
# m3003 = np.where(m==3003, np.flipud(la), m)
# plt.imshow(m3003, vmin=3000, vmax= 3005)
# plt.show()
# Load the 2018 Svalbard forecast-region raster (single band GeoTIFF).
raster = r'N:\Prosjekter\APS\svalbard_regions\VarslingsOmr2018Land.tif'
rdata = gr.SingleBandRaster(raster)
#(xmin, xsize, x, ymax, y, ysize) = data.geot
# print(rdata.projection)
# Inspect the raster's extent, spatial reference and GDAL dataset handle.
print(rdata.extent, rdata.srs)
print(rdata.srs.GetProjParm('central_meridian'))
print(rdata.ds)
# Region IDs lie in the 3000-range; clamp the colour scale accordingly.
plt.imshow(rdata.r, vmin=3000, vmax=3005)
plt.show()
### ...export to netCDF
import numpy as np
import matplotlib.pyplot as plt
import georaster as gr
# data_nc = netCDF4.Dataset(r'Y:\tmp\kmu\meps\arome_arctic_pp_1km_latest.nc')
# la = data_nc.variables['land_area_fraction'][:]
#
# mask_nc = netCDF4.Dataset(r'N:\Prosjekter\APS\VarslinOmr2018Svalbard2.nc')
#
# m = mask_nc.variables['VarslingsOmr2018Land'][:]
#
# mask3003 = np.ma.masked_not_equal(m, 3003)
#
# la3003 = np.where(m==3003, 2, np.flipud(la))
# m3003 = np.where(m==3003, np.flipud(la), m)
# plt.imshow(m3003, vmin=3000, vmax= 3005)
# plt.show()
# Load the 2018 Svalbard forecast-region raster (single band GeoTIFF).
raster = r'N:\Prosjekter\APS\svalbard_regions\VarslingsOmr2018Land.tif'
rdata = gr.SingleBandRaster(raster)
#(xmin, xsize, x, ymax, y, ysize) = data.geot
# print(rdata.projection)
# Inspect the raster's extent, spatial reference and GDAL dataset handle.
print(rdata.extent, rdata.srs)
print(rdata.srs.GetProjParm('central_meridian'))
print(rdata.ds)
# Region IDs lie in the 3000-range; clamp the colour scale accordingly.
plt.imshow(rdata.r, vmin=3000, vmax=3005)
plt.show()
### ...export to netCDF
tranStitch_v4.py | jsharbrough/transStitch | 0 | 6613945 | <reponame>jsharbrough/transStitch
stats = {}
import sys
def extractScaffolds(assembly, reassemblyFile):
    """Write every scaffold named in *reassemblyFile* (a tab-separated file
    whose first two columns are scaffold names) to a new FASTA file
    'scaffoldsWithDuplications_v3_<assembly stem>.fasta'.  Records the count
    in the module-level ``stats`` dict and returns it."""
    genome = buildGenomeDict(assembly)
    wanted = []
    recs = open(reassemblyFile, 'r')
    for rec in recs:
        fields = rec.split('\t')
        for tag in ('>' + fields[0], '>' + fields[1]):
            if tag not in wanted:
                wanted.append(tag)
    recs.close()
    out = open('scaffoldsWithDuplications_v3_' + assembly[0:-6] + '.fasta', 'w')
    stats['# Reassembly Recs'] = len(wanted)
    for tag in wanted:
        out.write(tag + '\n')
        out.write(genome[tag] + '\n')
    out.close()
    return stats
def reverseComplement(seq):
    """Return the reverse complement of nucleotide string *seq*.

    Characters other than A/C/G/T (e.g. N) are kept unchanged, matching the
    original behavior.  Uses a lookup table and a single join instead of the
    original quadratic string-prepend loop.
    """
    complement = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    return ''.join(complement.get(base, base) for base in reversed(seq))
def buildSeqDict(listOfAssemblyFiles):
    """Parse several FASTA files into one dict.

    Keys are '<tag>_<record name>' where <tag> is the part of the file name
    before the first underscore; values are the uppercased sequences.
    """
    combined = {}
    name = ''
    chunks = ''
    for path in listOfAssemblyFiles:
        tag = path.split('_')[0]
        handle = open(path, 'r')
        for raw in handle:
            if raw.startswith('>'):
                if name != '':
                    combined[name] = chunks
                # Strip trailing line-end characters, drop '>' and prefix
                # the per-file tag so records from different files coexist.
                name = tag + '_' + raw.rstrip('\n\t\r')[1:]
                chunks = ''
            else:
                chunks += raw.upper().rstrip('\n\t\r')
        combined[name] = chunks
        handle.close()
    return combined
def buildGenomeDict(assembly):
    """Parse the FASTA file *assembly* into {header line: uppercase sequence}.

    Keys keep their leading '>' (trailing line-end characters stripped);
    multi-line sequences are concatenated.
    """
    records = {}
    name = ''
    chunks = []
    handle = open(assembly, 'r')
    for raw in handle:
        if raw.startswith('>'):
            if name != '':
                records[name] = ''.join(chunks)
            name = raw.rstrip('\n\t\r')
            chunks = []
        else:
            chunks.append(raw.upper().rstrip('\n\t\r'))
    records[name] = ''.join(chunks)
    handle.close()
    return records
def orient(lineSplit, scaffoldDict, transcriptDict):
    """Join two scaffolds into one super-scaffold, oriented by where a shared
    transcript aligns on each of them.

    ``lineSplit`` is one record from the stitch-recommendation file:
    [0]=transcript tag, [1]/[6]=scaffold names, [2]/[7]=aligned sequences,
    [3]/[8]=SAM flags of the two alignments (16/272 = reverse strand).
    Returns ``(tag, sequence)`` for the stitched scaffold (50 Ns between the
    parts), or ``('No stitch', 'No stitch')`` when the relative order of the
    two alignments on the transcript cannot be resolved.  Progress is
    appended to 'logfile.txt' in the working directory.
    """
    logfile = open('logfile.txt', 'a')
    scaffoldTag1 = '>' + lineSplit[1]
    scaffoldTag2 = '>' + lineSplit[6]
    scaffold1 = scaffoldDict[scaffoldTag1]
    scaffold2 = scaffoldDict[scaffoldTag2]
    orient1 = int(lineSplit[3])
    orient2 = int(lineSplit[8])
    transcriptTag = lineSplit[0]
    # Every 11th field is a transcript tag supporting this stitch.
    counter = 0
    evidence = []
    for item in lineSplit[0:-1]:
        if counter % 11 == 0:
            evidence.append(item)
        counter += 1
    currTranscript = transcriptDict[transcriptTag]
    alignment1 = lineSplit[2]
    alignment2 = lineSplit[7]
    # Take a 12-mer from each alignment (reverse-complemented for reverse
    # strand hits) and locate it on the transcript.
    if orient1 == 16 or orient1 == 272:
        alignment1 = reverseComplement(alignment1)
        alignment1 = alignment1[-12:]
    else:
        alignment1 = alignment1[0:12]
    if orient2 == 16 or orient2 == 272:
        alignment2 = reverseComplement(alignment2)
        alignment2 = alignment2[-12:]
    else:
        alignment2 = alignment2[0:12]
    numAlignments1 = 0
    numAlignments2 = 0
    # NOTE(review): transPos1/transPos2 stay unbound if the 12-mer is not
    # found — upstream presumably guarantees a hit; confirm.
    for i in range(len(currTranscript) - 11):
        currWord = currTranscript[i:i + 12]
        if alignment1 == currWord:
            transPos1 = i
            numAlignments1 += 1
        if alignment2 == currWord:
            transPos2 = i
            numAlignments2 += 1
    if orient1 == 16 or orient1 == 272:
        transPos1 = len(currTranscript) - transPos1 - 12
    if orient2 == 16 or orient2 == 272:
        transPos2 = len(currTranscript) - transPos2 - 12
    if transPos1 < transPos2:
        if orient1 == orient2:
            newScaffold = scaffold1 + 'N'*50 + scaffold2
            newScaffoldTag = '>' + scaffoldTag1[1:] + '_' + scaffoldTag2[1:] + '_stitched'
        elif orient1 == 0 or orient1 == 256:
            if orient2 == 0 or orient2 == 256:
                newScaffold = scaffold1 + 'N'*50 + scaffold2
                newScaffoldTag = '>' + scaffoldTag1[1:] + '_' + scaffoldTag2[1:] + '_stitched'
            else:
                scaffold2_revc = reverseComplement(scaffold2)
                newScaffold = scaffold1 + 'N'*50 + scaffold2_revc
                newScaffoldTag = '>' + scaffoldTag1[1:] + '_' + scaffoldTag2[1:] + '_rev_comp_stitched'
        else:
            if orient2 == 16 or orient2 == 272:
                newScaffold = scaffold1 + 'N'*50 + scaffold2
                newScaffoldTag = '>' + scaffoldTag1[1:] + '_' + scaffoldTag2[1:] + '_stitched'
            else:
                scaffold1_revc = reverseComplement(scaffold1)
                newScaffold = scaffold1_revc + 'N'*50 + scaffold2
                newScaffoldTag = '>' + scaffoldTag1[1:] + '_rev_comp_' + scaffoldTag2[1:] + '_stitched'
        logfile.write('Stitched scaffolds ' + scaffoldTag2[1:] + ' and ' + scaffoldTag1[1:] + ' based on the following transcripts:' + '\n')
        logfile.write('\t')
        for item in evidence:
            logfile.write(item + '\t')
        logfile.write('\n')
        # BUG FIX: this branch previously fell through without returning, so
        # callers received None and crashed when unpacking the result.
        logfile.close()
        return newScaffoldTag, newScaffold
    elif transPos1 > transPos2:
        if orient1 == orient2:
            newScaffold = scaffold2 + 'N'*50 + scaffold1
            newScaffoldTag = '>' + scaffoldTag2[1:] + '_' + scaffoldTag1[1:] + '_stitched'
        elif orient1 == 0 or orient1 == 256:
            if orient2 == 0 or orient2 == 256:
                newScaffold = scaffold2 + 'N'*50 + scaffold1
                newScaffoldTag = '>' + scaffoldTag2[1:] + '_' + scaffoldTag1[1:] + '_stitched'
            else:
                scaffold2_revc = reverseComplement(scaffold2)
                newScaffold = scaffold2_revc + 'N'*50 + scaffold1
                newScaffoldTag = '>' + scaffoldTag2[1:] + '_rev_comp_' + scaffoldTag1[1:] + '_stitched'
        else:
            if orient2 == 16 or orient2 == 272:
                newScaffold = scaffold2 + 'N'*50 + scaffold1
                newScaffoldTag = '>' + scaffoldTag2[1:] + '_' + scaffoldTag1[1:] + '_stitched'
            else:
                # NOTE(review): this arm concatenates scaffold1_revc first
                # although the tag names scaffold2 first — looks inconsistent
                # with the sibling arms; preserved as-is, confirm intent.
                scaffold1_revc = reverseComplement(scaffold1)
                newScaffold = scaffold1_revc + 'N'*50 + scaffold2
                newScaffoldTag = '>' + scaffoldTag2[1:] + '_' + scaffoldTag1[1:] + '_rev_comp_stitched'
        logfile.write('Stitched scaffolds ' + scaffoldTag2[1:] + ' and ' + scaffoldTag1[1:] + ' based on the following transcripts:\n')
        logfile.write('\t')
        for item in evidence:
            logfile.write(item + '\t')
        logfile.write('\n')
        logfile.close()
        return newScaffoldTag, newScaffold
    else:
        logfile.write('Could not determine stitching orientation for scaffolds ' + scaffoldTag1 + ' and ' + scaffoldTag2 + '\n')
        logfile.close()
        newScaffoldTag, newScaffold = 'No stitch', 'No stitch'
        return newScaffoldTag, newScaffold
def mergeStitches(stitchFile, genome, listOfTranscriptomes):
    """Apply stitch recommendations to *genome* and write the result.

    Reads the tab-separated recommendation file produced by stitch(), joins
    scaffold pairs via orient(), and writes the merged assembly (sorted by
    descending length) to 'stitched_scaffolds_v3_<genome stem>.fasta'.
    Progress goes to 'logfile.txt'; counts go into the module-level
    ``stats`` dict.  Python 2 code: ``len(lineSplit)/11`` relies on integer
    division.
    """
    stitchRecs = open(stitchFile,'r')
    scaffoldDict = buildGenomeDict(genome)
    transcriptDict = buildSeqDict(listOfTranscriptomes)
    logfile = open('logfile.txt','a')
    # NOTE(review): logfile is never closed in this function (orient() opens
    # and closes its own handle) — confirm this is intentional.
    logfile.write('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n' + genome[0:-6] + '\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n')
    scaffoldList = []
    stitchDict = {}
    # Index every recommended pair; the appended last element is the number
    # of supporting transcripts (11 fields per transcript record).
    for line in stitchRecs:
        lineSplit = line.split('\t')
        scaffoldTag1 = '>' + lineSplit[1]
        scaffoldTag2 = '>' + lineSplit[6]
        if scaffoldTag1 not in scaffoldList:
            scaffoldList.append(scaffoldTag1)
        if scaffoldTag2 not in scaffoldList:
            scaffoldList.append(scaffoldTag2)
        numTranscripts = len(lineSplit)/11
        stitchDict[(scaffoldTag1,scaffoldTag2)] = lineSplit + [numTranscripts]
    stitchRecs.close()
    stats['# Possible Stitches'] = len(stitchDict)
    numStitches = 0
    # NOTE(review): scaffoldList is mutated (remove) while being iterated;
    # removed scaffolds may still be visited — confirm acceptable.
    for a in scaffoldList:
        # Collect every recommended partner of scaffold ``a``.
        currPairs = []
        for b in scaffoldList:
            if (a,b) in stitchDict:
                stitchInfo = stitchDict[(a,b)]
                currPairs.append([a,b,stitchInfo[-1]])
            elif (b,a) in stitchDict:
                stitchInfo = stitchDict[(b,a)]
                currPairs.append([b,a,stitchInfo[-1]])
        if len(currPairs) == 1:
            # Unambiguous partner: stitch directly.
            pair = currPairs[0]
            stitchInfo = stitchDict[(pair[0],pair[1])]
            currStitchName, currStitch = orient(stitchInfo, scaffoldDict, transcriptDict)
            if currStitchName != 'No stitch':
                del scaffoldDict[pair[0]]
                del scaffoldDict[pair[1]]
                scaffoldList.remove(pair[0])
                scaffoldList.remove(pair[1])
                scaffoldDict[currStitchName] = currStitch
                numStitches += 1
        else:
            # Several candidate partners: sort them by transcript evidence
            # (insertion sort into sortedIndices) and prefer the strongest.
            maxEvidence = 0
            sortedIndices = []
            index = 0
            currPairIndex = -1
            for pair in currPairs:
                currEvidence = pair[2]
                if currEvidence >= maxEvidence:
                    maxEvidence = currEvidence
                    sortedIndices.append(index)
                else:
                    i = 0
                    for item in sortedIndices:
                        pair = currPairs[item]
                        numTranscripts = pair[2]
                        if currEvidence <= numTranscripts:
                            sortedIndices = sortedIndices[0:i] + [index] + sortedIndices[i:]
                        elif i < len(sortedIndices) - 1:
                            i += 1
                        else:
                            sortedIndices.append(index)
                index += 1
            sortedPairs = []
            for num in sortedIndices:
                pair = currPairs[num]
                sortedPairs.append(pair)
            newPairs = []
            for pair in currPairs:
                if pair[2] == maxEvidence:
                    newPairs.append(pair)
            if len(newPairs) > 1:
                # Tie on evidence: break it by total stitched length.
                stitchLengths = []
                maxLength = 0
                for pair in newPairs:
                    stitchLength = len(scaffoldDict[pair[0]]) + len(scaffoldDict[pair[1]])
                    if stitchLength > maxLength:
                        stitchLengths.append(pair[0:2] + [stitchLength])
                        maxLength = stitchLength
                stitches = []
                for stitch in stitchLengths:
                    if stitch[2] == maxLength:
                        stitches.append((stitch[0],stitch[1]))
                stitchInfo = stitchDict[stitches[0]]
                currStitchName, currStitch = orient(stitchInfo, scaffoldDict, transcriptDict)
                # NOTE(review): the deletions below use ``pair``, which still
                # holds the LAST loop value rather than the chosen stitch —
                # looks like a bug; preserved as-is, confirm before changing.
                if currStitchName != 'No stitch':
                    del scaffoldDict[pair[0]]
                    del scaffoldDict[pair[1]]
                    scaffoldList.remove(pair[0])
                    scaffoldList.remove(pair[1])
                    scaffoldDict[currStitchName] = currStitch
                    numStitches += 1
                else:
                    # Fall back through the evidence-sorted candidates.
                    while currStitchName == 'No stitch' and -currPairIndex < len(currPairs):
                        currPairIndex -= 1
                        nextStitch = sortedPairs[currPairIndex]
                        stitchInfo = stitchDict[(nextStitch[0],nextStitch[1])]
                        currStitchName, currStitch = orient(stitchInfo, scaffoldDict, transcriptDict)
                    if currStitchName != 'No stitch':
                        del scaffoldDict[pair[0]]
                        del scaffoldDict[pair[1]]
                        scaffoldList.remove(pair[0])
                        scaffoldList.remove(pair[1])
                        scaffoldDict[currStitchName] = currStitch
                        numStitches += 1
            elif len(newPairs) == 1:
                pair = newPairs[0]
                stitchInfo = stitchDict[(pair[0],pair[1])]
                currStitchName, currStitch = orient(stitchInfo, scaffoldDict, transcriptDict)
                if currStitchName != 'No stitch':
                    del scaffoldDict[pair[0]]
                    del scaffoldDict[pair[1]]
                    scaffoldList.remove(pair[0])
                    scaffoldList.remove(pair[1])
                    scaffoldDict[currStitchName] = currStitch
                    numStitches += 1
            else:
                while currStitchName == 'No stitch' and (-currPairIndex) < len(currPairs):
                    currPairIndex -= 1
                    nextStitch = sortedPairs[currPairIndex]
                    stitchInfo = stitchDict[(nextStitch[0],nextStitch[1])]
                    currStitchName, currStitch = orient(stitchInfo, scaffoldDict, transcriptDict)
                if currStitchName != 'No stitch':
                    del scaffoldDict[pair[0]]
                    del scaffoldDict[pair[1]]
                    scaffoldList.remove(pair[0])
                    scaffoldList.remove(pair[1])
                    scaffoldDict[currStitchName] = currStitch
                    numStitches += 1
    # Write the merged assembly, longest scaffolds first.
    outfile = open('stitched_scaffolds_v3_' + genome[0:-6] + '.fasta','w')
    lengthList = []
    lengthDict = {}
    for seq in scaffoldDict:
        currLength = len(scaffoldDict[seq])
        if currLength not in lengthList:
            lengths = [seq]
            lengthDict[currLength] = lengths
            lengthList.append(currLength)
        else:
            lengths = lengthDict[currLength]
            lengths.append(seq)
            lengthDict[currLength] = lengths
    lengthList = sorted(lengthList, reverse = True)
    for length in lengthList:
        seqs = lengthDict[length]
        for seq in seqs:
            outfile.write(seq + '\n')
            outfile.write(scaffoldDict[seq] + '\n')
    outfile.close()
    stats['# Stitches Made'] = numStitches
def compareAlignments(seq1, seq2):
    """Return True when the two aligned sequences agree over their shared
    prefix, either directly or after reverse-complementing ``seq2``.

    Equivalent to the original's four hand-rolled loops: only the first
    ``min(len(seq1), len(seq2))`` positions are compared, and non-ACGT
    characters are complemented to themselves.
    """
    m = min(len(seq1), len(seq2))
    if seq1[:m] == seq2[:m]:
        return True
    # Compare against the reverse complement of seq2 (the original inlined
    # this table twice; kept local so the function stays self-contained).
    complement = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    seq2_revc = ''.join(complement.get(base, base) for base in reversed(seq2))
    return seq1[:m] == seq2_revc[:m]
def stitch(genome, listOfSamFiles):
    """Find scaffold pairs that share uniquely-mapping transcripts.

    Reads SAM alignments of transcripts against *genome*, keeps hits whose
    MAPQ is 50 or 3 (the source comment says the second value is for
    testing), and writes:
      - 'stitch_v3_Recommendations_<stem>.txt': pairs of scaffolds linked by
        the same transcript with DIFFERENT aligned sequence (stitch
        candidates), and
      - 'duplicatedRegions_v3_<stem>.txt': pairs sharing the SAME aligned
        sequence (likely assembly duplications), with CIGAR-derived end
        positions.
    Returns the module-level ``stats`` dict.
    NOTE(review): the local variable ``stitch`` shadows this function's
    name, and pairs can be appended to stitchScaffoldList once per
    supporting transcript — confirm downstream tolerates duplicates.
    """
    outfile = open('stitch_v3_Recommendations_' + genome[0:-6] + '.txt','w')
    outfile2 = open('duplicatedRegions_v3_' + genome[0:-6] + '.txt','w')
    scaffoldDict = {}
    scaffoldList = []
    for infile in listOfSamFiles:
        currFile = open(infile,'r')
        nameSplit = infile[0:-4].split('_')
        for line in currFile:
            if line[0] != '@':
                lineSplit = line.split('\t')
                transcriptTag = nameSplit[-1] + '_' + lineSplit[0]#Critical for proper stitching!
                transcriptSplit = transcriptTag.split('_')
                # Collapse over-long tags by dropping the final component.
                if len(transcriptSplit) > 4:
                    transcriptTag = transcriptSplit[0] + '_'
                    for i in range(1,len(transcriptSplit)-1):
                        transcriptTag += transcriptSplit[i] + '_'
                    transcriptTag = transcriptTag[0:-1]
                orientation = lineSplit[1]
                scaffold = lineSplit[2]
                startPos = lineSplit[3]
                numMapLocationTag = int(lineSplit[4])
                cigar = lineSplit[5]
                alignment = lineSplit[9]
                if numMapLocationTag == 50 or numMapLocationTag == 3: #or numMapLocationTag == 3: #remove second condition for real code
                    transcriptInfo = [alignment,orientation,startPos,cigar]
                    scaffIn = False
                    for scaff in scaffoldList:
                        if scaff == scaffold:
                            scaffIn = True
                    if scaffIn == True:
                        currDict = scaffoldDict[scaffold]
                        currDict[transcriptTag] = transcriptInfo
                        scaffoldDict[scaffold] = currDict
                    else:
                        currDict = {transcriptTag:transcriptInfo}
                        scaffoldDict[scaffold] = currDict
                        scaffoldList.append(scaffold)
        currFile.close()
    stitch = False
    counter = 1
    stitchDict = {}
    stitchScaffoldList = []
    numScaffoldsSharingTranscript = 0
    # Compare every unordered scaffold pair for shared transcript tags.
    for scaffold1 in scaffoldList[0:-1]:
        scaff1Dict = scaffoldDict[scaffold1]
        scaff1TranscriptList = []
        for key in scaff1Dict:
            scaff1TranscriptList.append(key)
        for scaffold2 in scaffoldList[counter:]:
            scaff2Dict = scaffoldDict[scaffold2]
            scaff2TranscriptList = []
            for key in scaff2Dict:
                scaff2TranscriptList.append(key)
            stitchList = []
            for trans1 in scaff1TranscriptList:
                transcript1Info = scaff1Dict[trans1]
                for trans2 in scaff2TranscriptList:
                    stitch = False
                    transcript1Info = scaff1Dict[trans1]
                    align1 = transcript1Info[0]
                    transcript2Info = scaff2Dict[trans2]
                    align2 = transcript2Info[0]
                    if trans1 == trans2:
                        # Same transcript on both scaffolds: different
                        # aligned sequence => stitch candidate.
                        test = compareAlignments(align1,align2)
                        numScaffoldsSharingTranscript += 1
                        if test == False:
                            stitch = True
                    if stitch == True:
                        stitchList.append(trans1)
                        stitchDict[(scaffold1,scaffold2)] = stitchList
                        stitchScaffoldList.append((scaffold1,scaffold2))
                    elif trans1 == trans2:
                        # Same transcript, same sequence: record the spans
                        # as a duplicated region.  Aligned length is summed
                        # from the CIGAR (D/H/P consume no query bases).
                        startPos1 = int(transcript1Info[2])
                        cigar1 = transcript1Info[3]
                        startPos2 = int(transcript2Info[2])
                        cigar2 = transcript2Info[3]
                        orient1 = int(transcript1Info[1])
                        orient2 = int(transcript2Info[1])
                        totalLength = 0
                        currString = ''
                        for char in cigar1:
                            if char == 'M':
                                totalLength += int(currString)
                                currString = ''
                            elif char == 'N':
                                totalLength += int(currString)
                                currString = ''
                            elif char == 'I':
                                totalLength += int(currString)
                                currString = ''
                            elif char == 'D':
                                currString = ''
                            elif char == 'S':
                                totalLength += int(currString)
                                currString = ''
                            elif char == 'H':
                                currString = ''
                            elif char == 'P':
                                currString = ''
                            elif char == 'X':
                                totalLength += int(currString)
                                currString = ''
                            elif char == '=':
                                totalLength += int(currString)
                                currString = ''
                            else:
                                currString += char
                        if orient1 == 16 or orient1 == 272:
                            endPos1 = startPos1 - totalLength
                        elif orient1 == 0 or orient1 == 256:
                            endPos1 = startPos1 + totalLength
                        else:
                            endPos1 = False
                        totalLength = 0
                        currString = ''
                        for char in cigar2:
                            if char == 'M':
                                totalLength += int(currString)
                                currString = ''
                            elif char == 'N':
                                totalLength += int(currString)
                                currString = ''
                            elif char == 'I':
                                totalLength += int(currString)
                                currString = ''
                            elif char == 'D':
                                currString = ''
                            elif char == 'S':
                                totalLength += int(currString)
                                currString = ''
                            elif char == 'H':
                                currString = ''
                            elif char == 'P':
                                currString = ''
                            elif char == 'X':
                                totalLength += int(currString)
                                currString = ''
                            elif char == '=':
                                totalLength += int(currString)
                                currString = ''
                            else:
                                currString += char
                        if orient2 == 16 or orient2 == 272:
                            endPos2 = startPos2 - totalLength
                        elif orient2 == 0 or orient2 == 256:
                            endPos2 = startPos2 + totalLength
                        else:
                            endPos2 = False
                        if endPos1 != False and endPos2 != False:
                            outfile2.write(trans1 + '\t' + scaffold1 + '\t' + str(orient1) + '\t' + str(startPos1) + '\t' + str(endPos1) + '\t' + scaffold2 + '\t' + str(orient2) + '\t' + str(startPos2) + '\t' + str(endPos2) + '\n')
        counter += 1
    # Emit one recommendation row per supporting transcript per pair.
    for stitch in stitchScaffoldList:
        scaff1 = stitch[0]
        scaff2 = stitch[1]
        scaff1Dict = scaffoldDict[scaff1]
        scaff2Dict = scaffoldDict[scaff2]
        evidence = stitchDict[stitch]
        for transcript in evidence:
            transcriptInfo1 = scaff1Dict[transcript]
            transcriptInfo2 = scaff2Dict[transcript]
            outfile.write(transcript + '\t')
            outfile.write(scaff1 + '\t')
            for item in transcriptInfo1:
                outfile.write(item + '\t')
            outfile.write(scaff2 + '\t')
            for item in transcriptInfo2:
                outfile.write(item + '\t')
            outfile.write('\n')
    stats['# Scaffolds Mapped To'] = len(scaffoldList)
    stats['# Scaffolds Sharing a Transcript'] = numScaffoldsSharingTranscript
    outfile.close()
    outfile2.close()
    return stats
def transcriptMultiMap(samFile):
    """Report transcripts that map confidently (MAPQ 50 or 3) to more than
    one scaffold in *samFile*.

    Writes one tab-separated row per multi-mapping transcript to
    'transcript_supports_<samFile stem>.txt', listing each scaffold followed
    by (orientation flag, start position, CIGAR) for every hit.
    """
    infile = open(samFile, 'r')
    transcriptDict = {}
    transcriptList = []
    for line in infile:
        if line[0] == '@':  # skip SAM header lines
            continue
        lineSplit = line.split('\t')
        transcriptTag = lineSplit[0]
        transcriptSplit = transcriptTag.split('_')
        if len(transcriptSplit) > 3:
            # Drop the trailing component of over-long tags.  BUG FIX: the
            # original rejoin omitted the '_' after the first component,
            # producing malformed tags like 'ab_c' from 'a_b_c_d'.
            transcriptTag = '_'.join(transcriptSplit[:-1])
        orientation = lineSplit[1]
        scaffold = lineSplit[2]
        startPos = lineSplit[3]
        numMapLocationTag = int(lineSplit[4])
        cigar = lineSplit[5]
        if numMapLocationTag == 50 or numMapLocationTag == 3:
            scaffInfo = [[orientation, startPos, cigar]]
            if transcriptTag in transcriptDict:
                currDict = transcriptDict[transcriptTag]
                # BUG FIX: the original tested membership with a no-op
                # comparison (``scaffIn == True``), so repeated hits on the
                # same scaffold silently overwrote earlier ones; append now.
                if scaffold in currDict:
                    currDict[scaffold] += scaffInfo
                else:
                    currDict[scaffold] = scaffInfo
            else:
                transcriptList.append(transcriptTag)
                transcriptDict[transcriptTag] = {scaffold: scaffInfo}
    outfile = open('transcript_supports_' + samFile[0:-4] + '.txt', 'w')
    for transcript in transcriptList:
        if len(transcriptDict[transcript]) > 1:
            scaffDict = transcriptDict[transcript]
            outfile.write(transcript + '\t')
            for scaffold in scaffDict:
                outfile.write(scaffold + '\t')
                for transcriptRegion in scaffDict[scaffold]:
                    for item in transcriptRegion:
                        outfile.write(str(item) + '\t')
            outfile.write('\n')
    infile.close()
    outfile.close()
def scaffolds2Stitch(Infile):
    """Collapse a transcript-support table into a scaffold-pair table.

    *Infile* is a 'transcript_supports_*.txt' file; the output name strips
    that 20-character prefix to form 'scaffolds2stitch_<stem>.txt'.  Each
    output row lists two scaffolds plus every transcript linking them.
    """
    infile = open(Infile, 'r')
    # NOTE(review): assumes Infile starts with the 20-char prefix
    # 'transcript_supports_' — confirm for all callers.
    outfile = open('scaffolds2stitch_' + Infile[20:-4] + '.txt', 'w')
    scaffoldDict = {}
    for line in infile:
        scaffoldPairs = []
        # NOTE(review): skips any line starting with 'T' — presumably a
        # header guard, but it also drops transcripts named T*; confirm.
        if line[0] != 'T':
            scaffoldList = []
            lineSplit = line.split('\t')
            i = 0
            scaffoldList.append(lineSplit[1])
            # Scaffold names sit at every 4th field after the transcript tag.
            for item in lineSplit[1:-1]:
                if i % 4 == 0:
                    scaffoldList.append(item)
                i += 1
            for i in range(len(scaffoldList)):
                for j in range(len(scaffoldList)):
                    if scaffoldList[i] != scaffoldList[j]:
                        if (scaffoldList[i], scaffoldList[j]) not in scaffoldPairs and (scaffoldList[j], scaffoldList[i]) not in scaffoldPairs:
                            scaffoldPairs.append((scaffoldList[i], scaffoldList[j]))
            for pair in scaffoldPairs:
                if pair not in scaffoldDict:
                    scaffoldDict[pair] = [lineSplit[0]]
                else:
                    scaffoldDict[pair] += [lineSplit[0]]
    outfile.write('Scaffold 1' + '\t' + 'Scaffold 2' + '\t' + 'Transcripts Supporting' + '\n')
    for pair in scaffoldDict:
        # BUG FIX: 'print pair' was Python-2-only syntax (SyntaxError under
        # Python 3); the parenthesized call prints the same tuple repr on
        # both interpreters.
        print(pair)
        outfile.write(pair[0] + '\t' + pair[1])
        transcriptList = scaffoldDict[pair]
        for transcript in transcriptList:
            outfile.write('\t' + transcript)
        outfile.write('\n')
    infile.close()
    outfile.close()
import sys
def matchMisMatch(i, j):
    """Score one alignment column: +2 for a match, -1 for a mismatch."""
    return 2 if i == j else -1
def align(Infile):
    """Globally align the first sequence of FASTA file *Infile* against each
    of the remaining sequences (Needleman-Wunsch: match +2, mismatch -1,
    gap -2) and write the pairwise alignments to 'alignment.fasta'.
    """
    infile = open(Infile, 'r')
    outfile = open('alignment.fasta', 'w')
    seqDict = {}
    currentSeq = ''
    currentSeqName = ''
    for line in infile:
        if line[0] == '>' and currentSeqName == '':
            currentSeqName = line[0:-1]
        elif line[0] == '>':
            seqDict[currentSeqName] = currentSeq
            currentSeqName = line.rstrip('\n\t\r')
            currentSeq = ''
        else:
            currentSeq += line.rstrip('\n\t\r').upper()
    seqDict[currentSeqName] = currentSeq
    seq1 = ''
    for name in seqDict:
        if seq1 == '':
            seq1 = seqDict[name]  # first record is the reference
            continue
        seq2 = seqDict[name]
        # Fill the (m+1) x (n+1) dynamic-programming score table.
        a = {}
        m = len(seq1)
        n = len(seq2)
        gapScore = -2
        for i in range(m + 1):
            a[i, 0] = i * gapScore
        for j in range(n + 1):
            a[0, j] = j * gapScore
        for i in range(1, m + 1):
            for j in range(1, n + 1):
                a[i, j] = max(a[i - 1, j] + gapScore,
                              a[i - 1, j - 1] + matchMisMatch(seq1[i - 1], seq2[j - 1]),
                              a[i, j - 1] + gapScore)
        # Trace back from the bottom-right corner.
        x = m
        y = n
        newSeq1 = ''
        newSeq2 = ''
        while x > 0 and y > 0:
            left, diag, up = a[x, y - 1], a[x - 1, y - 1], a[x - 1, y]
            if left > diag and left > up:
                newSeq1 = '-' + newSeq1
                newSeq2 = seq2[y - 1] + newSeq2
                y = y - 1
            elif diag >= up:
                newSeq1 = seq1[x - 1] + newSeq1
                newSeq2 = seq2[y - 1] + newSeq2
                x = x - 1
                y = y - 1
            else:
                newSeq1 = seq1[x - 1] + newSeq1
                newSeq2 = '-' + newSeq2
                x = x - 1
        # BUG FIX: finish the traceback once one sequence is exhausted; the
        # original stopped here and silently dropped the unaligned leading
        # prefix of the longer sequence.
        while x > 0:
            newSeq1 = seq1[x - 1] + newSeq1
            newSeq2 = '-' + newSeq2
            x = x - 1
        while y > 0:
            newSeq1 = '-' + newSeq1
            newSeq2 = seq2[y - 1] + newSeq2
            y = y - 1
        outfile.write('>Seq1\n')
        outfile.write(newSeq1 + '\n')
        outfile.write('>Seq2\n')
        outfile.write(newSeq2 + '\n')
    infile.close()
    outfile.close()
def runCommand():
    """Command-line dispatcher.

    Usage:
      tranStitch stitch <genome.fasta> <alignments.sam | alignments.fofn>
      tranStitch merge  <recs.txt> <genome.fasta> <reads.fasta | reads.fofn>
    A .fofn argument is a file of file names, one path per line.  Returns an
    error string (also printed) on malformed arguments.
    """
    if sys.argv[1] == 'stitch':
        sams = sys.argv[3]
        if sams[-4:] == 'fofn':
            samList = []
            infile = open(sams, 'r')
            for line in infile:
                samList.append(line[0:-1])
            infile.close()
            stitch(sys.argv[2], samList)
        elif sams[-3:] == 'sam':
            samList = [sams]
            stitch(sys.argv[2], samList)
        else:
            # BUG FIX: py2-only 'print x' statements converted to the
            # parenthesized form, which behaves identically for one string
            # under both interpreters.
            print('Bad formatting')
            return 'Bad formatting'
    elif sys.argv[1] == 'merge':
        reads = sys.argv[4]
        if reads[-4:] == 'fofn':
            readList = []
            infile = open(reads, 'r')
            for line in infile:
                readList.append(line[0:-1])
            infile.close()
            mergeStitches(sys.argv[2], sys.argv[3], readList)
        elif reads[-5:] == 'fasta':
            # BUG FIX: mergeStitches/buildSeqDict iterate a LIST of paths;
            # the original passed the bare string, so each character was
            # treated as a file name.
            mergeStitches(sys.argv[2], sys.argv[3], [sys.argv[4]])
        else:
            print('Bad formatting')
            return 'Bad formatting'
    else:
        print('No command specified')
        return 'No command specified'
runCommand()
| stats = {}
import sys
def extractScaffolds(assembly, reassemblyFile):
    """Write every scaffold named in *reassemblyFile* (a tab-separated file
    whose first two columns are scaffold names) to a new FASTA file
    'scaffoldsWithDuplications_v3_<assembly stem>.fasta'.  Records the count
    in the module-level ``stats`` dict and returns it."""
    genome = buildGenomeDict(assembly)
    wanted = []
    recs = open(reassemblyFile, 'r')
    for rec in recs:
        fields = rec.split('\t')
        for tag in ('>' + fields[0], '>' + fields[1]):
            if tag not in wanted:
                wanted.append(tag)
    recs.close()
    out = open('scaffoldsWithDuplications_v3_' + assembly[0:-6] + '.fasta', 'w')
    stats['# Reassembly Recs'] = len(wanted)
    for tag in wanted:
        out.write(tag + '\n')
        out.write(genome[tag] + '\n')
    out.close()
    return stats
def reverseComplement(seq):
    """Return the reverse complement of nucleotide string *seq*.

    Characters other than A/C/G/T (e.g. N) are kept unchanged, matching the
    original behavior.  Uses a lookup table and a single join instead of the
    original quadratic string-prepend loop.
    """
    complement = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    return ''.join(complement.get(base, base) for base in reversed(seq))
def buildSeqDict(listOfAssemblyFiles):
    """Parse several FASTA files into one dict.

    Keys are '<tag>_<record name>' where <tag> is the part of the file name
    before the first underscore; values are the uppercased sequences.
    """
    combined = {}
    name = ''
    chunks = ''
    for path in listOfAssemblyFiles:
        tag = path.split('_')[0]
        handle = open(path, 'r')
        for raw in handle:
            if raw.startswith('>'):
                if name != '':
                    combined[name] = chunks
                # Strip trailing line-end characters, drop '>' and prefix
                # the per-file tag so records from different files coexist.
                name = tag + '_' + raw.rstrip('\n\t\r')[1:]
                chunks = ''
            else:
                chunks += raw.upper().rstrip('\n\t\r')
        combined[name] = chunks
        handle.close()
    return combined
def buildGenomeDict(assembly):
    """Parse the FASTA file *assembly* into {header line: uppercase sequence}.

    Keys keep their leading '>' (trailing line-end characters stripped);
    multi-line sequences are concatenated.
    """
    records = {}
    name = ''
    chunks = []
    handle = open(assembly, 'r')
    for raw in handle:
        if raw.startswith('>'):
            if name != '':
                records[name] = ''.join(chunks)
            name = raw.rstrip('\n\t\r')
            chunks = []
        else:
            chunks.append(raw.upper().rstrip('\n\t\r'))
    records[name] = ''.join(chunks)
    handle.close()
    return records
def orient(lineSplit, scaffoldDict, transcriptDict):
    """Join two scaffolds into one super-scaffold, oriented by where a shared
    transcript aligns on each of them.

    ``lineSplit`` is one record from the stitch-recommendation file:
    [0]=transcript tag, [1]/[6]=scaffold names, [2]/[7]=aligned sequences,
    [3]/[8]=SAM flags of the two alignments (16/272 = reverse strand).
    Returns ``(tag, sequence)`` for the stitched scaffold (50 Ns between the
    parts), or ``('No stitch', 'No stitch')`` when the relative order of the
    two alignments on the transcript cannot be resolved.  Progress is
    appended to 'logfile.txt' in the working directory.
    """
    logfile = open('logfile.txt', 'a')
    scaffoldTag1 = '>' + lineSplit[1]
    scaffoldTag2 = '>' + lineSplit[6]
    scaffold1 = scaffoldDict[scaffoldTag1]
    scaffold2 = scaffoldDict[scaffoldTag2]
    orient1 = int(lineSplit[3])
    orient2 = int(lineSplit[8])
    transcriptTag = lineSplit[0]
    # Every 11th field is a transcript tag supporting this stitch.
    counter = 0
    evidence = []
    for item in lineSplit[0:-1]:
        if counter % 11 == 0:
            evidence.append(item)
        counter += 1
    currTranscript = transcriptDict[transcriptTag]
    alignment1 = lineSplit[2]
    alignment2 = lineSplit[7]
    # Take a 12-mer from each alignment (reverse-complemented for reverse
    # strand hits) and locate it on the transcript.
    if orient1 == 16 or orient1 == 272:
        alignment1 = reverseComplement(alignment1)
        alignment1 = alignment1[-12:]
    else:
        alignment1 = alignment1[0:12]
    if orient2 == 16 or orient2 == 272:
        alignment2 = reverseComplement(alignment2)
        alignment2 = alignment2[-12:]
    else:
        alignment2 = alignment2[0:12]
    numAlignments1 = 0
    numAlignments2 = 0
    # NOTE(review): transPos1/transPos2 stay unbound if the 12-mer is not
    # found — upstream presumably guarantees a hit; confirm.
    for i in range(len(currTranscript) - 11):
        currWord = currTranscript[i:i + 12]
        if alignment1 == currWord:
            transPos1 = i
            numAlignments1 += 1
        if alignment2 == currWord:
            transPos2 = i
            numAlignments2 += 1
    if orient1 == 16 or orient1 == 272:
        transPos1 = len(currTranscript) - transPos1 - 12
    if orient2 == 16 or orient2 == 272:
        transPos2 = len(currTranscript) - transPos2 - 12
    if transPos1 < transPos2:
        if orient1 == orient2:
            newScaffold = scaffold1 + 'N'*50 + scaffold2
            newScaffoldTag = '>' + scaffoldTag1[1:] + '_' + scaffoldTag2[1:] + '_stitched'
        elif orient1 == 0 or orient1 == 256:
            if orient2 == 0 or orient2 == 256:
                newScaffold = scaffold1 + 'N'*50 + scaffold2
                newScaffoldTag = '>' + scaffoldTag1[1:] + '_' + scaffoldTag2[1:] + '_stitched'
            else:
                scaffold2_revc = reverseComplement(scaffold2)
                newScaffold = scaffold1 + 'N'*50 + scaffold2_revc
                newScaffoldTag = '>' + scaffoldTag1[1:] + '_' + scaffoldTag2[1:] + '_rev_comp_stitched'
        else:
            if orient2 == 16 or orient2 == 272:
                newScaffold = scaffold1 + 'N'*50 + scaffold2
                newScaffoldTag = '>' + scaffoldTag1[1:] + '_' + scaffoldTag2[1:] + '_stitched'
            else:
                scaffold1_revc = reverseComplement(scaffold1)
                newScaffold = scaffold1_revc + 'N'*50 + scaffold2
                newScaffoldTag = '>' + scaffoldTag1[1:] + '_rev_comp_' + scaffoldTag2[1:] + '_stitched'
        logfile.write('Stitched scaffolds ' + scaffoldTag2[1:] + ' and ' + scaffoldTag1[1:] + ' based on the following transcripts:' + '\n')
        logfile.write('\t')
        for item in evidence:
            logfile.write(item + '\t')
        logfile.write('\n')
        # BUG FIX: this branch previously fell through without returning, so
        # callers received None and crashed when unpacking the result.
        logfile.close()
        return newScaffoldTag, newScaffold
    elif transPos1 > transPos2:
        if orient1 == orient2:
            newScaffold = scaffold2 + 'N'*50 + scaffold1
            newScaffoldTag = '>' + scaffoldTag2[1:] + '_' + scaffoldTag1[1:] + '_stitched'
        elif orient1 == 0 or orient1 == 256:
            if orient2 == 0 or orient2 == 256:
                newScaffold = scaffold2 + 'N'*50 + scaffold1
                newScaffoldTag = '>' + scaffoldTag2[1:] + '_' + scaffoldTag1[1:] + '_stitched'
            else:
                scaffold2_revc = reverseComplement(scaffold2)
                newScaffold = scaffold2_revc + 'N'*50 + scaffold1
                newScaffoldTag = '>' + scaffoldTag2[1:] + '_rev_comp_' + scaffoldTag1[1:] + '_stitched'
        else:
            if orient2 == 16 or orient2 == 272:
                newScaffold = scaffold2 + 'N'*50 + scaffold1
                newScaffoldTag = '>' + scaffoldTag2[1:] + '_' + scaffoldTag1[1:] + '_stitched'
            else:
                # NOTE(review): this arm concatenates scaffold1_revc first
                # although the tag names scaffold2 first — looks inconsistent
                # with the sibling arms; preserved as-is, confirm intent.
                scaffold1_revc = reverseComplement(scaffold1)
                newScaffold = scaffold1_revc + 'N'*50 + scaffold2
                newScaffoldTag = '>' + scaffoldTag2[1:] + '_' + scaffoldTag1[1:] + '_rev_comp_stitched'
        logfile.write('Stitched scaffolds ' + scaffoldTag2[1:] + ' and ' + scaffoldTag1[1:] + ' based on the following transcripts:\n')
        logfile.write('\t')
        for item in evidence:
            logfile.write(item + '\t')
        logfile.write('\n')
        logfile.close()
        return newScaffoldTag, newScaffold
    else:
        logfile.write('Could not determine stitching orientation for scaffolds ' + scaffoldTag1 + ' and ' + scaffoldTag2 + '\n')
        logfile.close()
        newScaffoldTag, newScaffold = 'No stitch', 'No stitch'
        return newScaffoldTag, newScaffold
def mergeStitches(stitchFile, genome, listOfTranscriptomes):
    """Apply the recommendations in ``stitchFile`` to ``genome``.

    Reads the tab-separated records produced by ``stitch()``, greedily picks
    the best-supported partner for each scaffold, merges each chosen pair via
    ``orient()`` and writes the resulting assembly (longest sequence first)
    to ``stitched_scaffolds_v3_<genome>.fasta``.

    Relies on module-level helpers ``buildGenomeDict``, ``buildSeqDict``,
    ``orient`` and a module-level ``stats`` dict defined elsewhere in this
    file.

    NOTE(review): written for Python 2 — see the integer division below.
    """
    stitchRecs = open(stitchFile,'r')
    scaffoldDict = buildGenomeDict(genome)
    transcriptDict = buildSeqDict(listOfTranscriptomes)
    logfile = open('logfile.txt','a')
    logfile.write('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n' + genome[0:-6] + '\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n')
    scaffoldList = []
    stitchDict = {}
    # Index every recommendation by its (scaffold1, scaffold2) pair; each
    # supporting transcript contributes 11 tab-separated fields.
    for line in stitchRecs:
        lineSplit = line.split('\t')
        scaffoldTag1 = '>' + lineSplit[1]
        scaffoldTag2 = '>' + lineSplit[6]
        if scaffoldTag1 not in scaffoldList:
            scaffoldList.append(scaffoldTag1)
        if scaffoldTag2 not in scaffoldList:
            scaffoldList.append(scaffoldTag2)
        # Python 2 integer division — TODO confirm before porting to Python 3.
        numTranscripts = len(lineSplit)/11
        stitchDict[(scaffoldTag1,scaffoldTag2)] = lineSplit + [numTranscripts]
    stitchRecs.close()
    stats['# Possible Stitches'] = len(stitchDict)
    numStitches = 0
    # Greedy pass over scaffolds.
    # NOTE(review): scaffoldList is mutated (remove) while being iterated
    # here and in the inner loop — confirm this is intended.
    for a in scaffoldList:
        currPairs = []
        for b in scaffoldList:
            if (a,b) in stitchDict:
                stitchInfo = stitchDict[(a,b)]
                currPairs.append([a,b,stitchInfo[-1]])
            elif (b,a) in stitchDict:
                stitchInfo = stitchDict[(b,a)]
                currPairs.append([b,a,stitchInfo[-1]])
        if len(currPairs) == 1:
            # Only one candidate partner: stitch it if orient() succeeds.
            pair = currPairs[0]
            stitchInfo = stitchDict[(pair[0],pair[1])]
            currStitchName, currStitch = orient(stitchInfo, scaffoldDict, transcriptDict)
            if currStitchName != 'No stitch':
                del scaffoldDict[pair[0]]
                del scaffoldDict[pair[1]]
                scaffoldList.remove(pair[0])
                scaffoldList.remove(pair[1])
                scaffoldDict[currStitchName] = currStitch
                numStitches += 1
        else:
            # Several candidates: sort them by transcript support, then try
            # the best-supported (tie-broken by combined length) first.
            maxEvidence = 0
            sortedIndices = []
            index = 0
            currPairIndex = -1
            for pair in currPairs:
                currEvidence = pair[2]
                if currEvidence >= maxEvidence:
                    maxEvidence = currEvidence
                    sortedIndices.append(index)
                else:
                    # Insertion sort of this pair's index by evidence count.
                    i = 0
                    for item in sortedIndices:
                        pair = currPairs[item]
                        numTranscripts = pair[2]
                        if currEvidence <= numTranscripts:
                            sortedIndices = sortedIndices[0:i] + [index] + sortedIndices[i:]
                        elif i < len(sortedIndices) - 1:
                            i += 1
                        else:
                            sortedIndices.append(index)
                index += 1
            sortedPairs = []
            for num in sortedIndices:
                pair = currPairs[num]
                sortedPairs.append(pair)
            newPairs = []
            for pair in currPairs:
                if pair[2] == maxEvidence:
                    newPairs.append(pair)
            if len(newPairs) > 1:
                # Tie on evidence: prefer the pair with the longest combined
                # scaffold length.
                stitchLengths = []
                maxLength = 0
                for pair in newPairs:
                    stitchLength = len(scaffoldDict[pair[0]]) + len(scaffoldDict[pair[1]])
                    if stitchLength > maxLength:
                        stitchLengths.append(pair[0:2] + [stitchLength])
                        maxLength = stitchLength
                stitches = []
                for stitch in stitchLengths:
                    if stitch[2] == maxLength:
                        stitches.append((stitch[0],stitch[1]))
                stitchInfo = stitchDict[stitches[0]]
                currStitchName, currStitch = orient(stitchInfo, scaffoldDict, transcriptDict)
                # NOTE(review): the del/remove calls below use 'pair' left
                # over from the loops above, not necessarily the pair that
                # was actually stitched (stitches[0]/nextStitch) — verify.
                if currStitchName != 'No stitch':
                    del scaffoldDict[pair[0]]
                    del scaffoldDict[pair[1]]
                    scaffoldList.remove(pair[0])
                    scaffoldList.remove(pair[1])
                    scaffoldDict[currStitchName] = currStitch
                    numStitches += 1
                else:
                    # Fall back through the remaining candidates, worst-last.
                    while currStitchName == 'No stitch' and -currPairIndex < len(currPairs):
                        currPairIndex -= 1
                        nextStitch = sortedPairs[currPairIndex]
                        stitchInfo = stitchDict[(nextStitch[0],nextStitch[1])]
                        currStitchName, currStitch = orient(stitchInfo, scaffoldDict, transcriptDict)
                        if currStitchName != 'No stitch':
                            del scaffoldDict[pair[0]]
                            del scaffoldDict[pair[1]]
                            scaffoldList.remove(pair[0])
                            scaffoldList.remove(pair[1])
                            scaffoldDict[currStitchName] = currStitch
                            numStitches += 1
            elif len(newPairs) == 1:
                pair = newPairs[0]
                stitchInfo = stitchDict[(pair[0],pair[1])]
                currStitchName, currStitch = orient(stitchInfo, scaffoldDict, transcriptDict)
                if currStitchName != 'No stitch':
                    del scaffoldDict[pair[0]]
                    del scaffoldDict[pair[1]]
                    scaffoldList.remove(pair[0])
                    scaffoldList.remove(pair[1])
                    scaffoldDict[currStitchName] = currStitch
                    numStitches += 1
                else:
                    while currStitchName == 'No stitch' and (-currPairIndex) < len(currPairs):
                        currPairIndex -= 1
                        nextStitch = sortedPairs[currPairIndex]
                        stitchInfo = stitchDict[(nextStitch[0],nextStitch[1])]
                        currStitchName, currStitch = orient(stitchInfo, scaffoldDict, transcriptDict)
                        if currStitchName != 'No stitch':
                            del scaffoldDict[pair[0]]
                            del scaffoldDict[pair[1]]
                            scaffoldList.remove(pair[0])
                            scaffoldList.remove(pair[1])
                            scaffoldDict[currStitchName] = currStitch
                            numStitches += 1
    # Emit the (possibly stitched) scaffolds, longest first.
    outfile = open('stitched_scaffolds_v3_' + genome[0:-6] + '.fasta','w')
    lengthList = []
    lengthDict = {}
    for seq in scaffoldDict:
        currLength = len(scaffoldDict[seq])
        if currLength not in lengthList:
            lengths = [seq]
            lengthDict[currLength] = lengths
            lengthList.append(currLength)
        else:
            lengths = lengthDict[currLength]
            lengths.append(seq)
            lengthDict[currLength] = lengths
    lengthList = sorted(lengthList, reverse = True)
    for length in lengthList:
        seqs = lengthDict[length]
        for seq in seqs:
            outfile.write(seq + '\n')
            outfile.write(scaffoldDict[seq] + '\n')
    outfile.close()
    stats['# Stitches Made'] = numStitches
def compareAlignments(seq1, seq2):
    """Return True if the two aligned sequences look like the same region.

    Compares the first ``min(len(seq1), len(seq2))`` characters of ``seq1``
    against ``seq2`` directly and, failing that, against the reverse
    complement of ``seq2``.  Characters outside A/C/G/T are kept unchanged
    when complementing (matching the original behaviour).

    Improvements over the original: the reverse-complement logic was
    duplicated for both length orderings and every comparison scanned the
    full prefix even after a mismatch; this version deduplicates the helper
    and short-circuits via string equality.
    """
    def _revcomp(seq):
        # Reverse complement; unknown characters pass through unchanged.
        comp = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
        return ''.join(comp.get(nuc, nuc) for nuc in reversed(seq))

    n = min(len(seq1), len(seq2))
    if seq1[:n] == seq2[:n]:
        return True
    return seq1[:n] == _revcomp(seq2)[:n]
def stitch(genome, listOfSamFiles):
    """Scan SAM alignments and recommend scaffold pairs to stitch.

    For every transcript that maps (MAPQ 50, or 3 for test data) to two
    different scaffolds with *different* alignment sequences, a stitch
    recommendation is written to ``stitch_v3_Recommendations_<genome>.txt``.
    Transcripts whose alignments on two scaffolds are identical are treated
    as duplicated regions and reported (with CIGAR-derived end positions) in
    ``duplicatedRegions_v3_<genome>.txt``.

    Updates and returns the module-level ``stats`` dict (defined elsewhere
    in this file).
    """
    outfile = open('stitch_v3_Recommendations_' + genome[0:-6] + '.txt','w')
    outfile2 = open('duplicatedRegions_v3_' + genome[0:-6] + '.txt','w')
    scaffoldDict = {}
    scaffoldList = []
    # Pass 1: collect, per scaffold, the well-mapped transcript alignments.
    for infile in listOfSamFiles:
        currFile = open(infile,'r')
        nameSplit = infile[0:-4].split('_')
        for line in currFile:
            if line[0] != '@':
                lineSplit = line.split('\t')
                # Prefix the read name with the sample tag from the SAM
                # file name so reads from different files stay distinct.
                transcriptTag = nameSplit[-1] + '_' + lineSplit[0]#Critical for proper stitching!
                transcriptSplit = transcriptTag.split('_')
                if len(transcriptSplit) > 4:
                    # Drop the final "_"-delimited token of long tags.
                    transcriptTag = transcriptSplit[0] + '_'
                    for i in range(1,len(transcriptSplit)-1):
                        transcriptTag += transcriptSplit[i] + '_'
                    transcriptTag = transcriptTag[0:-1]
                orientation = lineSplit[1]
                scaffold = lineSplit[2]
                startPos = lineSplit[3]
                numMapLocationTag = int(lineSplit[4])
                cigar = lineSplit[5]
                alignment = lineSplit[9]
                if numMapLocationTag == 50 or numMapLocationTag == 3: #or numMapLocationTag == 3: #remove second condition for real code
                    transcriptInfo = [alignment,orientation,startPos,cigar]
                    scaffIn = False
                    for scaff in scaffoldList:
                        if scaff == scaffold:
                            scaffIn = True
                    if scaffIn == True:
                        currDict = scaffoldDict[scaffold]
                        currDict[transcriptTag] = transcriptInfo
                        scaffoldDict[scaffold] = currDict
                    else:
                        currDict = {transcriptTag:transcriptInfo}
                        scaffoldDict[scaffold] = currDict
                        scaffoldList.append(scaffold)
        currFile.close()
    stitch = False
    counter = 1
    stitchDict = {}
    stitchScaffoldList = []
    numScaffoldsSharingTranscript = 0
    # Pass 2: compare every pair of scaffolds for shared transcripts.
    for scaffold1 in scaffoldList[0:-1]:
        scaff1Dict = scaffoldDict[scaffold1]
        scaff1TranscriptList = []
        for key in scaff1Dict:
            scaff1TranscriptList.append(key)
        for scaffold2 in scaffoldList[counter:]:
            scaff2Dict = scaffoldDict[scaffold2]
            scaff2TranscriptList = []
            for key in scaff2Dict:
                scaff2TranscriptList.append(key)
            stitchList = []
            for trans1 in scaff1TranscriptList:
                transcript1Info = scaff1Dict[trans1]
                for trans2 in scaff2TranscriptList:
                    stitch = False
                    transcript1Info = scaff1Dict[trans1]
                    align1 = transcript1Info[0]
                    transcript2Info = scaff2Dict[trans2]
                    align2 = transcript2Info[0]
                    if trans1 == trans2:
                        test = compareAlignments(align1,align2)
                        numScaffoldsSharingTranscript += 1
                        if test == False:
                            stitch = True
                    if stitch == True:
                        # Different alignments of the same transcript on two
                        # scaffolds: candidate stitch.
                        stitchList.append(trans1)
                        stitchDict[(scaffold1,scaffold2)] = stitchList
                        stitchScaffoldList.append((scaffold1,scaffold2))
                    elif trans1 == trans2:
                        # Identical alignments: duplicated region.  Derive
                        # alignment end positions by summing the
                        # query-consuming CIGAR operation lengths.
                        startPos1 = int(transcript1Info[2])
                        cigar1 = transcript1Info[3]
                        startPos2 = int(transcript2Info[2])
                        cigar2 = transcript2Info[3]
                        orient1 = int(transcript1Info[1])
                        orient2 = int(transcript2Info[1])
                        totalLength = 0
                        currString = ''
                        for char in cigar1:
                            if char == 'M':
                                totalLength += int(currString)
                                currString = ''
                            elif char == 'N':
                                totalLength += int(currString)
                                currString = ''
                            elif char == 'I':
                                totalLength += int(currString)
                                currString = ''
                            elif char == 'D':
                                currString = ''
                            elif char == 'S':
                                totalLength += int(currString)
                                currString = ''
                            elif char == 'H':
                                currString = ''
                            elif char == 'P':
                                currString = ''
                            elif char == 'X':
                                totalLength += int(currString)
                                currString = ''
                            elif char == '=':
                                totalLength += int(currString)
                                currString = ''
                            else:
                                currString += char
                        # SAM FLAG 16/272 = reverse strand, 0/256 = forward.
                        if orient1 == 16 or orient1 == 272:
                            endPos1 = startPos1 - totalLength
                        elif orient1 == 0 or orient1 == 256:
                            endPos1 = startPos1 + totalLength
                        else:
                            endPos1 = False
                        totalLength = 0
                        currString = ''
                        for char in cigar2:
                            if char == 'M':
                                totalLength += int(currString)
                                currString = ''
                            elif char == 'N':
                                totalLength += int(currString)
                                currString = ''
                            elif char == 'I':
                                totalLength += int(currString)
                                currString = ''
                            elif char == 'D':
                                currString = ''
                            elif char == 'S':
                                totalLength += int(currString)
                                currString = ''
                            elif char == 'H':
                                currString = ''
                            elif char == 'P':
                                currString = ''
                            elif char == 'X':
                                totalLength += int(currString)
                                currString = ''
                            elif char == '=':
                                totalLength += int(currString)
                                currString = ''
                            else:
                                currString += char
                        if orient2 == 16 or orient2 == 272:
                            endPos2 = startPos2 - totalLength
                        elif orient2 == 0 or orient2 == 256:
                            endPos2 = startPos2 + totalLength
                        else:
                            endPos2 = False
                        if endPos1 != False and endPos2 != False:
                            outfile2.write(trans1 + '\t' + scaffold1 + '\t' + str(orient1) + '\t' + str(startPos1) + '\t' + str(endPos1) + '\t' + scaffold2 + '\t' + str(orient2) + '\t' + str(startPos2) + '\t' + str(endPos2) + '\n')
        counter += 1
    # Pass 3: write one recommendation line per stitched scaffold pair, with
    # 11 tab-separated fields per supporting transcript.
    for stitch in stitchScaffoldList:
        scaff1 = stitch[0]
        scaff2 = stitch[1]
        scaff1Dict = scaffoldDict[scaff1]
        scaff2Dict = scaffoldDict[scaff2]
        evidence = stitchDict[stitch]
        for transcript in evidence:
            transcriptInfo1 = scaff1Dict[transcript]
            transcriptInfo2 = scaff2Dict[transcript]
            outfile.write(transcript + '\t')
            outfile.write(scaff1 + '\t')
            for item in transcriptInfo1:
                outfile.write(item + '\t')
            outfile.write(scaff2 + '\t')
            for item in transcriptInfo2:
                outfile.write(item + '\t')
            outfile.write('\n')
    stats['# Scaffolds Mapped To'] = len(scaffoldList)
    stats['# Scaffolds Sharing a Transcript'] = numScaffoldsSharingTranscript
    outfile.close()
    outfile2.close()
    return stats
def transcriptMultiMap(samFile):
    """Report transcripts that map well to more than one scaffold.

    Scans ``samFile`` keeping records with MAPQ 50 (or 3, matching the test
    filter used in ``stitch``) and writes, for each transcript seen on two
    or more scaffolds, a tab-separated line to
    ``transcript_supports_<samFile>.txt``:
    transcript, then per scaffold its name followed by
    (orientation, startPos, cigar) for each alignment on it.

    Bug fixes versus the original:
    * ``scaffIn == True`` was a comparison, not an assignment, so repeat
      hits on the same scaffold *overwrote* earlier alignments instead of
      being appended; alignments are now accumulated per scaffold.
    * long transcript tags dropped the first underscore when the trailing
      token was stripped (e.g. 'a_b_c_d' became 'ab_c'); the rejoin now
      matches the intent (and the logic in ``stitch``), yielding 'a_b_c'.
    """
    infile = open(samFile, 'r')
    transcriptDict = {}
    transcriptList = []          # preserves first-seen transcript order
    for line in infile:
        if line[0] == '@':       # skip SAM header lines
            continue
        lineSplit = line.split('\t')
        transcriptTag = lineSplit[0]
        transcriptSplit = transcriptTag.split('_')
        if len(transcriptSplit) > 3:
            # Drop the final "_"-delimited token of long tags.
            transcriptTag = '_'.join(transcriptSplit[:-1])
        orientation = lineSplit[1]
        scaffold = lineSplit[2]
        startPos = lineSplit[3]
        numMapLocationTag = int(lineSplit[4])
        cigar = lineSplit[5]
        if numMapLocationTag == 50 or numMapLocationTag == 3:
            scaffInfo = [orientation, startPos, cigar]
            if transcriptTag not in transcriptDict:
                transcriptList.append(transcriptTag)
                transcriptDict[transcriptTag] = {}
            currDict = transcriptDict[transcriptTag]
            if scaffold in currDict:
                currDict[scaffold].append(scaffInfo)
            else:
                currDict[scaffold] = [scaffInfo]
    outfile = open('transcript_supports_' + samFile[0:-4] + '.txt', 'w')
    for transcript in transcriptList:
        if len(transcriptDict[transcript]) > 1:
            scaffDict = transcriptDict[transcript]
            outfile.write(transcript + '\t')
            for scaffold in scaffDict:
                outfile.write(scaffold + '\t')
                for transcriptRegion in scaffDict[scaffold]:
                    for item in transcriptRegion:
                        outfile.write(str(item) + '\t')
            outfile.write('\n')
    infile.close()
    outfile.close()
def scaffolds2Stitch(Infile):
    """Condense a ``transcript_supports_*`` report into scaffold pairs.

    For each input line (skipping any line starting with 'T'), collects the
    scaffold names (every 4th field after the transcript, assuming one
    alignment region per scaffold) and records each unordered scaffold pair
    together with the transcripts supporting it.  Writes
    ``scaffolds2stitch_<suffix>.txt`` where <suffix> strips the 20-character
    'transcript_supports_' prefix and the '.txt' extension.

    Fixes versus the original:
    * ``print pair`` (Python 2 statement) replaced with ``print(pair)``,
      which prints the same tuple repr on both Python 2 and 3.
    * the scaffold at field 1 was appended twice (once before the loop and
      again at i == 0); the redundant pre-append is removed — output is
      unchanged because pairs were already deduplicated by value.
    """
    infile = open(Infile, 'r')
    outfile = open('scaffolds2stitch_' + Infile[20:-4] + '.txt', 'w')
    scaffoldDict = {}
    for line in infile:
        scaffoldPairs = []
        if line[0] != 'T':
            lineSplit = line.split('\t')
            scaffoldList = []
            i = 0
            for item in lineSplit[1:-1]:
                if i % 4 == 0:
                    scaffoldList.append(item)
                i += 1
            # All unordered pairs of distinct scaffolds on this line.
            for i in range(len(scaffoldList)):
                for j in range(len(scaffoldList)):
                    if scaffoldList[i] != scaffoldList[j]:
                        if (scaffoldList[i], scaffoldList[j]) not in scaffoldPairs and (scaffoldList[j], scaffoldList[i]) not in scaffoldPairs:
                            scaffoldPairs.append((scaffoldList[i], scaffoldList[j]))
            for pair in scaffoldPairs:
                if pair not in scaffoldDict:
                    scaffoldDict[pair] = [lineSplit[0]]
                else:
                    scaffoldDict[pair] += [lineSplit[0]]
    outfile.write('Scaffold 1' + '\t' + 'Scaffold 2' + '\t' + 'Transcripts Supporting' + '\n')
    for pair in scaffoldDict:
        print(pair)
        outfile.write(pair[0] + '\t' + pair[1])
        transcriptList = scaffoldDict[pair]
        for transcript in transcriptList:
            outfile.write('\t' + transcript)
        outfile.write('\n')
    infile.close()
    outfile.close()
import sys
def matchMisMatch(i, j, match_score=2, mismatch_score=-1):
    """Score a pair of aligned characters for the alignment DP in align().

    Generalized (backward-compatibly) so the match/mismatch scores can be
    overridden; the defaults reproduce the original hard-coded 2 / -1.
    """
    if i == j:
        return match_score
    return mismatch_score
def align(Infile):
    """Globally align two sequences from a FASTA file (Needleman-Wunsch).

    Reads sequences from ``Infile``; the first record becomes seq1 and the
    *last* of the remaining records becomes seq2 (earlier extras are
    silently overwritten).  Writes the aligned pair to ``alignment.fasta``
    as '>Seq1' / '>Seq2'.

    NOTE(review): the traceback stops as soon as either index reaches 0, so
    any unaligned leading residues of the longer sequence are dropped from
    the output — confirm this is intended.  ``gapExtend`` and ``counter``
    are assigned but never used.
    """
    infile = open(Infile,'r')
    outfile = open('alignment.fasta','w')
    seqDict = {}
    currentSeq = ''
    currentSeqName = ''
    counter = 0
    # Parse the FASTA records, stripping trailing whitespace characters.
    for line in infile:
        counter += 1
        if line[0] == '>' and currentSeqName == '':
            currentSeqName = line[0:-1]
        elif line[0] == '>':
            seqDict[currentSeqName] = currentSeq
            currentSeqName = line
            while currentSeqName[-1] == '\n' or currentSeqName[-1] == '\t' or currentSeqName[-1] == '\r':
                currentSeqName = currentSeqName[0:-1]
            currentSeq = ''
        else:
            currentSeq += line
            while currentSeq[-1] == '\n' or currentSeq[-1] == '\t' or currentSeq[-1] == '\r':
                currentSeq = currentSeq[0:-1]
            currentSeq = currentSeq.upper()
    seqDict[currentSeqName] = currentSeq
    seq1 = ''
    for i in seqDict:
        if seq1 == '':
            seq1 = seqDict[i]
        else:
            seq2 = seqDict[i]
    # Fill the DP score matrix with linear gap penalties.
    a = {}
    m = len(seq1)
    n = len(seq2)
    gapScore = (-2)
    gapExtend = 1
    for i in range(m+1):
        a[i,0] = i*gapScore
    for j in range(n+1):
        a[0,j] = j*gapScore
    for i in range(1,m + 1):
        for j in range(1,n + 1):
            a[i,j] = max((a[i-1,j] + gapScore),(a[i-1,j-1] + matchMisMatch(seq1[i-1],seq2[j-1])),(a[i,j-1] + gapScore))
    # Trace back from the bottom-right corner, preferring diagonal moves.
    x = m
    y = n
    newSeq1 = ''
    newSeq2 = ''
    while x > 0 and y > 0:
        threeSquares = [a[x,y-1],a[x-1,y-1],a[x-1,y]]
        if threeSquares[0] > threeSquares[1] and threeSquares[0] > threeSquares[2]:
            newSeq1 = '-' + newSeq1
            newSeq2 = seq2[y-1] + newSeq2
            y = y - 1
        elif threeSquares[1] >= threeSquares[2]:
            newSeq1 = seq1[x-1] + newSeq1
            newSeq2 = seq2[y-1] + newSeq2
            x = x - 1
            y = y - 1
        else:
            newSeq1 = seq1[x-1] + newSeq1
            newSeq2 = '-' + newSeq2
            x = x - 1
    outfile.write('>Seq1\n')
    outfile.write(newSeq1 + '\n')
    outfile.write('>Seq2\n')
    outfile.write(newSeq2 + '\n')
    infile.close()
    outfile.close()
def runCommand():
    """Dispatch on sys.argv to the 'stitch' or 'merge' sub-command.

    Usage:
        prog stitch <genome.fasta> <alignments.sam | list.fofn>
        prog merge  <stitchFile> <genome.fasta> <reads.fasta | list.fofn>

    Returns 'Bad formatting' / 'No command specified' (after printing the
    same message) on unusable arguments, None otherwise.

    Fix versus the original: Python 2 ``print`` statements replaced with
    the call form, which prints identically under Python 2 and 3.
    """
    if sys.argv[1] == 'stitch':
        sams = sys.argv[3]
        if sams[-4:] == 'fofn':
            # File-of-file-names: one SAM path per line.
            samList = []
            infile = open(sams, 'r')
            for line in infile:
                samList.append(line[0:-1])
            infile.close()
            stitch(sys.argv[2], samList)
        elif sams[-3:] == 'sam':
            samList = [sams]
            stitch(sys.argv[2], samList)
        else:
            print('Bad formatting')
            return 'Bad formatting'
    elif sys.argv[1] == 'merge':
        reads = sys.argv[4]
        if reads[-4:] == 'fofn':
            readList = []
            infile = open(reads, 'r')
            for line in infile:
                readList.append(line[0:-1])
            infile.close()
            mergeStitches(sys.argv[2], sys.argv[3], readList)
        elif reads[-5:] == 'fasta':
            # NOTE(review): this passes a single filename where
            # mergeStitches expects a list of transcriptome files — verify
            # that buildSeqDict accepts a bare string before relying on it.
            mergeStitches(sys.argv[2], sys.argv[3], sys.argv[4])
        else:
            print('Bad formatting')
            return 'Bad formatting'
    else:
        print('No command specified')
        return 'No command specified'
# Script entry point: dispatch based on the command-line arguments.
runCommand()
Chat Room/base/views.py | Krishna-Patil/Chat-Room-Web-Application | 0 | 6613946 | from django.http import HttpResponse
from django.shortcuts import render, redirect
from .forms import RoomCreateForm, RoomUpdateForm, UserUpdateForm, CustomUserCreationForm
from django.shortcuts import get_object_or_404
from django.db.models import Q
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .models import *
# Create your views here.
def login_view(request):
    """Log a user in by e-mail and password.

    Authenticated visitors are bounced to 'home'.  On POST, flashes
    "user does not exist." for an unknown e-mail, then still runs
    authenticate() so a wrong password also flashes "wrong Password."
    (preserving the original flow).

    Fix versus the original: the bare ``except:`` (which swallowed *every*
    error, including programming mistakes) is narrowed to
    ``User.DoesNotExist``.
    """
    page = 'login'
    if request.user.is_authenticated:
        return redirect('home')
    if request.method == 'POST':
        # The login template must POST fields named 'Email' and 'password'.
        email = request.POST.get('Email').lower()
        password = request.POST.get('password')
        try:
            User.objects.get(email=email)
        except User.DoesNotExist:
            messages.error(request, "user does not exist.")
        user = authenticate(request, email=email, password=password)
        if user:
            login(request, user)
            return redirect('home')
        else:
            messages.error(
                request, "wrong Password.")
    return render(request, 'base/login_register.html', {'page': page})
def logout_view(request):
    """Terminate the current session and redirect to the home page."""
    logout(request)
    return redirect('home')
def signup_view(request):
    """Register a new account; log the user in and go home on success.

    Fixes versus the original:
    * on an invalid submission an HttpResponse was constructed and then
      discarded (never returned); the text is now surfaced via the
      messages framework instead,
    * the bound (invalid) form is re-rendered so field errors are shown,
      rather than replacing it with a fresh empty form.
    """
    form = CustomUserCreationForm()
    if request.method == 'POST':
        form = CustomUserCreationForm(request.POST)
        if form.is_valid():
            user = form.save(commit=False)
            # Normalize the username before persisting.
            user.username = user.username.lower()
            user.save()
            login(request, user)
            return redirect('home')
        else:
            messages.error(request, "issues during signing up.")
    return render(request, 'base/login_register.html', {'form': form})
def home_view(request):
    """Landing page: rooms matching the optional search term ``q``.

    ``q`` is matched case-insensitively against room name, topic name, host
    username and description; the recent-activity feed is filtered by topic
    name only, and at most five topics are shown.
    """
    # Idiom fix: a single lookup with a default instead of calling
    # request.GET.get('q') twice and comparing against None with ``!=``.
    q = request.GET.get('q', '')
    rooms = Room.objects.filter(
        Q(name__icontains=q) |
        Q(topic__name__icontains=q) |
        Q(host__username__icontains=q) |
        Q(discription__icontains=q)
    )
    rooms_count = rooms.count()
    topics = Topic.objects.all()[:5]
    room_messages = Message.objects.filter(Q(room__topic__name__icontains=q))
    context = {'rooms': rooms, 'topics': topics,
               'rooms_count': rooms_count, 'room_messages': room_messages}
    return render(request, 'base/home.html', context)
def room_view(request, pk):
    """Show a room and its message history; on POST append a new message.

    Posting a message also registers the author as a room participant and
    redirects back to the room (POST-redirect-GET).
    """
    room = Room.objects.get(id=pk)
    room_messages = room.message_set.all().order_by('-created')
    participants = room.participants.all()
    if request.method == 'POST':
        # Bug fix: Message.objects.create() already persists the row; the
        # original additionally called msg.save(), issuing a redundant
        # second UPDATE query.
        Message.objects.create(
            text=request.POST.get('text'),
            room=room,
            user=request.user,
        )
        room.participants.add(request.user)
        return redirect('room', pk)
    context = {
        'room': room,
        'room_messages': room_messages,
        'participants': participants,
    }
    return render(request, 'base/room.html', context)
@login_required(login_url='login')
def create_room_view(request):
    """Render the room-creation form; on POST create the room and go home.

    The topic is looked up by name and created on the fly if new.
    """
    if request.method == 'POST':
        topic, _ = Topic.objects.get_or_create(
            name=request.POST.get('topic'))
        Room.objects.create(
            host=request.user,
            topic=topic,
            name=request.POST.get('name'),
            discription=request.POST.get('discription')
        )
        return redirect('home')
    return render(request, 'base/room_form.html',
                  {'form': RoomCreateForm(), 'topics': Topic.objects.all()})
@login_required(login_url='login')
def update_room_view(request, pk):
    """Let a room's host edit its topic, name and description.

    Non-hosts get a plain refusal response; GET renders the prefilled
    form, POST saves the changes and redirects back to the room.
    """
    room = get_object_or_404(Room, pk=pk)
    if request.user != room.host:
        return HttpResponse(" What are you doing here? ")
    if request.method != 'POST':
        return render(request, 'base/room_form.html', {
            'form': RoomUpdateForm(instance=room),
            'topics': Topic.objects.all(),
            'room': room,
        })
    topic, _ = Topic.objects.get_or_create(name=request.POST.get('topic'))
    room.name = request.POST.get('name')
    room.topic = topic
    room.discription = request.POST.get('discription')
    room.save()
    return redirect('room', pk)
@login_required(login_url='login')
def delete_room_view(request, pk):
    """Confirm (GET) and perform (POST) deletion of a room by its host."""
    room = get_object_or_404(Room, pk=pk)
    if request.user != room.host:
        return HttpResponse(" What are you doing here? ")
    if request.method != 'POST':
        return render(request, 'base/delete.html', {'obj': room})
    room.delete()
    return redirect('home')
@login_required(login_url='login')
def delete_message_view(request, pk):
    """Confirm (GET) and perform (POST) deletion of a message by its author.

    Fixes versus the original (for consistency with delete_room_view):
    * ``Message.objects.get`` raised an unhandled DoesNotExist (HTTP 500)
      on a bad pk — replaced with ``get_object_or_404``,
    * the view compared ``request.user`` without requiring login; the
      ``@login_required`` decorator now sends anonymous visitors to the
      login page like the other mutating views.
    """
    msg = get_object_or_404(Message, pk=pk)
    if request.user != msg.user:
        return HttpResponse("What are you doing here?")
    if request.method == 'POST':
        msg.delete()
        return redirect('home')
    context = {'obj': msg}
    return render(request, 'base/delete.html', context)
def user_profile_view(request, pk):
    """Public profile page: the user's rooms, recent activity and topics.

    Robustness fix: an unknown user id now yields a 404 via
    ``get_object_or_404`` instead of an unhandled ``User.DoesNotExist``
    (HTTP 500), consistent with the other detail views in this module.
    """
    user = get_object_or_404(User, pk=pk)
    rooms = user.room_set.all()
    room_messages = user.message_set.all()
    topics = Topic.objects.all()
    rooms_count = rooms.count()
    context = {'user': user, 'rooms': rooms, 'rooms_count': rooms_count,
               'room_messages': room_messages, 'topics': topics}
    return render(request, 'base/user_profile.html', context)
@login_required(login_url='login')
def user_update_view(request):
    """Show and process the profile-edit form for the logged-in user."""
    if request.method == 'POST':
        bound = UserUpdateForm(request.POST, request.FILES,
                               instance=request.user)
        if bound.is_valid():
            bound.save()
            return redirect('user_profile', request.user.id)
    # GET, or an invalid POST: show the form prefilled from the account.
    return render(request, 'base/update_user.html',
                  {'form': UserUpdateForm(instance=request.user)})
def topics_view(request):
    """List topics filtered by an optional search term, with a count.

    NOTE(review): the search term is read from request.POST while
    home_view reads it from request.GET — confirm the topics template
    really submits its search form via POST.
    """
    q = request.POST.get('q') if request.POST.get('q') is not None else ''
    topics = Topic.objects.filter(name__icontains=q)
    topics_count = topics.count()
    context = {
        'topics': topics,
        'topics_count': topics_count,
    }
    return render(request, 'base/topics.html', context)
| from django.http import HttpResponse
from django.shortcuts import render, redirect
from .forms import RoomCreateForm, RoomUpdateForm, UserUpdateForm, CustomUserCreationForm
from django.shortcuts import get_object_or_404
from django.db.models import Q
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .models import *
# Create your views here.
def login_view(request):
page = 'login'
if request.user.is_authenticated:
return redirect('home')
if request.method == 'POST':
email = request.POST.get('Email').lower()
password = request.POST.get('password')
try:
user = User.objects.get(email=email)
except:
messages.error(request, "user does not exist.")
user = authenticate(request, email=email, password=password)
if user:
login(request, user)
return redirect('home')
else:
messages.error(
request, "wrong Password.")
return render(request, 'base/login_register.html', {'page': page})
def logout_view(request):
logout(request)
return redirect('home')
def signup_view(request):
if request.method == 'POST':
data = CustomUserCreationForm(request.POST)
if data.is_valid():
user = data.save(commit=False)
user.username = user.username.lower()
user.save()
login(request, user)
return redirect('home')
else:
HttpResponse("issues during signing up.")
form = CustomUserCreationForm()
return render(request, 'base/login_register.html', {'form': form})
def home_view(request):
q = request.GET.get('q') if request.GET.get('q') != None else ''
rooms = Room.objects.filter(
Q(name__icontains=q) |
Q(topic__name__icontains=q) |
Q(host__username__icontains=q) |
Q(discription__icontains=q)
)
rooms_count = rooms.count()
topics = Topic.objects.all()[:5]
room_messages = Message.objects.filter(Q(room__topic__name__icontains=q))
context = {'rooms': rooms, 'topics': topics,
'rooms_count': rooms_count, 'room_messages': room_messages}
return render(request, 'base/home.html', context)
def room_view(request, pk):
room = Room.objects.get(id=pk)
room_messages = room.message_set.all().order_by('-created')
participants = room.participants.all()
if request.method == 'POST':
text = request.POST.get('text')
msg = Message.objects.create(text=text, room=room, user=request.user)
msg.save()
room.participants.add(request.user)
return redirect('room', pk)
context = {
'room': room,
'room_messages': room_messages,
'participants': participants,
}
return render(request, 'base/room.html', context)
@login_required(login_url='login')
def create_room_view(request):
form = RoomCreateForm()
topics = Topic.objects.all()
if request.method == 'POST':
topic_name = request.POST.get('topic')
topic, created = Topic.objects.get_or_create(name=topic_name)
Room.objects.create(
host=request.user,
topic=topic,
name=request.POST.get('name'),
discription=request.POST.get('discription')
)
return redirect('home')
context = {'form': form, 'topics': topics}
return render(request, 'base/room_form.html', context)
@login_required(login_url='login')
def update_room_view(request, pk):
room = get_object_or_404(Room, pk=pk)
topics = Topic.objects.all()
if request.user != room.host:
return HttpResponse(" What are you doing here? ")
if request.method == 'POST':
topic_name = request.POST.get('topic')
topic, created = Topic.objects.get_or_create(name=topic_name)
room.name = request.POST.get('name')
room.topic = topic
room.discription = request.POST.get('discription')
room.save()
return redirect('room', pk)
else:
form = RoomUpdateForm(instance=room)
context = {'form': form, 'topics': topics, 'room': room}
return render(request, 'base/room_form.html', context)
@login_required(login_url='login')
def delete_room_view(request, pk):
room = get_object_or_404(Room, pk=pk)
if request.user != room.host:
return HttpResponse(" What are you doing here? ")
if request.method == 'POST':
room.delete()
return redirect('home')
else:
context = {'obj': room}
return render(request, 'base/delete.html', context)
def delete_message_view(request, pk):
msg = Message.objects.get(pk=pk)
if request.user != msg.user:
return HttpResponse("What are you doing here?")
if request.method == 'POST':
msg.delete()
return redirect('home')
context = {'obj': msg}
return render(request, 'base/delete.html', context)
def user_profile_view(request, pk):
user = User.objects.get(pk=pk)
rooms = user.room_set.all()
room_messages = user.message_set.all()
topics = Topic.objects.all()
rooms_count = rooms.count()
context = {'user': user, 'rooms': rooms, 'rooms_count': rooms_count,
'room_messages': room_messages, 'topics': topics}
return render(request, 'base/user_profile.html', context)
@login_required(login_url='login')
def user_update_view(request):
form = UserUpdateForm(instance=request.user)
if request.method == 'POST':
data = UserUpdateForm(request.POST, request.FILES,
instance=request.user)
if data.is_valid():
data.save()
return redirect('user_profile', request.user.id)
context = {'form': form}
return render(request, 'base/update_user.html', context)
def topics_view(request):
q = request.POST.get('q') if request.POST.get('q') is not None else ''
topics = Topic.objects.filter(name__icontains=q)
topics_count = topics.count()
context = {
'topics': topics,
'topics_count': topics_count,
}
return render(request, 'base/topics.html', context)
| en | 0.968116 | # Create your views here. | 2.17785 | 2 |
tasks.py | laurenrevere/waterbutler | 0 | 6613947 | import os
from invoke import task, run
WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE')
@task
def wheelhouse(develop=False):
    """Build wheels for the (dev-)requirements into the local wheelhouse.

    Bug fix: the original interpolated WHEELHOUSE_PATH unconditionally, so
    with the WHEELHOUSE env var unset the command contained the literal
    '--find-links=None --wheel-dir=None'.  The path-dependent flags are now
    guarded, consistent with how install() treats WHEELHOUSE_PATH.
    """
    req_file = 'dev-requirements.txt' if develop else 'requirements.txt'
    cmd = 'pip wheel -r {}'.format(req_file)
    if WHEELHOUSE_PATH:
        cmd += ' --find-links={} --wheel-dir={}'.format(WHEELHOUSE_PATH,
                                                        WHEELHOUSE_PATH)
    run(cmd, pty=True)
@task
def install(develop=False, upgrade=False):
    """Install the package in develop mode plus its requirements.

    --upgrade and wheelhouse-only resolution are appended when requested
    / when the WHEELHOUSE env var is set.
    """
    run('python setup.py develop')
    requirements = 'dev-requirements.txt' if develop else 'requirements.txt'
    parts = ['pip install -r {}'.format(requirements)]
    if upgrade:
        parts.append('--upgrade')
    if WHEELHOUSE_PATH:
        parts.append('--no-index --find-links={}'.format(WHEELHOUSE_PATH))
    run(' '.join(parts), pty=True)
@task
def flake():
    """Lint the whole tree with flake8."""
    run('flake8 .', pty=True)
@task
def test(verbose=False):
    """Run the linter, then the pytest suite with coverage reporting."""
    flake()
    pytest_cmd = 'py.test --cov-report term-missing --cov waterbutler tests'
    if verbose:
        pytest_cmd = pytest_cmd + ' -v'
    run(pytest_cmd, pty=True)
@task
def celery():
    """Start a Celery worker for the waterbutler task app (INFO logging)."""
    from waterbutler.tasks.app import app
    app.worker_main(['worker', '-l', 'INFO'])
@task
def rabbitmq():
    """Run a RabbitMQ broker in the foreground (needs rabbitmq-server on PATH)."""
    run('rabbitmq-server', pty=True)
@task
def server():
    """Start the waterbutler HTTP server."""
    from waterbutler.server.app import serve
    serve()
| import os
from invoke import task, run
WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE')
@task
def wheelhouse(develop=False):
req_file = 'dev-requirements.txt' if develop else 'requirements.txt'
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH)
run(cmd, pty=True)
@task
def install(develop=False, upgrade=False):
run('python setup.py develop')
req_file = 'dev-requirements.txt' if develop else 'requirements.txt'
cmd = 'pip install -r {}'.format(req_file)
if upgrade:
cmd += ' --upgrade'
if WHEELHOUSE_PATH:
cmd += ' --no-index --find-links={}'.format(WHEELHOUSE_PATH)
run(cmd, pty=True)
@task
def flake():
run('flake8 .', pty=True)
@task
def test(verbose=False):
flake()
cmd = 'py.test --cov-report term-missing --cov waterbutler tests'
if verbose:
cmd += ' -v'
run(cmd, pty=True)
@task
def celery():
from waterbutler.tasks.app import app
app.worker_main(['worker', '-l', 'INFO'])
@task
def rabbitmq():
run('rabbitmq-server', pty=True)
@task
def server():
from waterbutler.server.app import serve
serve()
| none | 1 | 1.959957 | 2 | |
otcextensions/sdk/cce/v3/cluster.py | kucerakk/python-otcextensions | 0 | 6613948 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# import six
from openstack import exceptions
from openstack import resource
from otcextensions.sdk.cce.v3 import _base
# from otcextensions.sdk.cce.v3 import cluster_node
#
# class NodeListSpec(object):
# # Properties
# host_list = resource.Body('hostList', type=list,
# list_type=cluster_node.ClusterNode)
#
#
# class ClusterNodeList(object):
# # Properties
# #: Spec
# spec = resource.Body('spec', type=NodeListSpec)
class HostNetworkSpec(resource.Resource):
    """Node (host) network parameters of a CCE cluster."""
    # Properties
    #: ID of the high-speed network that is used to create a bare metal node.
    highway_subnet = resource.Body('highwaySubnet')
    #: Security group.
    #: NOTE(review): the capitalised JSON key 'SecurityGroup' differs from
    #: the lower-case keys of the other fields — confirm against the CCE
    #: API reference.
    security_group = resource.Body('SecurityGroup')
    #: ID of the subnet that is used to create a node.
    subnet = resource.Body('subnet')
    #: ID of the VPC that is used to create a node.
    vpc = resource.Body('vpc')
class ClusterSpec(resource.Resource):
    """Specification ('spec') section of a CCE cluster body."""
    #: Authentication configuration.
    authentication = resource.Body('authentication', type=dict)
    #: Billing mode of the cluster. Currently, only pay-per-use is supported.
    #: NOTE(review): the JSON key 'billing_mode' breaks the camelCase
    #: pattern of the surrounding fields — confirm the CCE API key
    #: (billingMode?).
    billing = resource.Body('billing_mode', type=int)
    #: Container network parameters.
    container_network = resource.Body('containerNetwork', type=dict)
    #: Cluster description.
    description = resource.Body('description')
    #: Extended parameters.
    extended_param = resource.Body('extendParam', type=dict)
    #: Cluster flavors.
    flavor = resource.Body('flavor')
    #: Node network parameters.
    host_network = resource.Body('hostNetwork', type=HostNetworkSpec)
    #: Cluster type.
    type = resource.Body('type')
    #: Cluster version ['v1.9.2-r2', 'v1.11.3-r1'].
    version = resource.Body('version')
class StatusSpec(resource.Resource):
    """Status ('status') section of a CCE cluster body."""
    # Properties
    #: Cluster status.
    status = resource.Body('phase')
    #: Access address of the kube-apiserver in the cluster.
    endpoints = resource.Body('endpoints', type=dict)
class Cluster(_base.Resource):
    """A CCE cluster resource (Cloud Container Engine, API v3)."""
    base_path = '/clusters'
    # Responses are not wrapped in a key for this service.
    resources_key = ''
    resource_key = ''
    allow_create = True
    allow_fetch = True
    allow_commit = True
    allow_delete = True
    allow_list = True
    # Properties
    #: specification
    spec = resource.Body('spec', type=ClusterSpec)
    #: Cluster status
    status = resource.Body('status', type=StatusSpec)
    @classmethod
    def new(cls, **kwargs):
        """Build an unsynchronized Cluster, defaulting ``kind``/``apiVersion``.

        A top-level ``name`` keyword is folded into a ``metadata`` dict when
        no metadata was supplied, matching the CCE API envelope format.
        """
        if 'kind' not in kwargs:
            kwargs['kind'] = 'Cluster'
        if 'apiVersion' not in kwargs:
            kwargs['apiVersion'] = 'v3'
        # '' serves as a falsy placeholder when metadata is absent.
        metadata = kwargs.get('metadata', '')
        if 'name' in kwargs and not metadata:
            name = kwargs.pop('name', '')
            kwargs['metadata'] = {
                'name': name
            }
        return cls(_synchronized=False, **kwargs)
def __getattribute__(self, name):
"""Return an attribute on this instance
This is mostly a pass-through except for a specialization on
the 'id' name, as this can exist under a different name via the
`alternate_id` argument to resource.Body.
"""
if name == 'id' or name == 'name':
if name in self._body:
return self._body[name]
else:
try:
metadata = self._body['metadata']
if name == 'id':
if isinstance(metadata, dict):
return metadata['uid']
elif isinstance(metadata, _base.Metadata):
return metadata._body[metadata._alternate_id()]
else:
if isinstance(metadata, dict):
return metadata['name']
elif isinstance(metadata, _base.Metadata):
return metadata.name
except KeyError:
return None
else:
return object.__getattribute__(self, name)
@classmethod
def list(cls, session, paginated=True, base_path=None, **params):
"""This method is a generator which yields resource objects.
This resource object list generator handles pagination and takes query
params for response filtering.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param bool paginated: ``True`` if a GET to this resource returns
a paginated series of responses, or ``False``
if a GET returns only one page of data.
**When paginated is False only one
page of data will be returned regardless
of the API's support of pagination.**
:param str base_path: Base part of the URI for listing resources, if
different from
:data:`~openstack.resource.Resource.base_path`.
:param dict params: These keyword arguments are passed through the
:meth:`~openstack.resource.QueryParamter._transpose` method
to find if any of them match expected query parameters to be
sent in the *params* argument to
:meth:`~keystoneauth1.adapter.Adapter.get`. They are additionally
checked against the
:data:`~openstack.resource.Resource.base_path` format string
to see if any path fragments need to be filled in by the contents
of this argument.
:return: A generator of :class:`Resource` objects.
:raises: :exc:`~openstack.exceptions.MethodNotSupported` if
:data:`Resource.allow_list` is not set to ``True``.
:raises: :exc:`~openstack.exceptions.InvalidResourceQuery` if query
contains invalid params.
"""
if not cls.allow_list:
raise exceptions.MethodNotSupported(cls, "list")
session = cls._get_session(session)
microversion = cls._get_microversion_for_list(session)
if base_path is None:
base_path = cls.base_path
cls._query_mapping._validate(params, base_path=base_path)
query_params = cls._query_mapping._transpose(params)
uri = base_path % params
while uri:
# Copy query_params due to weird mock unittest interactions
response = session.get(
uri,
headers={
'Accept': 'application/json',
'Content-Type': 'application/json'},
params=query_params.copy())
exceptions.raise_from_response(response)
if response.json() and 'items' in response.json():
data = response.json()['items'] or []
if cls.resources_key:
resources = data[cls.resources_key]
else:
resources = data
if not isinstance(resources, list):
resources = [resources]
for raw_resource in resources:
value = cls.existing(
microversion=microversion,
connection=session._get_connection(),
**raw_resource)
yield value
return
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# import six
from openstack import exceptions
from openstack import resource
from otcextensions.sdk.cce.v3 import _base
# from otcextensions.sdk.cce.v3 import cluster_node
#
# class NodeListSpec(object):
# # Properties
# host_list = resource.Body('hostList', type=list,
# list_type=cluster_node.ClusterNode)
#
#
# class ClusterNodeList(object):
# # Properties
# #: Spec
# spec = resource.Body('spec', type=NodeListSpec)
class HostNetworkSpec(resource.Resource):
# def __init__(self, **kwargs):
# self.highway_subnet = kwargs.pop(
# 'highwaySubnet', kwargs.pop('highway_subnet', None))
# self.security_group = kwargs.pop(
# 'highwaySubnet', kwargs.pop('highway_subnet', None))
# Properties
#: ID of the high-speed network that is used to create a bare metal node.
highway_subnet = resource.Body('highwaySubnet')
#: Security group.
security_group = resource.Body('SecurityGroup')
#: ID of the subnet that is used to create a node.
subnet = resource.Body('subnet')
#: ID of the VPC that is used to create a node.
vpc = resource.Body('vpc')
class ClusterSpec(resource.Resource):
#: Authentication
authentication = resource.Body('authentication', type=dict)
#: Billing mode of the cluster. Currently, only pay-per-use is supported.
billing = resource.Body('billing_mode', type=int)
#: Container network parameters.
container_network = resource.Body('containerNetwork', type=dict)
#: Cluster description.
description = resource.Body('description')
#: Extended parameters.
extended_param = resource.Body('extendParam', type=dict)
#: Cluster flavors.
flavor = resource.Body('flavor')
#: Node network parameters.
host_network = resource.Body('hostNetwork', type=HostNetworkSpec)
#: Cluster type.
type = resource.Body('type')
#: Cluster version ['v1.9.2-r2', 'v1.11.3-r1'].
version = resource.Body('version')
class StatusSpec(resource.Resource):
# Properties
#: Cluster status.
status = resource.Body('phase')
#: Access address of the kube-apiserver in the cluster.
endpoints = resource.Body('endpoints', type=dict)
class Cluster(_base.Resource):
base_path = '/clusters'
resources_key = ''
resource_key = ''
allow_create = True
allow_fetch = True
allow_commit = True
allow_delete = True
allow_list = True
# Properties
#: specification
spec = resource.Body('spec', type=ClusterSpec)
#: Cluster status
status = resource.Body('status', type=StatusSpec)
@classmethod
def new(cls, **kwargs):
if 'kind' not in kwargs:
kwargs['kind'] = 'Cluster'
if 'apiVersion' not in kwargs:
kwargs['apiVersion'] = 'v3'
metadata = kwargs.get('metadata', '')
if 'name' in kwargs and not metadata:
name = kwargs.pop('name', '')
kwargs['metadata'] = {
'name': name
}
return cls(_synchronized=False, **kwargs)
def __getattribute__(self, name):
"""Return an attribute on this instance
This is mostly a pass-through except for a specialization on
the 'id' name, as this can exist under a different name via the
`alternate_id` argument to resource.Body.
"""
if name == 'id' or name == 'name':
if name in self._body:
return self._body[name]
else:
try:
metadata = self._body['metadata']
if name == 'id':
if isinstance(metadata, dict):
return metadata['uid']
elif isinstance(metadata, _base.Metadata):
return metadata._body[metadata._alternate_id()]
else:
if isinstance(metadata, dict):
return metadata['name']
elif isinstance(metadata, _base.Metadata):
return metadata.name
except KeyError:
return None
else:
return object.__getattribute__(self, name)
@classmethod
def list(cls, session, paginated=True, base_path=None, **params):
"""This method is a generator which yields resource objects.
This resource object list generator handles pagination and takes query
params for response filtering.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param bool paginated: ``True`` if a GET to this resource returns
a paginated series of responses, or ``False``
if a GET returns only one page of data.
**When paginated is False only one
page of data will be returned regardless
of the API's support of pagination.**
:param str base_path: Base part of the URI for listing resources, if
different from
:data:`~openstack.resource.Resource.base_path`.
:param dict params: These keyword arguments are passed through the
:meth:`~openstack.resource.QueryParamter._transpose` method
to find if any of them match expected query parameters to be
sent in the *params* argument to
:meth:`~keystoneauth1.adapter.Adapter.get`. They are additionally
checked against the
:data:`~openstack.resource.Resource.base_path` format string
to see if any path fragments need to be filled in by the contents
of this argument.
:return: A generator of :class:`Resource` objects.
:raises: :exc:`~openstack.exceptions.MethodNotSupported` if
:data:`Resource.allow_list` is not set to ``True``.
:raises: :exc:`~openstack.exceptions.InvalidResourceQuery` if query
contains invalid params.
"""
if not cls.allow_list:
raise exceptions.MethodNotSupported(cls, "list")
session = cls._get_session(session)
microversion = cls._get_microversion_for_list(session)
if base_path is None:
base_path = cls.base_path
cls._query_mapping._validate(params, base_path=base_path)
query_params = cls._query_mapping._transpose(params)
uri = base_path % params
while uri:
# Copy query_params due to weird mock unittest interactions
response = session.get(
uri,
headers={
'Accept': 'application/json',
'Content-Type': 'application/json'},
params=query_params.copy())
exceptions.raise_from_response(response)
if response.json() and 'items' in response.json():
data = response.json()['items'] or []
if cls.resources_key:
resources = data[cls.resources_key]
else:
resources = data
if not isinstance(resources, list):
resources = [resources]
for raw_resource in resources:
value = cls.existing(
microversion=microversion,
connection=session._get_connection(),
**raw_resource)
yield value
return
| en | 0.694216 | # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import six # from otcextensions.sdk.cce.v3 import cluster_node # # class NodeListSpec(object): # # Properties # host_list = resource.Body('hostList', type=list, # list_type=cluster_node.ClusterNode) # # # class ClusterNodeList(object): # # Properties # #: Spec # spec = resource.Body('spec', type=NodeListSpec) # def __init__(self, **kwargs): # self.highway_subnet = kwargs.pop( # 'highwaySubnet', kwargs.pop('highway_subnet', None)) # self.security_group = kwargs.pop( # 'highwaySubnet', kwargs.pop('highway_subnet', None)) # Properties #: ID of the high-speed network that is used to create a bare metal node. #: Security group. #: ID of the subnet that is used to create a node. #: ID of the VPC that is used to create a node. #: Authentication #: Billing mode of the cluster. Currently, only pay-per-use is supported. #: Container network parameters. #: Cluster description. #: Extended parameters. #: Cluster flavors. #: Node network parameters. #: Cluster type. #: Cluster version ['v1.9.2-r2', 'v1.11.3-r1']. # Properties #: Cluster status. #: Access address of the kube-apiserver in the cluster. # Properties #: specification #: Cluster status Return an attribute on this instance This is mostly a pass-through except for a specialization on the 'id' name, as this can exist under a different name via the `alternate_id` argument to resource.Body. This method is a generator which yields resource objects. 
This resource object list generator handles pagination and takes query params for response filtering. :param session: The session to use for making this request. :type session: :class:`~keystoneauth1.adapter.Adapter` :param bool paginated: ``True`` if a GET to this resource returns a paginated series of responses, or ``False`` if a GET returns only one page of data. **When paginated is False only one page of data will be returned regardless of the API's support of pagination.** :param str base_path: Base part of the URI for listing resources, if different from :data:`~openstack.resource.Resource.base_path`. :param dict params: These keyword arguments are passed through the :meth:`~openstack.resource.QueryParamter._transpose` method to find if any of them match expected query parameters to be sent in the *params* argument to :meth:`~keystoneauth1.adapter.Adapter.get`. They are additionally checked against the :data:`~openstack.resource.Resource.base_path` format string to see if any path fragments need to be filled in by the contents of this argument. :return: A generator of :class:`Resource` objects. :raises: :exc:`~openstack.exceptions.MethodNotSupported` if :data:`Resource.allow_list` is not set to ``True``. :raises: :exc:`~openstack.exceptions.InvalidResourceQuery` if query contains invalid params. # Copy query_params due to weird mock unittest interactions | 1.915744 | 2 |
DP/car_rental_problem.py | vuk119/RL | 0 | 6613949 | """
Car Rental Problem
Two renting car locations.
-If a customer comes and rents a car the reward is 10$.
-If he is out of cars the business is lost.
-Cars are available a day after they are returned
-Number of requested cars ~ Poisson(3)
-Number of returned cars ~ Poisson(4)
-No more than 20 cars at each location. (Any extra cars just disappear)
-Each night you can move 0,1,2,3,4 or 5 cars between the two locations
-The cost of moving a car is 2$
-Gamma = 0.9
states: (0,0) -> (20,20)
actions: [-5,-4,-3,-2,-1,0,1,2,3,4,5] - How many cars we move from location 1 to location 2
The value function of states (0,x) and (x,0) is 0
"""
import time
import os
import pickle
import multiprocessing
from multiprocessing.pool import ThreadPool as Pool
import scipy.stats as stats
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# --- Problem constants (module-level configuration) ---
# Cost in $ of moving one car between locations overnight.
moving_cost = 2
# Income in $ per car rented.
rent_price = 10
# Capacity cap per location; extra returned cars vanish.
max_cars = 20
# Poisson means for rental requests at locations 1 and 2.
request_mean_1 = 3
request_mean_2 = 4
# Poisson means for car returns at locations 1 and 2.
# NOTE(review): the module docstring states requests ~ Poisson(3) and
# returns ~ Poisson(4); the constants below differ -- confirm intent.
return_mean_1 = 3
return_mean_2 = 2
# NOTE(review): 'compute' appears unused in this module.
compute = True
def compute_dynamics(init_state):
    """Compute one-step dynamics of the rental MDP for one morning state.

    :param init_state: tuple ``(i, j)`` -- cars available at locations 1
        and 2 after the overnight move.
    :returns: ``(transition_probabilities, expected_rewards, prob_lost)``:
        ``transition_probabilities[k, l]`` is the probability of ending the
        day in state ``(k, l)`` restricted to the "no request lost" event;
        ``expected_rewards[k, l]`` is the expected rental income conditioned
        on landing in ``(k, l)``; ``prob_lost`` is the probability that at
        least one request could not be served.
    """
    global max_cars, request_mean_1, request_mean_2, return_mean_1, return_mean_2, rent_price, moving_cost

    i, j = init_state

    transition_probabilities_and_rewards = {}

    # CASE 1: LOST BUSINESS -- more requests arrive than cars on hand at
    # either location (inclusion-exclusion over the two locations).
    prob_lost_1 = 1 - stats.poisson.cdf(k=i, mu=request_mean_1)
    prob_lost_2 = 1 - stats.poisson.cdf(k=j, mu=request_mean_2)
    prob_lost = prob_lost_1 + prob_lost_2 - prob_lost_1 * prob_lost_2

    # CASE 2: NOT LOST -- enumerate every servable request pair.
    for req_1 in range(i + 1):
        for req_2 in range(j + 1):
            req_prob = (stats.poisson.pmf(k=req_1, mu=request_mean_1)
                        * stats.poisson.pmf(k=req_2, mu=request_mean_2))
            reward = (req_1 + req_2) * rent_price
            # Enumerate returns; the last bucket absorbs the Poisson tail so
            # each location is capped at max_cars.
            for ret_1 in range(max_cars + 1 - (i - req_1)):
                # Hoisted out of the inner loop: ret_1_prob does not depend
                # on ret_2 (it was recomputed per inner iteration before).
                if ret_1 == max_cars - (i - req_1):
                    ret_1_prob = 1 - stats.poisson.cdf(k=ret_1 - 1, mu=return_mean_1)
                else:
                    ret_1_prob = stats.poisson.pmf(k=ret_1, mu=return_mean_1)
                for ret_2 in range(max_cars + 1 - (j - req_2)):
                    if ret_2 == max_cars - (j - req_2):
                        ret_2_prob = 1 - stats.poisson.cdf(k=ret_2 - 1, mu=return_mean_2)
                    else:
                        ret_2_prob = stats.poisson.pmf(k=ret_2, mu=return_mean_2)
                    final_state = (i - req_1 + ret_1, j - req_2 + ret_2)
                    transition_probabilities_and_rewards.setdefault(final_state, []).append(
                        (req_prob * ret_1_prob * ret_2_prob, reward))

    # Aggregate per final state: total probability and the conditional
    # expected reward given that final state.
    transition_probabilities = np.zeros((max_cars + 1, max_cars + 1))
    expected_rewards = np.zeros((max_cars + 1, max_cars + 1))
    for state, outcomes in transition_probabilities_and_rewards.items():
        total_prob = sum(p for p, _ in outcomes)
        expected_reward = sum(p * r for p, r in outcomes)
        transition_probabilities[state[0], state[1]] = total_prob
        expected_rewards[state[0], state[1]] = expected_reward / total_prob

    # Sanity check: lost-business probability plus the kept mass should be ~1.
    if 1 - np.sum(transition_probabilities) - prob_lost > 0.01:
        print("ATTENTION")
        print("Transition probabilities for this state do not add up to 1!")
        print(init_state, "{:.2f}".format(100 * (1 - np.sum(transition_probabilities) - prob_lost)))

    return transition_probabilities, expected_rewards, prob_lost
def compute_dynamics_one_it(i):
    """Fill row *i* of the precomputed dynamics tables (one worker unit)."""
    global max_cars, transition_probabilities, expected_rewards, lost_probabilities
    start = time.time()
    for j in range(max_cars + 1):
        probs, rewards, lost = compute_dynamics((i, j))
        transition_probabilities[i, j, :, :] = probs
        expected_rewards[i, j, :, :] = rewards
        lost_probabilities[i, j] = lost
        print(i, j)
    print(i, time.time() - start, 's')
def compute_full_dynamics_in_parallel():
    """Precompute dynamics for every state with a thread pool, then pickle.

    One task fills one row ``i`` of the (i, j) state grid by calling
    compute_dynamics_one_it. Results are dumped to ./pymdp/saved_variables/.
    NOTE(review): load_dynamics() reads from ./saved_variables/ (no 'pymdp'
    prefix) -- confirm which location is intended.
    """
    global max_cars, transition_probabilities, expected_rewards, lost_probabilities

    # Leave one core free for the main process.
    pool = Pool(multiprocessing.cpu_count() - 1)
    for i in range(max_cars + 1):
        pool.apply_async(compute_dynamics_one_it, (i,))
    pool.close()
    pool.join()

    # Fixed: the first path previously contained a stray space
    # ('saved_variab les'), so the dump would fail / miss the directory.
    with open('./pymdp/saved_variables/transition_probabilities.pkl', 'wb') as f:
        pickle.dump(transition_probabilities, f)
    with open('./pymdp/saved_variables/expected_rewards.pkl', 'wb') as f:
        pickle.dump(expected_rewards, f)
    with open('./pymdp/saved_variables/lost_probabilities.pkl', 'wb') as f:
        pickle.dump(lost_probabilities, f)
def compute_full_dynamics():
    """Serially precompute dynamics for every state and pickle the tables."""
    global max_cars, transition_probabilities, expected_rewards, lost_probabilities
    start = time.time()
    for i in range(max_cars + 1):
        print(i, time.time() - start, 's')
        for j in range(max_cars + 1):
            probs, rewards, lost = compute_dynamics((i, j))
            transition_probabilities[i, j, :, :] = probs
            expected_rewards[i, j, :, :] = rewards
            lost_probabilities[i, j] = lost
    outputs = (
        ('./pymdp/saved_variables/transition_probabilities.pkl', transition_probabilities),
        ('./pymdp/saved_variables/expected_rewards.pkl', expected_rewards),
        ('./pymdp/saved_variables/lost_probabilities.pkl', lost_probabilities),
    )
    for path, table in outputs:
        with open(path, 'wb') as f:
            pickle.dump(table, f)
def load_dynamics():
    """Load precomputed MDP dynamics from ./saved_variables into module globals."""
    global transition_probabilities, expected_rewards, lost_probabilities
    loaded = []
    for path in ('./saved_variables/transition_probabilities.pkl',
                 './saved_variables/expected_rewards.pkl',
                 './saved_variables/lost_probabilities.pkl'):
        with open(path, 'rb') as fh:
            loaded.append(pickle.load(fh))
    transition_probabilities, expected_rewards, lost_probabilities = loaded
def value_iteration(gamma=0.9, delta=10**(-3)):
    """Run value iteration and return the converged state-value table.

    ``v[i, j]`` is the value of having ``i`` cars at location 1 and ``j`` at
    location 2 before the overnight move. Sweeps update in place
    (Gauss-Seidel style) until the largest single-state change drops below
    *delta*.
    """
    # Dropped 'optimal_policy' from the global list: it is never used here.
    global max_cars, request_mean_1, request_mean_2, return_mean_1, return_mean_2, rent_price, moving_cost, transition_probabilities, expected_rewards

    v = np.zeros((max_cars + 1, max_cars + 1))
    # delta + 1 forces at least one sweep (was a magic constant 10),
    # consistent with policy_evaluation.
    max_improvement = delta + 1
    while max_improvement > delta:
        max_improvement = 0
        # LOOP OVER STATES
        for i in range(max_cars + 1):
            for j in range(max_cars + 1):
                best_value = -1
                # LOOP OVER ACTIONS (a = cars moved from location 1 to 2)
                for a in actions:
                    new_i = i - a
                    new_j = j + a
                    if new_i > max_cars or new_i < 0 or new_j > max_cars or new_j < 0:
                        continue
                    # One-step lookahead: expected reward plus discounted
                    # next value, minus the cost of moving |a| cars.
                    action_value = np.sum(
                        np.multiply(transition_probabilities[new_i, new_j],
                                    gamma * v + expected_rewards[new_i, new_j])
                    ) - abs(a) * moving_cost
                    if action_value > best_value:
                        best_value = action_value
                improvement = abs(v[i, j] - best_value)
                v[i, j] = best_value
                if improvement > max_improvement:
                    max_improvement = improvement
    return v
def get_optimal_policy(v, gamma=0.9):
    """Return the greedy policy (cars moved 1->2 overnight) w.r.t. table *v*."""
    optimal_policy = np.zeros((max_cars + 1, max_cars + 1), dtype=int)
    for i in range(max_cars + 1):
        for j in range(max_cars + 1):
            best_value = -1
            for action in actions:
                after_1 = i - action
                after_2 = j + action
                if not (0 <= after_1 <= max_cars and 0 <= after_2 <= max_cars):
                    continue
                q = np.sum(np.sum(np.multiply(
                    transition_probabilities[after_1, after_2],
                    gamma * v + expected_rewards[after_1, after_2]))) - abs(action) * moving_cost
                # '>=' keeps the last maximiser, matching the original tie-break.
                if q >= best_value:
                    best_value = q
                    optimal_policy[i, j] = action
    return optimal_policy
def policy_evaluation(policy, gamma = 0.9, delta = 10**(-3)):
    """Iteratively evaluate *policy* (Bellman expectation sweeps).

    Returns the state-value table for the given deterministic policy.
    NOTE: also rebinds the module-global ``v`` (declared ``global`` below),
    which plot_value_function3D() reads.
    """
    global max_cars, request_mean_1, request_mean_2, return_mean_1, return_mean_2, rent_price, moving_cost, v, transition_probabilities, expected_rewards

    v = np.zeros((max_cars + 1, max_cars + 1))

    # delta + 1 forces at least one sweep.
    max_improvement = delta + 1
    while max_improvement > delta:
        max_improvement = 0
        #LOOP OVER STATES
        for i in range(max_cars + 1):
            for j in range(max_cars + 1):
                action = policy[i,j]
                new_i = i - action
                new_j = j + action
                # Bellman backup for the single action prescribed by the policy.
                new_value = np.sum(np.sum(np.multiply(transition_probabilities[new_i, new_j], gamma*v + expected_rewards[new_i,new_j]))) - abs(action)*moving_cost
                improvement = abs(new_value - v[i,j])
                if improvement > max_improvement:
                    max_improvement = improvement
                v[i,j] = new_value
    return v
def policy_improvement(policy, v, gamma = 0.9, delta = 10**(-3)):
    """One greedy improvement sweep over all states.

    Mutates *policy* (and *v*) in place and returns
    ``(policy, policy_stable)``, where ``policy_stable`` is True iff no
    state's action changed. Note: *delta* is accepted but unused here.
    """
    global max_cars, request_mean_1, request_mean_2, return_mean_1, return_mean_2, rent_price, moving_cost, transition_probabilities, expected_rewards
    policy_stable = True
    for i in range(max_cars + 1):
        for j in range(max_cars + 1):
            current_action = policy[i,j]
            new_i = i - current_action
            new_j = j + current_action
            # Q-value of the currently prescribed action.
            current_action_value = np.sum(np.sum(np.multiply(transition_probabilities[new_i, new_j], gamma*v + expected_rewards[new_i,new_j]))) - abs(current_action)*moving_cost
            #LOOP OVER ACTIONS
            for a in actions:
                new_i = i - a
                new_j = j + a
                if new_i>max_cars or new_i<0 or new_j>max_cars or new_j<0:
                    continue
                action_value = np.sum(np.sum(np.multiply(transition_probabilities[new_i, new_j], gamma*v + expected_rewards[new_i,new_j]))) - abs(a)*moving_cost
                if action_value > current_action_value:
                    # Greedy switch: also refresh v[i,j] with the better Q-value.
                    v[i,j] = action_value
                    policy[i,j] = a
                    policy_stable = False
                    current_action_value = action_value
    return policy, policy_stable
def policy_iteration(k=3, gamma=0.9, delta=10**(-3)):
    """Alternate full policy evaluation and greedy improvement until stable.

    Returns ``(v, policy)``. *k* is accepted for interface compatibility but
    is currently unused (a full evaluation is run each round, not k sweeps).
    """
    # Removed a dead `v = np.zeros(...)` init: policy_evaluation rebinds v
    # before it is ever read.
    policy = np.zeros((max_cars + 1, max_cars + 1), dtype=int)
    policy_stable = False
    while policy_stable is False:
        v = policy_evaluation(policy, gamma, delta)
        policy, policy_stable = policy_improvement(policy, v, gamma, delta)
    return v, policy
def plot_value_function3D():
    """Scatter-plot the module-global value table ``v`` over the state grid."""
    points = [(i, j, v[i, j])
              for i in range(max_cars + 1)
              for j in range(max_cars + 1)]
    xs, ys, zs = zip(*points)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(xs, ys, zs)
    plt.show()
def plot_optimal_policy3D():
    """Scatter-plot the module-global ``optimal_policy`` over the state grid."""
    points = [(i, j, optimal_policy[i, j])
              for i in range(max_cars + 1)
              for j in range(max_cars + 1)]
    xs, ys, zs = zip(*points)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(xs, ys, zs)
    plt.show()
# ---- Script entry: load precomputed dynamics, solve, and compare solvers ----
value_function = np.zeros((max_cars + 1, max_cars + 1))
actions = [a for a in range(-5, 6)]
transition_probabilities = np.zeros((max_cars + 1, max_cars + 1, max_cars + 1, max_cars + 1))
expected_rewards = np.zeros((max_cars + 1, max_cars + 1, max_cars + 1, max_cars + 1))
lost_probabilities = np.zeros((max_cars + 1, max_cars + 1))

# Regenerate the dynamics tables (slow) instead of loading cached ones:
#compute_full_dynamics_in_parallel()
load_dynamics()

value_function = value_iteration()
optimal_policy = get_optimal_policy(value_function)
value_function1, optimal_policy1 = policy_iteration()

#UNIT TEST FOR POLICY EVALUATION
#print(v is policy_evaluation(optimal_policy))
#print(np.sum((np.sum(v - policy_evaluation(optimal_policy))>10**(-3))))

#Compare Algorithms
print("Value functions differ in {} states".format(np.sum((np.sum(value_function1 - value_function>10**(-3))))))
print('Are value functions the same objects?', value_function is value_function1)
print('Are obtained policies the same in value?',(optimal_policy == optimal_policy1).all())
print('Are obtained policies the same objects?', optimal_policy is optimal_policy1)
# Removed a stray `assert False` debug halt that unconditionally crashed the
# script right after the comparison printout.
| """
Car Rental Problem
Two renting car locations.
-If a customer comes and rents a car the reward is 10$.
-If he is out of cars the business is lost.
-Cars are avaliable a day after they are returned
-Number of requested cars ~ Poisson(3)
-Number of returned cars ~ Poisson(4)
-No more than 20 cars at each location. (Any extra cars just dissappear)
-Each night you can move 0,1,2,3,4 or 5 cars between the two locations
-The cost of moving a car is 2$
-Gamma = 0.9
states: (0,0) -> (20,20)
actions: [-5,-4,-3,-2,-1,0,1,2,3,4,5] - How many cars we move from location 1 to location 2
The value function of states (0,x) and (x,0) is 0
"""
import time
import os
import pickle
import multiprocessing
from multiprocessing.pool import ThreadPool as Pool
import scipy.stats as stats
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
moving_cost = 2
rent_price = 10
max_cars = 20
request_mean_1 = 3
request_mean_2 = 4
return_mean_1 = 3
return_mean_2 = 2
compute = True
def compute_dynamics(init_state):
global max_cars, request_mean_1, request_mean_2, return_mean_1, return_mean_2, rent_price, moving_cost
i, j = init_state
transition_probabilities_and_rewards = {}
#HANDLE REQUESTS
#CASE 1: LOST BUSSINES
prob_lost_1 = 1 - stats.poisson.cdf(k=i, mu = request_mean_1)
prob_lost_2 = 1 - stats.poisson.cdf(k=j, mu = request_mean_2)
prob_lost = prob_lost_1 + prob_lost_2 - prob_lost_1*prob_lost_2
#CASE 2: NOT LOST
#LOOP OVER VALID RENTS
for req_1 in range(i + 1):
for req_2 in range(j + 1):
req_prob = stats.poisson.pmf(k = req_1, mu = request_mean_1) * stats.poisson.pmf(k = req_2, mu = request_mean_2)
reward = (req_1 + req_2)*rent_price
#LOOP OVER RETURNS
for ret_1 in range(max_cars + 1 - (i-req_1)):
probb = 0
for ret_2 in range(max_cars + 1 - (j-req_2)):
#ADD IFS
if ret_1 == max_cars - (i-req_1):
ret_1_prob = 1 - stats.poisson.cdf(k = ret_1 - 1, mu = return_mean_1)
else:
ret_1_prob = stats.poisson.pmf(k = ret_1, mu = return_mean_1)
if ret_2 == max_cars - (j-req_2):
ret_2_prob = 1 - stats.poisson.cdf(k = ret_2 - 1, mu = return_mean_2)
else:
ret_2_prob = stats.poisson.pmf(k = ret_2, mu = return_mean_2)
final_state = (i - req_1 + ret_1, j - req_2 + ret_2)
ret_prob = ret_1_prob * ret_2_prob
probb += ret_2_prob
if final_state not in transition_probabilities_and_rewards:
transition_probabilities_and_rewards[final_state] = []
transition_probabilities_and_rewards[final_state].append((req_prob*ret_prob, reward))
transition_probabilities_and_expected_rewards = {}
transition_probabilities = np.zeros((max_cars+1, max_cars+1))
expected_rewards = np.zeros((max_cars+1, max_cars+1))
for state in transition_probabilities_and_rewards:
total_prob = 0
expected_reward = 0
for prob, reward in transition_probabilities_and_rewards[state]:
total_prob += prob
expected_reward += prob*reward
transition_probabilities[state[0], state[1]] = total_prob
expected_rewards[state[0], state[1]] = expected_reward / total_prob
if 1 - np.sum(np.sum(transition_probabilities)) - prob_lost > 0.01:
print("ATTENTION")
print("Transition probabilities for this state do not add up to 1!")
print(init_state, "{:.2f}".format(100*(1 - np.sum(np.sum(transition_probabilities)) - prob_lost)))
return transition_probabilities, expected_rewards, prob_lost
def compute_dynamics_one_it(i):
global max_cars, transition_probabilities, expected_rewards, lost_probabilities
start = time.time()
for j in range(max_cars+1):
transition_probabilities[i,j,:,:], expected_rewards[i,j,:,:], lost_probabilities[i,j] = compute_dynamics((i,j))
print(i,j)
print(i, time.time() - start,'s')
def compute_full_dynamics_in_parallel():
global max_cars, transition_probabilities, expected_rewards, lost_probabilities
pool_size = multiprocessing.cpu_count()-1
#pool_size = 4
pool = Pool(pool_size)
start = time.time()
for i in range(max_cars+1):
pool.apply_async(compute_dynamics_one_it, (i,))
pool.close()
pool.join()
with open('./pymdp/saved_variab les/transition_probabilities.pkl', 'wb') as f:
pickle.dump(transition_probabilities, f)
with open('./pymdp/saved_variables/expected_rewards.pkl', 'wb') as f:
pickle.dump(expected_rewards, f)
with open('./pymdp/saved_variables/lost_probabilities.pkl', 'wb') as f:
pickle.dump(lost_probabilities, f)
def compute_full_dynamics():
global max_cars, transition_probabilities, expected_rewards, lost_probabilities
start = time.time()
for i in range(max_cars+1):
print(i, time.time() - start,'s')
for j in range(max_cars+1):
transition_probabilities[i,j,:,:], expected_rewards[i,j,:,:], lost_probabilities[i,j] = compute_dynamics((i,j))
with open('./pymdp/saved_variables/transition_probabilities.pkl', 'wb') as f:
pickle.dump(transition_probabilities, f)
with open('./pymdp/saved_variables/expected_rewards.pkl', 'wb') as f:
pickle.dump(expected_rewards, f)
with open('./pymdp/saved_variables/lost_probabilities.pkl', 'wb') as f:
pickle.dump(lost_probabilities, f)
def load_dynamics():
global transition_probabilities, expected_rewards, lost_probabilities
with open('./saved_variables/transition_probabilities.pkl', 'rb') as f:
transition_probabilities = pickle.load(f)
with open('./saved_variables/expected_rewards.pkl', 'rb') as f:
expected_rewards = pickle.load(f)
with open('./saved_variables/lost_probabilities.pkl', 'rb') as f:
lost_probabilities = pickle.load(f)
def value_iteration(gamma = 0.9, delta = 10**(-3)):
global max_cars, request_mean_1, request_mean_2, return_mean_1, return_mean_2, rent_price, moving_cost, transition_probabilities, expected_rewards, optimal_policy
max_improvement = 10
v = np.zeros((max_cars + 1, max_cars + 1))
while max_improvement > delta:
#print('HI', max_improvement)
max_improvement = 0
#LOOP OVER STATES
for i in range(max_cars + 1):
for j in range(max_cars + 1):
best_value = -1
#LOOP OVER ACTIONS
for a in actions:
new_i = i - a
new_j = j + a
if new_i>max_cars or new_i<0 or new_j>max_cars or new_j<0:
continue
action_value = np.sum(np.sum(np.multiply(transition_probabilities[new_i, new_j], gamma*v + expected_rewards[new_i,new_j]))) - abs(a)*moving_cost
if action_value > best_value:
best_value = action_value
improvement = abs(v[i,j] - best_value)
v[i,j] = best_value
if improvement > max_improvement:
max_improvement = improvement
return v
def get_optimal_policy(v, gamma = 0.9):
optimal_policy = np.zeros((max_cars + 1, max_cars + 1), dtype = int)
for i in range(max_cars + 1):
for j in range(max_cars + 1):
best_value = -1
#LOOP OVER ACTIONS
for a in actions:
new_i = i - a
new_j = j + a
if new_i>max_cars or new_i<0 or new_j>max_cars or new_j<0:
continue
action_value = np.sum(np.sum(np.multiply(transition_probabilities[new_i, new_j], gamma*v + expected_rewards[new_i,new_j]))) - abs(a)*moving_cost
if action_value >= best_value:
best_value = action_value
optimal_policy[i,j] = a
return optimal_policy
def policy_evaluation(policy, gamma = 0.9, delta = 10**(-3)):
global max_cars, request_mean_1, request_mean_2, return_mean_1, return_mean_2, rent_price, moving_cost, v, transition_probabilities, expected_rewards
v = np.zeros((max_cars + 1, max_cars + 1))
max_improvement = delta + 1
while max_improvement > delta:
max_improvement = 0
#LOOP OVER STATES
for i in range(max_cars + 1):
for j in range(max_cars + 1):
action = policy[i,j]
new_i = i - action
new_j = j + action
new_value = np.sum(np.sum(np.multiply(transition_probabilities[new_i, new_j], gamma*v + expected_rewards[new_i,new_j]))) - abs(action)*moving_cost
improvement = abs(new_value - v[i,j])
if improvement > max_improvement:
max_improvement = improvement
v[i,j] = new_value
return v
def policy_improvement(policy, v, gamma = 0.9, delta = 10**(-3)):
    """One greedy improvement sweep over all states.

    For every state, switch *policy* to any feasible action whose one-step
    lookahead value strictly beats the currently selected action's.  Both
    *policy* and *v* are mutated in place; returns ``(policy, policy_stable)``
    where ``policy_stable`` is True iff no state changed its action.

    NOTE(review): *delta* is accepted but never used here -- presumably kept
    only for signature symmetry with policy_evaluation.
    """
    global max_cars, request_mean_1, request_mean_2, return_mean_1, return_mean_2, rent_price, moving_cost, transition_probabilities, expected_rewards
    policy_stable = True
    for i in range(max_cars + 1):
        for j in range(max_cars + 1):
            current_action = policy[i,j]
            # State after the overnight move of the current policy action.
            new_i = i - current_action
            new_j = j + current_action
            # Value of sticking with the current action; candidates below
            # must strictly exceed this to replace it.
            current_action_value = np.sum(np.sum(np.multiply(transition_probabilities[new_i, new_j], gamma*v + expected_rewards[new_i,new_j]))) - abs(current_action)*moving_cost
            # LOOP OVER ACTIONS: a > 0 moves a cars from location 1 to 2.
            for a in actions:
                new_i = i - a
                new_j = j + a
                # Skip moves that would over- or under-fill either lot.
                if new_i>max_cars or new_i<0 or new_j>max_cars or new_j<0:
                    continue
                action_value = np.sum(np.sum(np.multiply(transition_probabilities[new_i, new_j], gamma*v + expected_rewards[new_i,new_j]))) - abs(a)*moving_cost
                if action_value > current_action_value:
                    # Strictly better action: adopt it, update v immediately
                    # (affects later states in this sweep), raise the bar for
                    # the remaining candidates, and mark the sweep unstable.
                    v[i,j] = action_value
                    policy[i,j] = a
                    policy_stable = False
                    current_action_value = action_value
    return policy, policy_stable
def policy_iteration(k = 3, gamma = 0.9, delta = 10**(-3)):
    """Classic policy iteration: alternate evaluation and improvement.

    Starts from the all-zeros policy (move no cars) and repeats
    policy_evaluation / policy_improvement until a sweep changes no action.

    Parameters
    ----------
    k : int
        Unused; retained for backward compatibility with existing callers.
    gamma : float
        Discount factor, forwarded to both sub-steps.
    delta : float
        Convergence threshold for policy evaluation.

    Returns
    -------
    (np.ndarray, np.ndarray)
        The converged value function and the stable greedy policy.
    """
    policy = np.zeros((max_cars + 1, max_cars + 1), dtype = int)
    policy_stable = False
    # The loop always runs at least once, so v is always assigned before
    # being returned (the old pre-loop zero-initialization was dead code).
    while not policy_stable:
        v = policy_evaluation(policy, gamma, delta)
        policy, policy_stable = policy_improvement(policy, v, gamma, delta)
    return v, policy
def plot_value_function3D():
    """Render the value function v(i, j) as a 3-D scatter plot."""
    # Enumerate every state once, then split into coordinate lists.
    states = [(i, j) for i in range(max_cars + 1) for j in range(max_cars + 1)]
    xs = [s[0] for s in states]
    ys = [s[1] for s in states]
    zs = [v[i, j] for i, j in states]
    figure = plt.figure()
    axes = figure.add_subplot(111, projection='3d')
    axes.scatter(xs, ys, zs)
    plt.show()
def plot_optimal_policy3D():
    """Render the optimal policy (cars moved per state) as a 3-D scatter plot."""
    # One (i, j, action) triple per state, then unzip into axis sequences.
    triples = [(i, j, optimal_policy[i, j])
               for i in range(max_cars + 1)
               for j in range(max_cars + 1)]
    xs, ys, zs = zip(*triples)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(xs, ys, zs)
    plt.show()
# ---- Script body: set up globals, solve the MDP both ways, compare. ----
# Placeholder value function; overwritten by value_iteration() below.
value_function = np.zeros((max_cars + 1, max_cars + 1))
# Actions: move -5..+5 cars from location 1 to location 2 overnight.
actions = [a for a in range(-5, 6)]
# Dynamics tables indexed by post-move state (i, j) and successor state.
transition_probabilities = np.zeros((max_cars + 1, max_cars + 1, max_cars + 1, max_cars + 1))
expected_rewards = np.zeros((max_cars + 1, max_cars + 1, max_cars + 1, max_cars + 1))
lost_probabilities = np.zeros((max_cars + 1, max_cars + 1))
# Dynamics are loaded from disk; recomputing them is the slow alternative.
#compute_full_dynamics_in_parallel()
load_dynamics()
# Solve once with value iteration, once with policy iteration.
value_function = value_iteration()
optimal_policy = get_optimal_policy(value_function)
value_function1, optimal_policy1 = policy_iteration()
#UNIT TEST FOR POLICY EVALUATION
#print(v is policy_evaluation(optimal_policy))
#print(np.sum((np.sum(v - policy_evaluation(optimal_policy))>10**(-3))))
#Compare Algorithms
print("Value functions differ in {} states".format(np.sum((np.sum(value_function1 - value_function>10**(-3))))))
print('Are value functions the same objects?', value_function is value_function1)
print('Are obtained policies the same in value?',(optimal_policy == optimal_policy1).all())
print('Are obtained policies the same objects?', optimal_policy is optimal_policy1)
# NOTE(review): this halts the script before the 3-D plotting helpers can
# be called -- presumably a deliberate debugging stop; confirm before
# shipping.
assert False
| en | 0.808405 | Car Rental Problem Two renting car locations. -If a customer comes and rents a car the reward is 10$. -If he is out of cars the business is lost. -Cars are avaliable a day after they are returned -Number of requested cars ~ Poisson(3) -Number of returned cars ~ Poisson(4) -No more than 20 cars at each location. (Any extra cars just dissappear) -Each night you can move 0,1,2,3,4 or 5 cars between the two locations -The cost of moving a car is 2$ -Gamma = 0.9 states: (0,0) -> (20,20) actions: [-5,-4,-3,-2,-1,0,1,2,3,4,5] - How many cars we move from location 1 to location 2 The value function of states (0,x) and (x,0) is 0 #HANDLE REQUESTS #CASE 1: LOST BUSSINES #CASE 2: NOT LOST #LOOP OVER VALID RENTS #LOOP OVER RETURNS #ADD IFS #pool_size = 4 #print('HI', max_improvement) #LOOP OVER STATES #LOOP OVER ACTIONS #LOOP OVER ACTIONS #LOOP OVER STATES #LOOP OVER ACTIONS #compute_full_dynamics_in_parallel() #UNIT TEST FOR POLICY EVALUATION #print(v is policy_evaluation(optimal_policy)) #print(np.sum((np.sum(v - policy_evaluation(optimal_policy))>10**(-3)))) #Compare Algorithms | 3.740256 | 4 |
manipulator/script/demo_task.py | andy-Chien/blue_arm | 0 | 6613950 | <filename>manipulator/script/demo_task.py
#!/usr/bin/env python
import rospy
from manipulator import BlueArmMoveGroup
def main():
    """Drive the blue arm through four sample poses at decreasing speeds.

    Each waypoint is (position [x, y, z], euler angles, speed factor); the
    position is fixed while one rotation axis at a time is set to 30.  A
    failed motion is logged but does not abort the remaining waypoints,
    matching the original copy-pasted sequence.
    """
    rospy.init_node('blue_arm_sample', anonymous=True)
    blue_arm = BlueArmMoveGroup()
    # (position, euler, speed) -- collapses four duplicated code blocks.
    waypoints = [
        ([0.3, 0, 0.15], [0, 0, 0], 0.8),
        ([0.3, 0, 0.15], [30, 0, 0], 0.6),
        ([0.3, 0, 0.15], [0, 30, 0], 0.4),
        ([0.3, 0, 0.15], [0, 0, 30], 0.2),
    ]
    for pos, euler, speed in waypoints:
        blue_arm.set_speed(speed)
        # go_to_pose_goal returns False on planning/execution failure.
        if blue_arm.go_to_pose_goal(pos, euler) is False:
            rospy.logerr("Move Robot Failed!!")
if __name__ == '__main__':
main() | <filename>manipulator/script/demo_task.py
#!/usr/bin/env python
import rospy
from manipulator import BlueArmMoveGroup
def main():
    """Run the blue-arm pose demo: visit four poses, slowing down each time."""
    rospy.init_node('blue_arm_sample', anonymous=True)
    arm = BlueArmMoveGroup()
    # One rotation axis at a time is set to 30 degrees; position is fixed.
    orientations = [[0, 0, 0], [30, 0, 0], [0, 30, 0], [0, 0, 30]]
    speed_factors = [0.8, 0.6, 0.4, 0.2]
    for rpy, factor in zip(orientations, speed_factors):
        arm.set_speed(factor)
        if arm.go_to_pose_goal([0.3, 0, 0.15], rpy) is False:
            rospy.logerr("Move Robot Failed!!")
if __name__ == '__main__':
main() | ru | 0.26433 | #!/usr/bin/env python | 2.595951 | 3 |