text string | size int64 | token_count int64 |
|---|---|---|
import enc
import config
import motor
import threading
import time
# Global handle to the encoder-reader thread; created inside main().
enc_t = None
# Inclusive-exclusive range of PWM duty-cycle values swept per motor.
pwm_range = (50, 90)
class EncoderThread(enc.EncoderReader, threading.Thread):
    # Runs the encoder reader on a background thread so encoder counts
    # update while main() drives the motors.
    def __init__(self):
        # Both bases are initialised explicitly because neither base
        # cooperates through super() here.
        enc.EncoderReader.__init__(self)
        threading.Thread.__init__(self)
def main():
    # Sweep PWM values on each motor until the encoder registers motion,
    # reporting the minimum duty cycle that actually turns each wheel.
    global enc_t
    enc_t = EncoderThread()
    enc_t.start()
    # NOTE(review): cleanup=False presumably keeps GPIO state alive for the
    # second motor test -- confirm against motor.motor_setup.
    with motor.motor_setup(*config.LMP, cleanup=False) as run:
        print 'Left motor'
        for i in range(*pwm_range):
            print "test with %s pwm" % (i)
            run(i)
            # Any positive encoder count means the left wheel moved.
            if enc.ENC_POS[enc.LEFT] > 0:
                print "done LEFT pwm min is %s" % (i)
                run(0)  # stop the motor
                break
            time.sleep(2)  # give the wheel time to start turning
    time.sleep(5)  # let the left motor coast down before the right test
    with motor.motor_setup(*config.RMP) as run:
        print '\nRight motor'
        for i in range(*pwm_range):
            print "test with %s pwm" % (i)
            run(i)
            if enc.ENC_POS[enc.RIGHT] > 0:
                print "done RIGHT pwm min is %s" % (i)
                run(0)
                break
            time.sleep(2)
    enc_t.stop()
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # On Ctrl-C, stop the encoder thread before exiting. Guard against
        # the interrupt arriving before main() created the thread, in which
        # case enc_t is still None and .stop() would raise AttributeError.
        if enc_t is not None:
            enc_t.stop()
| 1,198 | 411 |
from typing import Final, Literal
# Alias for a group's permission list: (app_label, permission_codename) pairs.
# NOTE(review): wrapping the alias itself in Final is unusual -- it works as
# an annotation, but Final normally wraps the annotated use site; confirm
# this passes the project's type checker.
DefaultPermissionsType = Final[list[tuple[str, str]]]
# Default ResponsibleGroup types
PRIMARY_TYPE: Literal["PRIMARY"] = "PRIMARY"
HR_TYPE: Literal["HR"] = "HR"
# Human-readable names of the default groups.
ORGANIZATION: Final = "Organization member"
INDOK: Final = "Indøk"
REGISTERED_USER: Final = "Registered user"
PRIMARY_GROUP_NAME: Final = "Medlem"
HR_GROUP_NAME: Final = "HR"
# Organization members may manage their own events, listings and memberships.
DEFAULT_ORGANIZATION_PERMISSIONS: DefaultPermissionsType = [
    ("events", "add_event"),
    ("events", "change_event"),
    ("events", "delete_event"),
    ("listings", "add_listing"),
    ("listings", "change_listing"),
    ("listings", "delete_listing"),
    ("organizations", "add_membership"),
]
# Indøk users get read access plus signup/answer capabilities.
DEFAULT_INDOK_PERMISSIONS: DefaultPermissionsType = [
    ("listings", "view_listing"),
    ("events", "add_signup"),
    ("events", "view_signup"),
    ("events", "change_signup"),
    ("organizations", "view_organization"),
    ("forms", "add_answer"),
    ("forms", "change_answer"),
    ("forms", "view_answer"),
    ("forms", "view_form"),
    ("forms", "add_response"),
    ("archive", "view_archivedocument"),
]
# Any registered user may at least view events.
DEFAULT_REGISTERED_USER_PERMISSIONS: DefaultPermissionsType = [
    ("events", "view_event"),
]
# Group name -> default permission set; used when seeding groups.
DEFAULT_GROUPS = {
    ORGANIZATION: DEFAULT_ORGANIZATION_PERMISSIONS,
    INDOK: DEFAULT_INDOK_PERMISSIONS,
    REGISTERED_USER: DEFAULT_REGISTERED_USER_PERMISSIONS,
}
| 1,364 | 484 |
from __future__ import annotations
from typing import Optional
from colorama import Fore, Style
from magda.utils.logger.parts import LoggerParts
from magda.utils.logger.printers.base import BasePrinter
from magda.utils.logger.printers.shared import with_log_level_colors
class MessagePrinter(BasePrinter):
    """Renders a log message, optionally colorized with colorama.

    Event messages are wrapped in square brackets and colored bright
    green; regular messages are colored according to their log level.
    """

    # Markers wrapped around event messages.
    EVENT_START_MARKER = '['
    EVENT_END_MARKER = ']'

    def _with_event_colors(self, text: str) -> str:
        # Bright green for events; reset both color and brightness after.
        return (
            Style.BRIGHT + Fore.GREEN
            + text
            + Fore.RESET + Style.NORMAL
        )

    def flush(
        self,
        colors: bool,
        msg: Optional[str] = None,
        is_event: bool = False,
        level: Optional[LoggerParts.Level] = None,
        **kwargs,
    ) -> Optional[str]:
        """Return the formatted message text.

        colors: apply ANSI colors when True.
        msg: the message body (may be None).
        is_event: wrap in event markers instead of level coloring.
        level: log level used to pick the color for non-event messages.
        """
        if is_event:
            text = f'{self.EVENT_START_MARKER}{msg}{self.EVENT_END_MARKER}'
            return self._with_event_colors(text) if colors else text
        level_value = level.value if level else None
        return with_log_level_colors(msg, level_value) if colors else msg
| 1,037 | 331 |
from typing import List
def zcount(list: List[float]) -> float:
    """Return the number of observations in *list*."""
    # Kept as a thin wrapper over len() so the statistics helpers read
    # uniformly. (The parameter name shadows the builtin, preserved for
    # interface compatibility.)
    size = len(list)
    return size
# print("stats test")
# print("zcount should be 5 ==", zcount([1.0,2.0,3.0,4.0,5.0]))
def zmean(list: List[float]) -> float:
    """Return the arithmetic mean of *list*."""
    total = sum(list)
    return total / len(list)
def zmode(list: List[float]) -> float:
    """Return the most frequent value (ties broken by set iteration order)."""
    distinct = set(list)
    return max(distinct, key=lambda value: list.count(value))
def zmedian(list: List[float]) -> float:
    """Return the median: middle element, or mean of the two middles."""
    ordered = sorted(list)
    n = len(list)
    mid = (n - 1) // 2
    if n % 2 == 1:
        # Odd count: a single middle element exists.
        return ordered[mid]
    # Even count: average the two central elements.
    return (ordered[mid] + ordered[mid + 1]) / 2.0
def zvariance(list: List[float]) -> float:
    """Return the sample variance of *list* (n - 1 denominator)."""
    dof = len(list) - 1                 # degrees of freedom
    mean = sum(list) / len(list)        # mean of the data
    # Squared deviations from the mean.
    squared = [abs(mean - value) ** 2 for value in list]
    return sum(squared) / dof
def zstddev(list: List[float]) -> float:
    """Return the sample standard deviation of *list*.

    Square root of the sample variance (n - 1 denominator), matching
    zvariance. Previously an unimplemented stub that returned 0.0.
    """
    n = len(list)
    mean = sum(list) / n
    variance = sum((x - mean) ** 2 for x in list) / (n - 1)
    return variance ** 0.5
def zstderr(list: List[float]) -> float:
    """Return the standard error of the mean: stddev / sqrt(n).

    Uses the sample standard deviation (n - 1 denominator), consistent
    with zvariance/zstddev. Previously an unimplemented stub.
    """
    n = len(list)
    mean = sum(list) / n
    variance = sum((x - mean) ** 2 for x in list) / (n - 1)
    return (variance / n) ** 0.5
def zcov(a, b):
    """Return the sample covariance of the paired observations a, b.

    Uses the n - 1 denominator, consistent with zvariance. Previously an
    unimplemented stub (returned None).
    """
    n = len(a)
    mean_a = sum(a) / n
    mean_b = sum(b) / n
    return sum((x - mean_a) * (y - mean_b) for x, y in zip(a, b)) / (n - 1)
def zcorr(listx: List[float], listy: List[float]) -> float:
    """Return the Pearson correlation coefficient of listx and listy.

    cov(x, y) / (std(x) * std(y)); the shared normalisation factors
    cancel, so raw deviation sums are used. Previously a stub (0.0).
    """
    n = len(listx)
    mean_x = sum(listx) / n
    mean_y = sum(listy) / n
    cross = sum((x - mean_x) * (y - mean_y) for x, y in zip(listx, listy))
    sx = sum((x - mean_x) ** 2 for x in listx) ** 0.5
    sy = sum((y - mean_y) ** 2 for y in listy) ** 0.5
    return cross / (sx * sy)
def readDataSets(files):
    """Map each file name in *files* to its (x, y) column pair."""
    return {name: readDataFile(name) for name in files}
def readDataFile(file):
    """Parse a two-column CSV file into parallel float lists (x, y).

    The first line is treated as a header and discarded.
    """
    xs, ys = [], []
    with open(file) as handle:
        handle.readline()  # consume the header row
        for record in handle:
            fields = record.split(',')
            xs.append(float(fields[0]))
            ys.append(float(fields[1]))
    return (xs, ys)
| 1,615 | 594 |
"""Mixtape Model."""
from masoniteorm.models import Model
class Mixtape(Model):
    """Masonite ORM model backed by the "mixtape" database table."""
    # Explicit table name (the default pluralisation is overridden).
    __table__ = "mixtape"
import numpy as np
from scipy.linalg import solve_toeplitz, solve
from scipy.signal import fftconvolve
from scipy.interpolate import Rbf
from scorr import xcorr, xcorr_grouped_df, xcorrshift, fftcrop, corr_mat
# Helpers
# =====================================================================
def integrate(x):
    """Return the lag-1 cumulative sum of *x*.

    Turns returns into a price series, or a differential kernel into an
    integrated one. Element 0 is 0 and the last element of x is dropped.
    """
    partial = np.cumsum(x[:-1])
    return np.concatenate(([0], partial))
def smooth_tail_rbf(k, l0=3, tau=5, smooth=1, epsilon=1):
    """Smooth the tail of array k (lags >= l0) with radial basis functions.

    The RBF fit is done in log-lag space; the smoothed values are blended
    with the originals using an exponential weight so short lags stay
    close to the raw data. Entries below l0 are returned unchanged.
    """
    # interpolate in log-lags
    l = np.log(np.arange(l0, len(k)))
    # estimate functions
    krbf = Rbf(
        l, k[l0:], function='multiquadric', smooth=smooth, epsilon=epsilon
    )
    # Blend weight decays with lag: w ~ exp(-lag/tau), so short lags keep
    # mostly the original values and long lags mostly the RBF fit.
    w = np.exp(-np.arange(1, len(k)-l0+1) / float(tau))
    # interpolate
    knew = np.empty_like(k)
    knew[:l0] = k[:l0]
    knew[l0:] = krbf(l) * (1-w) + k[l0:] * w
    # done
    return knew
def propagate(s, G, sfunc=np.sign):
    """Simulate a propagator model from signs *s* and one kernel *G*.

    Equivalent to tim1, to one of the kernels in tim2, or to hdim2.
    The convolution is truncated back to len(s) samples.
    """
    n_steps = len(s)
    signs = sfunc(s[:n_steps])
    return fftconvolve(signs, G)[:n_steps]
# Responses
# =====================================================================
def _return_response(ret, x, maxlag):
"""Helper for response and response_grouped_df."""
# return what?
ret = ret.lower()
res = []
for i in ret:
if i == 'l':
# lags
res.append(np.arange(-maxlag,maxlag+1))
elif i == 's':
res.append(
# differential response
np.concatenate([x[-maxlag:], x[:maxlag+1]])
)
elif i == 'r':
res.append(
# bare response === cumulated differential response
np.concatenate([
-np.cumsum(x[:-maxlag-1:-1])[::-1],
[0],
np.cumsum(x[:maxlag])
])
)
if len(res) > 1:
return tuple(res)
else:
return res[0]
def response(r, s, maxlag=10**4, ret='lsr', subtract_mean=False):
    """Return lag, differential response S, response R.

    Note that this commonly used price response is a simple cross correlation
    and NOT equivalent to the linear response in systems analysis.

    Parameters:
    ===========
    r: array-like
        Returns
    s: array-like
        Order signs
    maxlag: int
        Longest lag to calculate
    ret: str
        can include 'l' to return lags, 'r' to return response, and
        's' to return differential response (in specified order).
    subtract_mean: bool
        Subtract means first. Default: False (signal means already zero)
    """
    # Cap maxlag so the slices in _return_response stay in range.
    maxlag = min(maxlag, len(r) - 2)
    # Truncate signs to the length of the returns series.
    s = s[:len(r)]
    # diff. resp.
    # xcorr == S(0, 1, ..., maxlag, -maxlag, ... -1)
    x = xcorr(r, s, norm='cov', subtract_mean=subtract_mean)
    return _return_response(ret, x, maxlag)
def response_grouped_df(
    df, cols, nfft='pad', ret='lsr', subtract_mean=False, **kwargs
):
    """Return lag, differential response S, response R calculated daily.

    Note that this commonly used price response is a simple cross correlation
    and NOT equivalent to the linear response in systems analysis.

    Parameters
    ==========
    df: pandas.DataFrame
        Dataframe containing order signs and returns
    cols: tuple
        The columns of interest
    nfft:
        Length of the fft segments
    ret: str
        What to return ('l': lags, 'r': response, 's': incremental response).
    subtract_mean: bool
        Subtract means first. Default: False (signal means already zero)

    See also response, spectral.xcorr_grouped_df for more explanations
    """
    # diff. resp.
    x = xcorr_grouped_df(
        df,
        cols,
        by='date',
        nfft=nfft,
        funcs=(lambda x: x, lambda x: x),
        subtract_mean=subtract_mean,
        norm='cov',
        return_df=False,
        **kwargs
    )[0]
    # lag 1 -> element 0, lag 0 -> element -1, ...
    #x = x['xcorr'].values[x.index.values-1]
    # Use integer (floor) division: under Python 3, `/` would produce a
    # float maxlag and the slicing in _return_response would raise
    # TypeError.
    maxlag = len(x) // 2
    return _return_response(ret, x, maxlag)
# Analytical power-laws
# =====================================================================
def beta_from_gamma(gamma):
    """Return the exponent beta of the (integrated) propagator decay
    G(lag) = lag**-beta that compensates a sign autocorrelation
    C(lag) = lag**-gamma.
    """
    return 0.5 * (1 - gamma)
def G_pow(steps, beta):
    """Return the power-law propagator kernel G(l) for l = 0 ... steps-1.

    G(0) is defined as 0; for l >= 1, G(l) = l**-beta.
    """
    lags = np.arange(1, steps)
    return np.r_[0, lags ** -beta]
def k_pow(steps, beta):
    """Return the increment g(l) of the power-law propagator kernel,
    i.e. the first difference of G_pow, for l = 0 ... steps-2."""
    kernel = G_pow(steps, beta)
    return np.diff(kernel)
# TIM1 specific
# =====================================================================
def calibrate_tim1(c, Sl, maxlag=10**4):
    """Return the empirical estimate of the TIM1 kernel.

    Solves the Toeplitz system built from the sign autocovariance against
    the (central, positive-lag) part of the price response.

    Parameters:
    ===========
    c: array-like
        Cross-correlation (covariance).
    Sl: array-like
        Price-response. If the response is differential, so is the
        returned kernel.
    maxlag: int
        length of the kernel.

    See also: integrate, g2_empirical, tim1
    """
    half = int(len(Sl) / 2)
    rhs = Sl[half:half + maxlag]
    return solve_toeplitz(c[:maxlag], rhs)
def tim1(s, G, sfunc=np.sign):
    """Simulate Transient Impact Model 1, return price or return.

    Result is the price p when the bare responses G is passed
    and the 1 step ahead return p(t+1)-p(t) for the differential kernel
    g, where G == numpy.cumsum(g).

    Parameters:
    ===========
    s: array-like
        Order signs
    G: array-like
        Kernel

    See also: calibrate_tim1, integrate, tim2, hdim2.
    """
    # TIM1 is exactly a single-kernel propagation.
    return propagate(s, G, sfunc=sfunc)
# TIM2 specific
# =====================================================================
def calibrate_tim2(
    nncorr, cccorr, cncorr, nccorr, Sln, Slc, maxlag=2**10
):
    """
    Return empirical estimate for both kernels of the TIM2.
    (Transient Impact Model with two propagators)

    Parameters:
    ===========
    nncorr: array-like
        Cross-covariance between non-price-changing (n-) orders.
    cccorr: array-like
        Cross-covariance between price-changing (c-) orders.
    cncorr: array-like
        Cross-covariance between c- and n-orders
    nccorr: array-like
        Cross-covariance between n- and c-orders.
    Sln: array-like
        (incremental) price response for n-orders
    Slc: array-like
        (incremental) price response for c-orders
    maxlag: int
        Length of the kernels.

    See also: calibrate_tim1, calibrate_hdim2
    """
    # incremental response: take the positive-lag halves of both responses
    # and stack them into one right-hand side.
    lSn = int(len(Sln) / 2)
    lSc = int(len(Slc) / 2)
    S = np.concatenate([Sln[lSn:lSn+maxlag], Slc[lSc:lSc+maxlag]])
    # covariance matrix: 2x2 block structure over (n, c) order types.
    mat_fn = lambda x: corr_mat(x, maxlag=maxlag)
    C = np.bmat([
        [mat_fn(nncorr), mat_fn(cncorr)],
        [mat_fn(nccorr), mat_fn(cccorr)]
    ])
    # solve the joint linear system and split the stacked solution back
    # into the two kernels.
    g = solve(C, S)
    gn = g[:maxlag]
    gc = g[maxlag:]
    return gn, gc
def tim2(s, c, G_n, G_c, sfunc=np.sign):
    """Simulate Transient Impact Model 2

    Returns prices when integrated kernels are passed as arguments
    or returns for differential kernels.

    Parameters:
    ===========
    s: array
        Trade signs
    c: array
        Trade labels (1 = change; 0 = no change)
    G_n: array
        Kernel for non-price-changing trades
    G_c: array
        Kernel for price-changing trades
    sfunc: function [optional]
        Function to apply to signs. Default: np.sign.

    See also: calibrate_tim2, tim1, hdim2.
    """
    assert c.dtype == bool, "c must be a boolean indicator!"
    # Split the sign series by trade label and propagate each part with
    # its own kernel; the price is the sum of both contributions.
    return propagate(s * c, G_c) + propagate(s * (~c), G_n)
# HDIM2 specific
# =====================================================================
def calibrate_hdim2(
    Cnnc, Cccc, Ccnc, Sln, Slc,
    maxlag=None, force_lag_zero=True
):
    """Return empirical estimate for both kernels of the HDIM2.
    (History Dependent Impact Model with two propagators).

    Requres three-point correlation matrices between the signs of one
    non-lagged and two differently lagged orders.
    We distinguish between price-changing (p-) and non-price-changing (n-)
    orders. The argument names corresponds to the argument order in
    spectral.x3corr.

    Parameters:
    ===========
    Cnnc: 2d-array-like
        Cross-covariance matrix for n-, n-, c- orders.
    Cccc: 2d-array-like
        Cross-covariance matrix for c-, c-, c- orders.
    Ccnc: 2d-array-like
        Cross-covariance matrix for c-, n-, c- orders.
    Sln: array-like
        (incremental) lagged price response for n-orders
    Slc: array-like
        (incremental) lagged price response for c-orders
    maxlag: int
        Length of the kernels. Defaults to half the shorter of
        len(Cccc) and len(Sln).
    force_lag_zero: bool
        Pin the lag-0 row of the system to the identity.

    See also: hdim2,
    """
    # Integer (floor) division: `/` would make maxlag a float under
    # Python 3 and every slice below would raise TypeError.
    maxlag = maxlag or min(len(Cccc), len(Sln)) // 2
    # incremental response: positive-lag halves of both responses stacked.
    lSn = int(len(Sln) / 2)
    lSc = int(len(Slc) / 2)
    S = np.concatenate([
        Sln[lSn:lSn+maxlag],
        Slc[lSc:lSc+maxlag]
    ])
    # covariance matrix in 2x2 block form; the nc block is the transpose
    # of the cn block.
    Cncc = Ccnc.T
    C = np.bmat([
        [Cnnc[:maxlag, :maxlag], Ccnc[:maxlag, :maxlag]],
        [Cncc[:maxlag, :maxlag], Cccc[:maxlag, :maxlag]]
    ])
    if force_lag_zero:
        C[0, 0] = 1
        C[0, 1:] = 0
    # solve the joint system and split into the two kernels.
    g = solve(C, S)
    gn = g[:maxlag]
    gc = g[maxlag:]
    return gn, gc
def hdim2(s, c, k_n, k_c, sfunc=np.sign):
    """Simulate History Dependent Impact Model 2, return return.

    Parameters:
    ===========
    s: array
        Trade signs
    c: array
        Trade labels (1 = change; 0 = no change)
    k_n: array
        Differential kernel for non-price-changing trades
    k_c: array
        Differential kernel for price-changing trades
    sfunc: function [optional]
        Function to apply to signs. Default: np.sign.

    See also: calibrate_hdim2, tim2, tim1.
    """
    assert c.dtype == bool, "c must be a boolean indicator!"
    # Like tim2, but the summed propagation is additionally gated by c:
    # only price-changing events contribute a non-zero return.
    return c * (propagate(s * c, k_c) + propagate(s * (~c), k_n))
| 10,872 | 3,816 |
from longest import longest
import unittest
class Test(unittest.TestCase):
    """Tests for longest(a, b): the sorted string of distinct letters
    occurring in either input string."""

    def test_1(self):
        result = longest("aretheyhere", "yestheyarehere")
        self.assertEqual(result, "aehrsty")

    def test_2(self):
        result = longest("loopingisfunbutdangerous", "lessdangerousthancoding")
        self.assertEqual(result, "abcdefghilnoprstu")

    def test_3(self):
        result = longest("inmanylanguages", "theresapairoffunctions")
        self.assertEqual(result, "acefghilmnoprstuy")

    def test_4(self):
        # Letters unique to either argument must still appear.
        result = longest("abcd", "abecd")
        self.assertEqual(result, "abcde")
# Run the suite when executed directly.
if __name__ == "__main__":
    unittest.main()
| 661 | 224 |
# Package/distribution name; presumably consumed by packaging metadata --
# confirm against setup.py / pyproject.toml.
name = "getv"
| 14 | 8 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: zparteka
"""
def read(infile):
    """Parse the Advent of Code day-16 input file.

    Returns (rules, ticket, nearby): rules maps a field name to a pair of
    inclusive (lo, hi) ranges, ticket is our own ticket and nearby is a
    list of other tickets (each a list of ints).
    """
    with open(infile, 'r') as f:
        line = f.readline()
        rules = {}
        # Rules section: "name: a-b or c-d", terminated by a blank line.
        while line != "\n":
            rule = line.strip().split(':')
            key = rule[0]
            r1 = rule[1].split()[0].split("-")
            r2 = rule[1].split()[2].split("-")
            rules[key] = ((int(r1[0]), int(r1[1])), (int(r2[0]), int(r2[1])))
            line = f.readline()
        # Consume the "your ticket:" header, then parse our ticket.
        line = f.readline()
        ticket = [int(i) for i in f.readline().strip().split(",")]
        nearby = []
        # Skip the blank line and the "nearby tickets:" header.
        f.readline()
        f.readline()
        # `line` still holds the header text (truthy), so this loop reads
        # ticket lines until readline() returns "" at EOF.
        while line:
            line = f.readline()
            if line != "":
                nearby.append([int(i) for i in line.strip().split(",")])
    return rules, ticket, nearby
def check_nearby(rules, nearby):
    """Return the sum of field values on nearby tickets that satisfy no rule.

    Parameters:
        rules: dict mapping field name -> ((lo1, hi1), (lo2, hi2))
        nearby: list of tickets, each a list of ints

    The leftover debug print of the flattened rules was removed.
    """
    # Flatten all (lo, hi) intervals; field names are irrelevant here.
    intervals = [iv for pair in rules.values() for iv in pair]
    wrong = 0
    for ticket in nearby:
        for number in ticket:
            # Invalid when the value falls outside every interval
            # (values are ints, per read()).
            if not any(lo <= number <= hi for lo, hi in intervals):
                wrong += number
                # Preserved from the original: stop scanning a ticket at
                # its first invalid value.
                break
    return wrong
def remove_invalid(rules, nearby):
    """Return only the nearby tickets whose every field value satisfies
    at least one rule interval."""
    intervals = [iv for pair in rules.values() for iv in pair]
    valid = []
    for candidate in nearby:
        keep = all(
            any(value in range(lo, hi + 1) for lo, hi in intervals)
            for value in candidate
        )
        if keep:
            valid.append(candidate)
    return valid
def find_positions(nearby, rules):
    """Return the ticket field indices whose deduced rule name starts
    with "departure".

    For every column, keep the rules that every value satisfies; then,
    processing columns with the fewest candidates first, peel off the one
    new rule name each column adds (puzzle inputs guarantee the candidate
    sets are nested).
    """
    # Columns of the nearby-ticket matrix.
    transposed = list(map(list, zip(*nearby)))
    result = [0] * len(transposed)
    for row in range(len(transposed)):
        possible_rules = list(rules.keys())
        for number in transposed[row]:
            for name in rules.keys():
                rule = rules[name]
                # Guard with `name in possible_rules`: previously a rule
                # violated by two values in the same column made
                # list.remove raise ValueError on the second removal.
                if (name in possible_rules
                        and number not in range(rule[0][0], rule[0][1] + 1)
                        and number not in range(rule[1][0], rule[1][1] + 1)):
                    possible_rules.remove(name)
        result[row] = (possible_rules, row)
    # Fewest candidates first: each entry then adds exactly one new name.
    result.sort(key=lambda t: len(t[0]))
    occured = [0] * len(result)
    for i in range(len(result)):
        for j in result[i][0]:
            if j not in occured:
                occured[result[i][1]] = j
    indexes = []
    for i in range(len(occured)):
        if occured[i].startswith("departure"):
            indexes.append(i)
    return indexes
def main():
    # Part 2: keep only valid nearby tickets, deduce which column carries
    # each field, and multiply our ticket's "departure" values together.
    example = "input"
    rules, ticket, nearby = read(example)
    valid_nearby = remove_invalid(rules, nearby)
    indexes = find_positions(valid_nearby, rules)
    answer = 1
    for i in indexes:
        answer *= ticket[i]
        print(ticket[i])
    print(answer)
    print(ticket)
# Script entry point.
if __name__ == '__main__':
    main()
| 3,035 | 944 |
'''
'''
from django.contrib.auth.models import User, Group
from rest_framework import status, viewsets
from rest_framework.exceptions import ValidationError
from rest_framework import mixins
from rest_framework.filters import OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend
from src.base_view import BaseViewSet
from ..serializers import UserSerializer, UserFullDataSerializer
from ..filters import UserFilter, UserFullDataFilter
class UserViewSet(BaseViewSet):
    '''
    Master user view.

    FILTERS:
        username: partial or exact match
        first_name: partial or exact match
        last_name: partial or exact match
        is_active: exact value
    '''
    permission_code = 'user'
    queryset = User.objects.all().prefetch_related('groups')
    serializer_class = UserSerializer
    filter_class = UserFilter
    filter_backends = (DjangoFilterBackend, OrderingFilter)

    def perform_destroy(self, instance):
        # Soft delete: deactivate instead of removing the row. DRF passes
        # the model instance here, so the previous re-query by pk (and the
        # unused `errors` dict, and the misleading `serializer` parameter
        # name) were removed.
        instance.is_active = False
        instance.save()
class UserFullDataViewSet(mixins.ListModelMixin, mixins.RetrieveModelMixin, viewsets.GenericViewSet):
    """Read-only user endpoint (list/retrieve) exposing full user data,
    ordered by username."""
    permission_code = 'user'
    queryset = User.objects.all().prefetch_related('groups').order_by('username')
    serializer_class = UserFullDataSerializer
    filter_class = UserFullDataFilter
    filter_backends = (DjangoFilterBackend, OrderingFilter)
| 1,527 | 469 |
from random import randint
from core.players import Players
class Human(Players):
    """Base playable character with default stats and combat behaviour.

    Combat narration strings are in French and are part of the runtime
    output, so they are left untouched.
    """

    def __init__(self, name, classe):
        super().__init__(name, classe)
        # Baseline stats; subclasses override several of these.
        self.hp = 100
        # NOTE(review): "strengh" is a probable typo for "strength", but
        # subclasses read it, so renaming would be a breaking change.
        self.strengh = 15
        self.defense = 15
        self.speed = 50

    def __str__(self, super_desc=None, super_stats=None):
        # Subclasses pass their own description/stat lines so the common
        # text is assembled in one place.
        desc = f"Je m'appelle {self.name} et je suis un "
        if super_desc:
            desc += super_desc
        else:
            desc += f"simple {self.classe}.\n"
        stats = f"Mes stats sont : \nhp = {self.hp}\nstrengh = {self.strengh}\ndefense = {self.defense}\nspeed = {self.speed}\n"
        if super_stats:
            stats += super_stats
        desc = desc + stats
        return desc

    def do_damage(self, damage=None):
        # Announce the blow; `damage` is only used for display, the raw
        # strength is what is actually returned.
        print(f"{self.name} prepare un coup a {damage}")
        return self.strengh

    def take_damage(self, input_damage):
        # A d100 roll at or below `defense` dodges the hit entirely.
        evade = randint(0, 100)
        if evade <= self.defense:
            print(f"{self.name} a esquive le coup")
            return
        self.hp -= input_damage
        if self.hp <= 0:
            # Death: signal the game loop to stop.
            print(f"{self.name} est DCD, il n'etait pas si fort que ca...")
            return "ENDGAME"
        print(f"{self.name} takes {input_damage} damages and now have {self.hp} HP.")
class War(Human):
    """Warrior: tougher (randomised 90-120 HP), armored, but slower."""

    def __init__(self, name, classe):
        super().__init__(name, classe)
        self.hp = randint(90, 120)
        self.armor = 20
        self.speed = 40

    def __str__(self):
        description = f"un furieux {self.classe}.\n"
        extra_stats = f"armor = {self.armor}\n"
        return super().__str__(description, extra_stats)

    def do_damage(self):
        # Delegate to the parent, which prints the attack announcement
        # and returns the raw strength.
        return super().do_damage(self.strengh)

    def take_damage(self, input_damage):
        # Armor soaks a percentage of the damage before it is applied.
        mitigated = input_damage * (1 - self.armor / 100)
        return super().take_damage(mitigated)
class Mage(Human):
    """Mage: fragile (randomised 60-85 HP) but able to land critical hits."""

    def __init__(self, name, classe):
        super().__init__(name, classe)
        self.hp = randint(60, 85)
        self.magic = 30

    def __str__(self):
        description = f"un puissant {self.classe}.\n"
        extra_stats = f"magic = {self.magic}\n"
        return super().__str__(description, extra_stats)

    def do_damage(self):
        # A d100 roll at or below `magic` scores a critical hit (x1.5).
        roll = randint(0, 100)
        if roll <= self.magic:
            print("Critical hit!")
            return super().do_damage(self.strengh * 1.5)
        return super().do_damage(self.strengh)

    def take_damage(self, input_damage):
        # Mages have no mitigation: damage is applied at face value.
        return super().take_damage(input_damage)
| 2,481 | 876 |
from raytracerchallenge_python.tuple import Color
from math import pow
class Material:
    """Phong material with optional pattern, reflection and refraction."""

    def __init__(self):
        # Defaults from The Ray Tracer Challenge: a white, mostly diffuse,
        # non-reflective, opaque material.
        self.color = Color(1, 1, 1)
        self.ambient = 0.1
        self.diffuse = 0.9
        self.specular = 0.9
        self.shininess = 200.0
        self.pattern = None
        self.reflective = 0.0
        self.transparency = 0.0
        self.refractive_index = 1.0

    def __eq__(self, other):
        # Compare every material attribute. `reflective` was previously
        # missing, so materials differing only in reflectivity compared
        # as equal.
        return all([self.color == other.color,
                    self.ambient == other.ambient,
                    self.diffuse == other.diffuse,
                    self.specular == other.specular,
                    self.shininess == other.shininess,
                    self.pattern == other.pattern,
                    self.reflective == other.reflective,
                    self.transparency == other.transparency,
                    self.refractive_index == other.refractive_index])

    def lighting(self, object, light, point, eyev, normalv, in_shadow=False):
        """Shade *point* with the Phong model and return the Color.

        object: the shape being shaded (used for pattern lookup)
        light: point light with .position and .intensity
        point: world-space point being lit
        eyev, normalv: eye and surface-normal vectors
        in_shadow: when True only the ambient term contributes
        """
        # Pattern color (if any) overrides the flat material color.
        if self.pattern:
            color = self.pattern.pattern_at_shape(object, point)
        else:
            color = self.color
        effective_color = color * light.intensity
        ambient = effective_color * self.ambient
        if in_shadow:
            # Shadowed points receive no diffuse or specular light.
            return ambient
        lightv = (light.position - point).normalize()
        light_dot_normal = lightv.dot(normalv)
        black = Color(0, 0, 0)
        if light_dot_normal < 0:
            # Light is behind the surface.
            diffuse = black
            specular = black
        else:
            diffuse = effective_color * self.diffuse * light_dot_normal
            reflectv = (-lightv).reflect(normalv)
            reflect_dot_eye = reflectv.dot(eyev)
            if reflect_dot_eye <= 0:
                # Reflection points away from the eye.
                specular = black
            else:
                factor = pow(reflect_dot_eye, self.shininess)
                specular = light.intensity * self.specular * factor
        return ambient + diffuse + specular
| 1,893 | 560 |
import subprocess, shlex
from dawgmon import commands
def local_run(dirname, commandlist):
    """Execute each named command locally, yielding one tuple per command:
    (name, printable command line, exit code, stdout, stderr).

    Commands are looked up in dawgmon's COMMAND_CACHE by name.
    """
    for entry in commandlist:
        command = commands.COMMAND_CACHE[entry]
        # Tokenise the stored command string so Popen receives an argv
        # list (no shell is involved).
        argv = shlex.split(command.command)
        proc = subprocess.Popen(argv, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        # XXX ideally decode with the system encoding instead of
        # assuming UTF-8.
        yield (command.name, "$ %s" % " ".join(argv), proc.returncode,
               out.decode("utf-8"), err.decode("utf-8"))
| 673 | 240 |
class Employee():
    """Employee record holding a name, a document number and a salary."""

    def __init__(self, name, doc_number, salary):
        # Single leading underscore marks the attributes as internal,
        # mirroring the original design.
        self._name, self._doc_number, self._salary = name, doc_number, salary
import matplotlib.pyplot as plt
# X axis: five-year sample points, 1950-2015.
years = [1950, 1955, 1960, 1965, 1970, 1975, 1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015]
# World population in billions at each sample point.
pops = [2.5, 2.7, 3, 3.3, 3.6, 4.0, 4.4, 4.8, 5.3, 5.7, 6.1, 6.5, 6.9, 7.3]
# NOTE(review): labelled "deaths" -- units/source unverified from here.
deaths = [1.2, 1.7, 1.8, 2.2, 2.5, 2.7, 2.9, 3, 3.1, 3.3, 3.5, 3.8, 4, 4.3]
# Population in light red, deaths in light blue (RGB components in 0-1).
plt.plot(years, pops, color=(255/255, 100/255, 100/255))
plt.plot(years, deaths, color=(.6, .6, 1))
plt.title("World Population")
plt.ylabel("Population in billion.")
plt.xlabel("Population growth by year.")
# Blocks until the plot window is closed.
plt.show()
| 504 | 351 |
import tkinter as tk
from tkinter import ttk
from matplotlib.pyplot import close
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg,
NavigationToolbar2Tk)
from matplotlib.mathtext import math_to_image
from io import BytesIO
from PIL import ImageTk, Image
from sympy import latex
from math import pi, cos, sin
from sgraph import *
from braid import *
from col_perm import *
from pres_mat import *
from visualization import *
from casson_gordon import *
from typing import List, Tuple, Callable, Dict
from math import log10, floor
# Font family and size shared by all GUI widgets below.
font_style = "Calibri"
font_size = 25
# Function for rounding eigenvalues
def round_to_2(x: float):
    """Round *x* to two significant figures (0 is returned unchanged)."""
    if x == 0:
        return 0
    # Number of digits before/after the point is derived from the order
    # of magnitude of |x|.
    magnitude = floor(log10(abs(x)))
    return round(x, 1 - int(magnitude))
# Class for main window
class Clasper(tk.Frame):
    """Main application window: reads a braid word, builds its C-complex
    and computes/displays link invariants.

    Fix: get_latex previously referenced an undefined name `cpf` when
    configuring the text widgets (NameError at runtime); the widget is
    `cpf_text`.
    """

    def __init__(self, parent):
        tk.Frame.__init__(self, parent)
        self.parent = parent
        # Configure the grid
        self.grid_columnconfigure(0, weight=1)
        self.grid_columnconfigure(1, weight=1)
        self.grid_columnconfigure(2, weight=1)
        self.grid_columnconfigure(3, weight=1)
        # Configure counter/control variables (used to avoid recomputing
        # invariants when the braid string has not changed).
        self.braid_inv_control = ""
        self.braid_seif_control = ""
        self.computed_invariants = False
        self.computed_seif = False
        # Configure input variables
        self.braid_str = tk.StringVar()
        self.complete_graph = tk.IntVar(value=0)
        # Configure invariant variables
        self.cpf = 0
        self.alexander = 0
        self.signature_value = 0
        self.seif = ""
        self.pm = 0
        # Configure frames for checking the braid
        self.braid_check = tk.Frame(self)
        self.cycle_decomp_frame = tk.Frame(self)
        self.euler_char_frame = tk.Frame(self)
        self.euler_char_frame.grid(column=2, row=3, pady=10, sticky='W')
        self.euler_char_frame.grid_columnconfigure(0, weight=3)
        # NOTE(review): column 0 is configured twice -- the second call
        # (weight=1) overrides the first; possibly column 1 was intended.
        self.euler_char_frame.grid_columnconfigure(0, weight=1)
        self.euler_char_frame.euler_char_val = tk.Frame(self.euler_char_frame)
        # Configure frames for everything
        self.strands = Strands(self)
        self.strands.grid(
            column=0, row=4, pady=10, rowspan=6, sticky='N')
        self.color = Color(self)
        self.color.grid(
            column=1, row=4, pady=10, rowspan=6, sticky='N')
        self.signature = Signature(self)
        self.signature.grid(
            column=2, row=4, pady=10, rowspan=6, sticky='N')
        self.braid_visual = tk.Frame(self)
        self.braid_visual.grid(
            column=0, row=14, pady=10, columnspan=4, sticky='N')
        self.ccomplex_visual = tk.Frame(self)
        self.ccomplex_visual.grid(
            column=0, row=15, pady=10, columnspan=4, sticky='N')
        self.invariant_frame = tk.Frame(self)
        self.invariant_frame.grid(column=0, row=11,
                                  columnspan=4, rowspan=3)
        """
        ----- Implementing the GUI ----
        """
        # (0, 0) Instructions for entering braids
        ttk.Label(
            self, text='''Braids - LinkInfo format or comma/space '''+
            '''separated. Colors and signature inputs - space separated.\n'''+
            '''Press enter to compute invariants with defaults.'''
            ''' See paper for details about the C-Complex.\n'''+
            '''Written by Chinmaya Kausik.''',
            font=(font_style, font_size), background='cyan').grid(
                column=0, row=0, columnspan=4)
        # (0, 0->1) Setting up the entry for the braid
        ttk.Label(
            self, text='Braid:', font=(font_style, font_size)).grid(
                column=0, row=1, pady=10)
        ttk.Entry(self, textvariable=self.braid_str,
                  font=(font_style, font_size), width=40).grid(
                      column=1, row=1, padx=0, pady=10, sticky='W',
                      columnspan=2)
        # (1, 2) Examples for braid entries
        ttk.Label(
            self, text="""Example: '-2 -3 2 -3 -1 -2 -3'"""+
            """ or '-2, -3, 2, -3, -1, -2, -3' or """+
            """'{4, {-2, -3, 2, -3, -1, -2, -3}}'""",
            font=(font_style, font_size), background='cyan').grid(
                column=1, row=2, pady=10, sticky='W', columnspan=3)
        # Creating a style object
        style = ttk.Style()
        # Adding style for buttons
        style.configure('C.TButton', font=('calibri', font_size),
                        background='blue')
        # Adding style for radiobuttons
        style.configure('C.TRadiobutton', font=('calibri', font_size))
        # Adding style for checkbuttons
        style.configure('C.TCheckbutton', font=('calibri', font_size))
        ttk.Checkbutton(self, text="All Seifert surfaces intersecting",
                        style='C.TCheckbutton',
                        variable=self.complete_graph).grid(
                            column=2, row=1, padx=30, pady=10, sticky='W')
        # Setup for printing the cycle decomposition
        ttk.Button(self, text="Cycle Decomposition", command=self.compute_cyc,
                   style='C.TButton').grid(column=0, row=3, pady=10)
        # Setup for printing the Euler Characteristic of the C-Complex
        ttk.Button(self.euler_char_frame,
                   text="Euler Characteristic of C-Complex",
                   command=self.get_sgraph_euler_char,
                   style='C.TButton').grid(column=0, row=0, pady=10,
                                           sticky='W')
        # Button to compute invariants
        ttk.Button(self, text="Compute link invariants",
                   command=self.get_invariants, style='C.TButton').grid(
                       column=0, row=10, pady=10)
        ttk.Button(self, text="Invariants in LaTeX",
                   command=self.get_latex, style='C.TButton').grid(
                       column=1, row=10, pady=10)
        ttk.Button(self, text="Export Seifert matrices",
                   command=self.get_seifert_matrices, style='C.TButton').grid(
                       column=2, row=10, pady=10)

    # Compute invariants with defaults
    def compute_with_defaults(self, int: int):
        """Reset strand/color/signature choices to defaults and compute.

        NOTE(review): the parameter shadows the builtin `int`; it is kept
        for backward compatibility (presumably a Tk event argument).
        """
        self.strands.strand_choice.set(1)
        self.color.color_choice.set(2)
        self.signature.signature_choice.set(1)
        self.get_invariants()

    # Processing Link Info style inputs
    def link_info(self, braid: str) -> Braid:
        """Parse a LinkInfo-style string '{n, {i1, i2, ...}}' into a Braid.

        NOTE(review): the strand count is read as a single character, so
        braids with 10+ strands would be misparsed -- confirm inputs.
        """
        start = braid.index('{')+1
        strands = int(braid[start])
        new_braid = braid[start:]
        braid1 = new_braid[
            new_braid.index('{')+1: new_braid.index('}')].split(',')
        braid1 = list(filter(lambda x: x.strip() != "", braid1))
        braid1 = list(map(lambda x: int(x), braid1))
        return Braid(braid1, strands)

    # Processing comma separated inputs
    def csv_input(self, braid: str) -> List[int]:
        """Parse a comma-separated braid word into a list of ints."""
        braid1 = braid.strip().split(",")
        braid1 = list(filter(lambda x: x.strip() != "", braid1))
        braid1 = [int(x) for x in braid1]
        return braid1

    # Processing space separated inputs
    def space_input(self, braid: str) -> List[int]:
        """Parse a space-separated braid word into a list of ints."""
        braid1 = braid.strip().split(" ")
        braid1 = list(filter(lambda x: x.strip() != "", braid1))
        braid1 = [int(x) for x in braid1]
        return braid1

    # Command for computing the cycle decomposition and generating the braid
    def compute_cyc(self) -> Braid:
        """Display the cycle decomposition of the entered braid."""
        self.cycle_decomp_frame.destroy()
        self.cycle_decomp_frame = tk.Frame(self)
        self.cycle_decomp_frame.grid(
            column=1, row=3, pady=10, sticky='W')
        p_braid = self.strands.make_braid()
        ttk.Label(self.cycle_decomp_frame, text=str(p_braid.cycle_decomp),
                  font=(font_style, font_size)).pack()

    # Command for computing and displaying the Euler characteristic
    def get_sgraph_euler_char(self) -> Braid:
        """Display the Euler characteristic of the current C-complex."""
        self.euler_char_frame.euler_char_val.destroy()
        self.euler_char_frame.euler_char_val = tk.Frame(self.euler_char_frame)
        self.euler_char_frame.euler_char_val.grid(
            column=1, row=0, padx=20, pady=10, sticky='E')
        try:
            graph = self.color.get_graph()
            ttk.Label(self.euler_char_frame.euler_char_val,
                      text="= "+str(graph.sgraph_euler_char()),
                      font=(font_style, font_size)).pack()
        except Exception:
            # Invalid/missing input: leave the value area empty.
            pass

    # Print latex
    def get_latex(self):
        """Open a window showing the invariants as selectable LaTeX text."""
        new_window = tk.Toplevel(self)
        try:
            if((self.braid_inv_control.strip() == self.braid_str.get().strip())
               and self.computed_invariants):
                # Invariants are already up to date for this braid.
                pass
            else:
                graph = self.color.get_graph()
                # Print the Euler characteristic of the SGraph
                self.get_sgraph_euler_char()
                if(self.braid_seif_control.strip() !=
                   self.braid_str.get().strip()):
                    (self.seif, self.pm) = presentation_matrix(graph)
                self.cpf = self.pm.conway_potential_function(graph)
                self.alexander = self.pm.multivar_alexander_poly(graph)
                self.computed_invariants = True
                self.computed_seif = True
                self.braid_inv_control = self.braid_str.get()
                self.braid_seif_control = self.braid_str.get()
            cpf_text = tk.Text(new_window, font=(font_style, font_size))
            cpf_text.insert(1.0, "Conway Potential Function:\n"+
                            latex(self.cpf))
            cpf_text.pack()
            cpf_text.configure(state="disabled")
            multi_var_alexander = tk.Text(
                new_window, font=(font_style, font_size))
            multi_var_alexander.insert(1.0,
                                       "Mutivariable Alexander Polynomial:\n"+
                                       latex(self.alexander))
            multi_var_alexander.pack()
            multi_var_alexander.configure(state="disabled")
            # if tkinter is 8.5 or above you'll want the selection background
            # to appear like it does when the widget is activated
            # comment this out for older versions of Tkinter
            # (fix: these previously referenced an undefined name `cpf`)
            cpf_text.configure(inactiveselectbackground=cpf_text.cget(
                "selectbackground"))
            multi_var_alexander.configure(
                inactiveselectbackground=cpf_text.cget("selectbackground"))
        except ValueError:
            pass

    # Save the seifert matrices to a file
    def get_seifert_matrices(self):
        """Export the Seifert matrices of the current braid to a text file."""
        if((self.braid_seif_control.strip() == self.braid_str.get().strip())
           and self.computed_invariants):
            pass
        else:
            graph = self.color.get_graph()
            # Print the Euler characteristic of the SGraph
            self.get_sgraph_euler_char()
            (self.seif, self.pm) = presentation_matrix(graph)
        # NOTE(review): tk.filedialog requires tkinter.filedialog to have
        # been imported somewhere (possibly via the star imports above) --
        # confirm, otherwise this is an AttributeError.
        file_name = tk.filedialog.asksaveasfilename()
        self.invariant_frame.destroy()
        self.invariant_frame = Inv(self)
        self.invariant_frame.grid(column=0, row=11,
                                  columnspan=4, rowspan=3)
        p = self.strands.make_braid()
        graph = self.invariant_frame.graph
        if(file_name):
            if("." not in file_name):
                file_name += ".txt"
            f = open(file_name, 'w+')
            f.write("Braid: "+str(p.braid_wrong))
            f.write("\nStrands: "+str(p.strands)+"\n\n")
            f.write(self.seif)
            f.close()

    # Command for computing and displaying invariants
    def get_invariants(self):
        """Recompute and display the braid, C-complex and invariants."""
        self.invariant_frame.destroy()
        self.view_braid()
        self.view_c_complex()
        self.invariant_frame = Inv(self)
        self.invariant_frame.grid(column=0, row=11,
                                  columnspan=4, rowspan=3)

    # Command to view the braid
    def view_braid(self):
        """Render the braid diagram into an embedded matplotlib canvas."""
        try:
            # Close the previous figure to avoid leaking matplotlib state.
            close(self.braid_fig)
        except Exception:
            pass
        self.braid_visual.destroy()
        self.braid_visual = tk.Frame(self)
        self.braid_visual.grid(
            column=0, row=14, pady=10, columnspan=4)
        self.braid_fig = visualize_braid(self.color.get_col_braid())
        # creating the Tkinter canvas
        # containing the Matplotlib figure
        canvas = FigureCanvasTkAgg(self.braid_fig, master=self.braid_visual)
        canvas.draw()
        # placing the canvas on the Tkinter window
        canvas.get_tk_widget().pack()

    # Command to view the C-Complex
    def view_c_complex(self):
        """Render the C-complex into an embedded matplotlib canvas."""
        try:
            close(self.ccomplex_fig)
        except Exception:
            pass
        self.ccomplex_visual.destroy()
        self.ccomplex_visual = tk.Frame(self)
        self.ccomplex_visual.grid(
            column=0, row=15, pady=10, columnspan=4)
        self.ccomplex_fig = visualize_clasp_complex(self.color.get_graph())
        # creating the Tkinter canvas
        # containing the Matplotlib figure
        canvas = FigureCanvasTkAgg(self.ccomplex_fig,
                                   master=self.ccomplex_visual)
        canvas.draw()
        # placing the canvas on the Tkinter window
        canvas.get_tk_widget().pack()
# Class for invariants
class Inv(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
# Configure the grid
self.grid_columnconfigure(0, weight=1)
self.grid_columnconfigure(1, weight=1)
self.grid_columnconfigure(2, weight=1)
self.grid_columnconfigure(3, weight=1)
try:
graph = parent.color.get_graph()
self.graph = graph
except ValueError:
pass
omega = parent.signature.get_omega()
# Print the Euler characteristic of the SGraph
self.parent.get_sgraph_euler_char()
if((self.parent.braid_inv_control.strip() ==
self.parent.braid_str.get().strip())
and self.parent.computed_invariants):
pass
else:
graph = self.parent.color.get_graph()
# Print the Euler characteristic of the SGraph
self.parent.get_sgraph_euler_char()
if(self.parent.braid_seif_control.strip() !=
self.parent.braid_str.get().strip()):
(self.parent.seif, self.parent.pm) = presentation_matrix(graph)
self.parent.cpf = self.parent.pm.conway_potential_function(graph)
self.parent.alexander = \
self.parent.pm.multivar_alexander_poly(graph)
self.parent.computed_invariants = True
self.parent.computed_seif = True
self.parent.braid_inv_control = self.parent.braid_str.get()
self.parent.braid_seif_control = self.parent.braid_str.get()
ttk.Label(self, text='Conway Potential Function:',
font=(font_style, font_size)).grid(
column=0, row=0, pady=10)
self.make_latex_label(latex(self.parent.cpf),
column=1, row=0, y_pad=10, sticky='W',
columnspan=3, rowspan=1, size=(2000, 100))
ttk.Label(self, text='Multivariable Alexander Polynomial:',
font=(font_style, font_size)).grid(
column=0, row=1, pady=10)
self.make_latex_label(latex(self.parent.alexander),
column=1, row=1, y_pad=10, sticky='W',
columnspan=3, rowspan=1, size=(2000, 50))
ttk.Label(self, text='Cimasoni-Florens Signature:',
font=(font_style, font_size)).grid(
column=0, row=2, pady=15)
signat = self.parent.pm.signature(omega)
ttk.Label(self, text=str(signat[0]), font=(font_style, 30)).grid(
column=1, row=2, pady=15, sticky='W')
eig_val_str = str([round_to_2(x) for x in signat[1]])[1:-1]
eig_val = "(Eigenvalues: "+eig_val_str+")"
ttk.Label(self, text=str(eig_val), font=(font_style, 25)).grid(
column=2, row=2, columnspan=2, padx=10, pady=15, sticky='W')
# Renders latex as a label and places it on the grid
def make_latex_label(self, latex_string: str, column: int,
row: int, y_pad: int, sticky: str, columnspan: int, rowspan: int,
size = Tuple[int, int]):
# Creating buffer for storing image in memory
buffer = BytesIO()
# Writing png image with our rendered latex text to buffer
math_to_image("$" + latex_string + "$",
buffer, dpi=1000, format='png')
# Remoting buffer to 0, so that we can read from it
buffer.seek(0)
# Creating Pillow image object from it
pimage= Image.open(buffer)
pimage.thumbnail(size)
# Creating PhotoImage object from Pillow image object
image = ImageTk.PhotoImage(pimage)
# Creating label with our image
label = ttk.Label(self, image=image)
# Storing reference to our image object so it's not garbage collected,
# since TkInter doesn't store references by itself
label.img = image
label.grid(column=column, row=row, pady=y_pad, sticky=sticky,
columnspan=columnspan, rowspan=rowspan)
buffer.flush()
# Class for strand inputs
class Strands(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
braid = self.parent.braid_str.get()
# Configure the two columns
self.grid_columnconfigure(0, weight=1)
self.grid_columnconfigure(1, weight=2)
# Add title
ttk.Label(
self, text='''Number of strands''',
font=(font_style, font_size), background='yellow').grid(
column=0, row=0, columnspan=2)
# Configure frame for printing defaults
self.strand_default = tk.Frame(self)
self.strand_check = tk.Frame(self)
# Configure variables to hold inputs
self.strand_choice = tk.IntVar(value=0)
self.strand_str = tk.StringVar()
# Configure and place radio buttons and entries
# Default
self.use_defaults = ttk.Radiobutton(self, text="Default",
variable=self.strand_choice,
style='C.TRadiobutton', value=1, command=self.make_braid)
self.use_defaults.grid(column=0, row=1, pady=10, sticky='W')
# Custom
self.use_custom = ttk.Radiobutton(self, text="Custom: ",
variable=self.strand_choice,
style='C.TRadiobutton', value=2, command=self.make_braid)
self.use_custom.grid(column=0, row=2, pady=10, sticky='W')
ttk.Entry(self, textvariable=self.strand_str,
font=(font_style, font_size)).grid(
column=1, row=2, padx=0, pady=10, sticky='W')
# Example of a custom entry
ttk.Label(self, text="Example: '3'",
font=(font_style, font_size), background='cyan').grid(
column=1, row=3, pady=10, sticky='W')
# Make a braid and return error messages
def make_braid(self) -> Braid:
# Destroy and reinitialize message frames
self.parent.braid_check.destroy()
self.strand_default.destroy()
self.strand_check.destroy()
self.strand_check = tk.Frame(self)
self.strand_default = tk.Frame(self)
self.parent.braid_check = tk.Frame(self.parent)
self.parent.braid_check.grid(column=0, row=2, pady=10)
self.strand_default.grid(column=1, row=1, pady=10, sticky='W')
self.strand_check.grid(column=0, row=5, pady=10, columnspan=2)
strand_check_message = ""
braid = self.parent.braid_str.get()
try:
strand_option = self.strand_choice.get()
assert strand_option != 0, AssertionError
if('{' in braid):
p = self.parent.link_info(braid)
elif(',' in braid):
braid1 = self.parent.csv_input(braid)
else:
braid1 = self.parent.space_input(braid)
except AssertionError:
strand_check_message += "Specify strands."
except ValueError:
ttk.Label(self.parent.braid_check, text="Bad braid input",
font=(font_style, font_size), background="pink").pack()
try:
if(strand_option == 2):
strands = self.strand_str.get()
strands = int(strands)
p = Braid(braid1, strands)
else:
if('{' not in braid):
strands = max(list(map(lambda x: abs(x), braid1)))+1
p = Braid(braid1, strands)
ttk.Label(self.strand_default, text="= "+ str(p.strands),
font=(font_style, font_size)).pack(anchor='w')
except ValueError:
strand_check_message += "Bad strand input."
except UnboundLocalError:
pass
if(strand_check_message!=""):
ttk.Label(self.strand_check, text=strand_check_message,
font=(font_style, font_size), background="pink").pack()
try:
return p
except Exception:
pass
# Class for color inputs
class Color(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
braid = self.parent.braid_str.get()
# Configure the two columns
self.grid_columnconfigure(0, weight=1)
self.grid_columnconfigure(1, weight=2)
# Add title
ttk.Label(
self, text='''Colors''',
font=(font_style, font_size), background='yellow').grid(
column=0, row=0, columnspan=2)
# Configure frame for printing defaults
self.one_color_default = tk.Frame(self)
self.multi_color_default = tk.Frame(self)
self.color_check = tk.Frame(self)
# Configure variables to hold inputs
self.color_choice = tk.IntVar(value=0)
self.color_str = tk.StringVar()
# Configure and place radio buttons and entries
# One color
self.use_one_color = ttk.Radiobutton(self, text="One color",
variable=self.color_choice,
style='C.TRadiobutton', value=1, command=self.get_col_braid)
self.use_one_color.grid(column=0, row=1, pady=10, sticky='W')
# One per knot
self.use_one_per_knot = ttk.Radiobutton(self, text="One per knot",
variable=self.color_choice,
style='C.TRadiobutton', value=2, command=self.get_col_braid)
self.use_one_per_knot.grid(column=0, row=2, pady=10, sticky='W')
# Custom
self.use_custom = ttk.Radiobutton(self, text="Custom: ",
variable=self.color_choice,
style='C.TRadiobutton', value=3, command=self.get_col_braid)
self.use_custom.grid(column=0, row=3, pady=10, sticky='W')
ttk.Entry(self, textvariable=self.color_str,
font=(font_style, font_size)).grid(
column=1, row=3, padx=0, pady=10, sticky='W')
# Example of a custom entry
ttk.Label(self, text="Example: '0 0 1' for 3 knots",
font=(font_style, font_size), background='cyan').grid(
column=1, row=4, pady=10, sticky='W')
# Make a colored braid and return error messages
# Command for getting the coloured braid
def get_col_braid(self) -> ColBraid:
self.color_check.destroy()
self.multi_color_default.destroy()
self.one_color_default.destroy()
self.color_check = tk.Frame(self)
self.multi_color_default = tk.Frame(self)
self.one_color_default = tk.Frame(self)
# Place frames for various defaults and error messages
self.color_check.grid(column=0, row=5, pady=10)
self.one_color_default.grid(column=1, row=1, pady=10, sticky='W')
self.multi_color_default.grid(column=1, row=2, pady=10, sticky='W')
self.parent.compute_cyc()
p = self.parent.strands.make_braid()
def print_col_list(lst: List[int]):
a = ""
for i in lst:
a += str(i) + " "
return a
try:
color_option = self.color_choice.get()
assert color_option != 0, AssertionError
if(color_option == 1):
col_list = [0]*p.ct_knots
ttk.Label(self.one_color_default,
text="= "+print_col_list(col_list),
font=(font_style, font_size)).pack(anchor='w')
elif(color_option == 2):
col_list = list(range(p.ct_knots))
ttk.Label(self.multi_color_default,
text="= "+print_col_list(col_list),
font=(font_style, font_size)).pack(anchor='w')
else:
col_list = self.color_str.get()
col_list = [int(x) for x in col_list.split(" ")]
col_signs = [1]*(max(col_list)+1)
p = ColBraid(p.braid, p.strands, col_list)
complete_choice = self.parent.complete_graph.get()
if(complete_choice==0):
p, col_signs = find_min_perm(p, col_signs, 50)
else:
p, col_signs = find_min_perm_complete(p, col_signs, 50)
return p
except ValueError:
ttk.Label(self.color_check, text="Bad color input",
font=(font_style, font_size), background="pink").pack()
except AssertionError:
ttk.Label(self.color_check, text="Specify colors",
font=(font_style, font_size), background="pink").pack()
# Makes the graph for the colored braid derived from the color inputs
def get_graph(self):
self.color_check.destroy()
self.multi_color_default.destroy()
self.one_color_default.destroy()
self.color_check = tk.Frame(self)
self.multi_color_default = tk.Frame(self)
self.one_color_default = tk.Frame(self)
# Place frames for various defaults and error messages
self.color_check.grid(column=0, row=5, pady=10)
self.one_color_default.grid(column=1, row=1, pady=10, sticky='W')
self.multi_color_default.grid(column=1, row=2, pady=10, sticky='W')
self.parent.compute_cyc()
p = self.parent.strands.make_braid()
def print_col_list(lst: List[int]):
a = ""
for i in lst:
a += str(i) + " "
return a
try:
color_option = self.color_choice.get()
assert color_option != 0, AssertionError
if(color_option == 1):
col_list = [0]*p.ct_knots
ttk.Label(self.one_color_default,
text="= "+print_col_list(col_list),
font=(font_style, font_size)).pack(anchor='w')
elif(color_option == 2):
col_list = list(range(p.ct_knots))
ttk.Label(self.multi_color_default,
text="= "+print_col_list(col_list),
font=(font_style, font_size)).pack(anchor='w')
else:
col_list = self.color_str.get()
col_list = [int(x) for x in col_list.split(" ")]
col_signs = [1]*(max(col_list)+1)
p = ColBraid(p.braid, p.strands, col_list)
complete_choice = self.parent.complete_graph.get()
if(complete_choice==0):
p, col_signs = find_min_perm(p, col_signs, 50)
graph = p.make_graph(col_signs)
else:
p, col_signs = find_min_perm_complete(p, col_signs, 50)
graph= p.make_graph_complete(col_signs)
return graph
except ValueError:
ttk.Label(self.color_check, text="Bad color input",
font=(font_style, font_size), background="pink").pack()
except AssertionError:
ttk.Label(self.color_check, text="Specify colors",
font=(font_style, font_size), background="pink").pack()
# Class for signature inputs
class Signature(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
braid = self.parent.braid_str.get()
# Configure the two columns
self.grid_columnconfigure(0, weight=1)
self.grid_columnconfigure(1, weight=2)
# Add title
ttk.Label(
self, text='''Signature inputs''',
font=(font_style, font_size), background='yellow').grid(
column=0, row=0, columnspan=2)
# Configure frame for printing defaults
self.signature_default = tk.Frame(self)
self.signature_check = tk.Frame(self)
# Configure variables to hold inputs
self.signature_choice = tk.IntVar(value=0)
self.signature_str = tk.StringVar()
# Configure and place radio buttons and entries
# Default
self.use_defaults = ttk.Radiobutton(self, text="Default",
variable=self.signature_choice,
style='C.TRadiobutton', value=1, command=self.get_omega)
self.use_defaults.grid(column=0, row=1, pady=10, sticky='W')
# Custom
self.use_custom = ttk.Radiobutton(self, text="Custom: ",
variable=self.signature_choice,
style='C.TRadiobutton', value=2, command=self.get_omega)
self.use_custom.grid(column=0, row=2, pady=10, sticky='W')
ttk.Entry(self, textvariable=self.signature_str,
font=(font_style, font_size)).grid(
column=1, row=2, padx=0, pady=10, sticky='W')
# Example of a custom entry
ttk.Label(self, text="Example: '1/2 1/3' means '(pi, 2*pi/3)'",
font=(font_style, font_size), background='cyan').grid(
column=1, row=3, pady=10, sticky='W')
# Get the signature input and return error messages
def get_omega(self) -> Braid:
# Destroy and reinitialize message frames
self.signature_default.destroy()
self.signature_check.destroy()
self.signature_check = tk.Frame(self)
self.signature_default = tk.Frame(self)
self.signature_default.grid(column=1, row=1, pady=10, sticky='W')
self.signature_check.grid(column=0, row=5, pady=10, columnspan=2)
signature_inputs = self.signature_str.get()
graph = self.parent.color.get_graph()
try:
signature_option = self.signature_choice.get()
assert signature_option != 0, AssertionError
if(signature_option == 1):
omega = [complex(-1, 0)]*graph.colors
ttk.Label(self.signature_default, text="= "+ "1/2 "*graph.colors,
font=(font_style, font_size)).pack(anchor='w')
else:
complex_tuple = [eval(x) for x in
signature_inputs.strip().split(" ")]
for c in complex_tuple:
if(c==1.0):
ttk.Label(self.signature_check,
text="2*pi is not allowed.",
font=(font_style, font_size),
background='pink').pack(anchor='w')
omega = [complex(cos(2*pi*x), sin(2*pi*x))
for x in complex_tuple]
except AssertionError:
ttk.Label(self.signature_check, text="Specify signature inputs",
font=(font_style, font_size),
background='pink').pack(anchor='w')
except ValueError:
ttk.Label(self.signature_check, text="Bad signature inputs",
font=(font_style, font_size),
background='pink').pack(anchor='w')
try:
return omega
except Exception:
pass
# Class for Casson Gordon inputs
class Casson_Gordon(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.parent = parent
# Configure the two columns
self.grid_columnconfigure(0, weight=1)
self.grid_columnconfigure(1, weight=2)
# Add title
ttk.Label(
self, text='''Casson-Gordon invariants''',
font=(font_style, font_size), background='yellow').grid(
column=0, row=0, columnspan=2)
# Configure variables to hold inputs
self.framing = tk.StringVar()
self.q_ni_cg = tk.StringVar()
# Configure and place labels for inputs and and examples
ttk.Label(self, text="Framing:",
font=(font_style, font_size)).grid(
column=0, row=1, padx=0, pady=10)
ttk.Label(self, text="Example: '1 0 -2'."+
" Framing = self-linking numbers of knots.",
font=(font_style, font_size), background='cyan').grid(
column=0, row=2, columnspan=2, padx=0, pady=10)
ttk.Label(self, text="q, n_i tuple:",
font=(font_style, font_size)).grid(
column=0, row=3, padx=0, pady=10)
ttk.Label(self, text="Example: '5, 2 3 2' means q = 5, n_1 = 3."+
" See paper.",
font=(font_style, font_size), background='cyan').grid(
column=0, row=4, columnspan=2, padx=0, pady=10)
# Configure and place entry boxes
ttk.Entry(self, textvariable=self.framing,
font=(font_style, font_size)).grid(
column=1, row=1, padx=0, pady=10, sticky='W')
ttk.Entry(self, textvariable=self.q_ni_cg,
font=(font_style, font_size)).grid(
column=1, row=3, padx=0, pady=10, sticky='W')
self.casson_gordon_frame = tk.Frame(self)
def compute_casson_gordon(self):
self.casson_gordon_frame.destroy()
self.casson_gordon_frame = tk.Frame(self)
self.casson_gordon_frame.grid(
column=0, row=5, columnspan=2, padx=0, pady=10)
self.casson_gordon_frame.grid_columnconfigure(0)
self.casson_gordon_frame.grid_columnconfigure(1)
ttk.Label(self.casson_gordon_frame, text="Casson-Gordon invariant:",
font=(font_style, font_size)).grid(
column=0, row=0, padx=0, pady=10)
framing_str = self.framing.get()
q_ni_cg_str = self.q_ni_cg.get()
framing_val = [int(x) for x in framing_str.split(" ")]
q = int(q_ni_cg_str.strip()[0])
ni_tuple_str = q_ni_cg_str[q_ni_cg_str.find(",")+1:].strip().split(" ")
ni_tuple = [int(x) for x in ni_tuple_str]
p = self.parent.strands.make_braid()
ttk.Label(self.casson_gordon_frame,
text=str(casson_gordon(framing_val, q, ni_tuple, p)),
font=(font_style, font_size)).grid(
column=1, row=0, padx=0, pady=10)
def get_casson_gordon(self):
try:
self.compute_casson_gordon()
except (ValueError, AttributeError):
self.casson_gordon_frame.destroy()
self.casson_gordon_frame = tk.Frame(self)
ttk.Label(self, text="Check inputs",
font=(font_style, font_size), background='pink').grid(
column=0, row=5, columnspan=2, padx=0, pady=10)
# Executing everything
if __name__ == "__main__":
root = tk.Tk()
root.title("Clasper")
# Get the screen dimension
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
# Find the center point
center_x = int(screen_width/2)
center_y = int(screen_height/2)
window_width = screen_width
window_height = screen_height
# Set the position of the window to the center of the screen
root.geometry(f'{window_width}x{window_height}+{center_x}+{0}')
root.state('zoomed')
clasper_canvas = tk.Canvas(root)
hbar = tk.Scrollbar(root, orient='horizontal',
command=clasper_canvas.xview)
scrollbar = tk.Scrollbar(root, orient='vertical',
command=clasper_canvas.yview)
hbar.pack(side="bottom", fill="both")
clasper_canvas.pack(side="left", fill="both", expand=True, padx=10, pady=10)
scrollbar.pack(side="right", fill="both")
clasper_canvas['yscrollcommand'] = scrollbar.set
clasper_canvas['xscrollcommand'] = hbar.set
clasper = Clasper(clasper_canvas)
def onCanvasConfigure(e):
clasper_canvas.configure(scrollregion=clasper_canvas.bbox("all"))
clasper_canvas.itemconfig('frame',
height=2800,
width=3000)
clasper_canvas.create_window(0, 0,
height=2800,
width=3000,
window=clasper, anchor="nw", tags="frame")
clasper_canvas.bind("<Configure>", onCanvasConfigure)
clasper_canvas.configure(scrollregion=clasper_canvas.bbox("all"))
clasper_canvas.itemconfig('frame',
height=2800,
width=3000)
def on_mousewheel(event):
clasper_canvas.yview_scroll(int(-1*(event.delta/120)), "units")
def on_shift_mousewheel(event):
clasper_canvas.xview_scroll(int(-1*(event.delta/120)), "units")
root.bind_all("<MouseWheel>", on_mousewheel)
root.bind_all("<Shift-MouseWheel>", on_shift_mousewheel)
root.bind('<Return>', clasper.compute_with_defaults)
try:
from ctypes import windll
windll.shcore.SetProcessDpiAwareness(1)
finally:
root.mainloop()
# Setting up the entry for strands
"""ttk.Label(
self, text='Number of Strands:',
font=(font_style, font_size)).grid(column=0, row=2, pady=10)
self.strand_str = tk.StringVar()
ttk.Entry(self, textvariable=self.strand_str,
font=(font_style, font_size)).grid(
column=1, row=2, padx=0, pady=10, sticky='W', columnspan=3)"""
# Set up entry for the colour list
"""ttk.Label(self, text='Colours (start from 0, BFD):',
font=(font_style, font_size)).grid(
column=0, row=5, pady=10)
self.colour_list = tk.StringVar()
ttk.Entry(self, textvariable=self.colour_list,
font=(font_style, font_size)).grid(
column=1, row=5, padx=0, pady=10, sticky='W', columnspan=3)"""
# Set up entry for orientations of colours
"""ttk.Label(self, text='Orientations (+1/-1, BFD):',
font=(font_style, font_size)).grid(
column=0, row=6, pady=10)
self.colour_signs = tk.StringVar()
ttk.Entry(self, textvariable=self.colour_signs,
font=(font_style, font_size)).grid(
column=1, row=6, padx=0, pady=10, sticky='W', columnspan=3)
"""
# Set up entry for complex tuple
"""ttk.Label(self, text='Signature input,'+
'space sep\n (1/3 means 2*pi/3, BFD):',
font=(font_style, font_size)).grid(
column=0, row=7, pady=10)
self.cplx_tuple = tk.StringVar()
ttk.Entry(self, textvariable=self.cplx_tuple,
font=(font_style, font_size)).grid(
column=1, row=7, padx=0, pady=10, sticky='W', columnspan=2)"""
| 38,747 | 12,571 |
from .audit import auditApiCall
from .exceptions import InvalidCommand
__all__ = ['Host']
class Host(object):
"""
The Host provides an abstraction of the caller of API functions. Largely to
provide information about the Document state to the Manager, without
requiring specific knowledge of which host it is currently running in.
"""
def __init__(self):
super(Host, self).__init__()
##
# @name Host Information
#
## @{
@auditApiCall("Host")
def getIdentifier(self):
"""
Returns an identifier to uniquely identify a Host.
This may be used by a Manager's @ref ManagerInterfaceBase adjust its behaviour
accordingly. The identifier should be unique for any application, but
common to all versions.
The identifier should use only alpha-numeric characters and '.', '_' or '-'.
For example:
"uk.co.foundry.hiero"
@return str
"""
raise NotImplementedError
@auditApiCall("Host")
def getDisplayName(self):
"""
Returns a human readable name to be used to reference this specific
host.
"Hiero"
"""
raise NotImplementedError
@auditApiCall("Host")
def getInfo(self):
"""
Returns other information that may be useful about this Host.
This can contain arbitrary key/value pairs. Managers never rely directly
on any particular keys being set here, but the information may be
useful for diagnostic or debugging purposes. For example:
{ 'version' : '1.1v3' }
"""
return {}
## @}
##
# @name Commands
#
# The commands mechanism provides a means for Hosts and asset managers to
# extend functionality of the API, without requiring any new methods.
#
# The API represents commands via a @ref
# python.specifications.CommandSpecification, which maps to a 'name' and some
# 'arguments'.
#
## @{
@auditApiCall("Host")
def commandSupported(self, command, context):
"""
Determines if a specified command is supported by the Host.
@return bool, True if the Host implements the command, else False.
@see commandIsSupported()
@see runCommand()
"""
return False
@auditApiCall("Host")
def commandAvailable(self, command, context):
"""
Determines if specified command is permitted or should succeed in the
current context. This call can be used to test whether a command can
be carried out, generally to provide some meaningful feedback to a user
so that they don't perform an action that would consequently error.
For example, the 'checkout' command for an asset may return false here
if that asset is already checked out by another user, or the current
user is not allowed to check the asset out.
@exception python.exceptions.InvalidCommand If an un-supported command is
passed.
@return (bool, str), True if the command should complete stressfully if
called, False if it is known to fail or is not permitted. The second part
of the tuple will contain any meaningful information from the system to
qualify the status.
@see commandIsSupported()
@see runCommand()
"""
return False, "Unsupported command"
@auditApiCall("Host")
def runCommand(self, command, context):
"""
Instructs the Host to perform the specified command.
@exception python.exceptions.InvalidCommand If the command is not
implemented by the system.
@exception python.exceptions.CommandError if any other run-time error
occurs during execution of the command
@return Any result of the command.
@see commandSupported()
@see commandAvailable()
"""
raise InvalidCommand
## @}
@auditApiCall("Host")
def getDocumentReference(self):
"""
@return str, The path, or @ref entity_reference of the current document, or
an empty string if not applicable. If a Host supports multiple concurrent
documents, it should be the 'frontmost' one. If there is no meaningful
document reference, then an empty string should be returned.
"""
return ''
##
# @name Entity Reference retrieval
#
## @{
@auditApiCall("Host")
def getKnownEntityReferences(self, specification=None):
"""
@return list, An @ref entity_reference for each Entities known to the host
to be used in the current document, or an empty list if none are known.
@param specification python.specifications.Specification [None] If
supplied, then only entities of the supplied specification should be
returned.
"""
return []
@auditApiCall("Host")
def getEntityReferenceForItem(self, item, allowRelated=False):
"""
This should be capable of taking any item that may be set in a
locale/etc... or a Host-native API object and returning an @ref
entity_reference for it, if applicable.
@param allowRelated bool, If True, the Host can return a reference for some
parent or child or relation of the supplied item, if applicable. This can
be useful for broadening the area of search in less specific cases.
@return str, An @ref entity_reference of an empty string if no applicable
Entity Reference could be determined for the supplied item.
"""
return ''
## @}
def log(self, message, severity):
"""
Logs the supplied message though the most appropriate means for the Host.
This ensures that the message will be presented to the user in the same
fashion as any of the applications own messages.
@note Though this method could be optional, as there is a default logging
implementation, it is required to ensure that a Host implementation
consciously considers the way log messages are presented.
@see python.logging
"""
raise NotImplementedError
# def progress(self, decimalProgress, message):
# """
#
# A method to provide alternate progress reporting. If not implemented, then
# the standard logging mechanism will print the progress message to the
# standard logging call with a progress severity.
#
# @see log
# @see python.logging
#
# """
# raise NotImplementedError
| 6,136 | 1,657 |
import copy
import queue
import pydot
class NZP:
    """Qualitative value space {-, 0, +}: negative, zero, positive.

    Zero is the only point (stationary) value; the others are intervals.
    """

    def __init__(self):
        # (label, numeric value, is-point-value) per state, in order.
        spec = [('-', -1, False), ('0', 0, True), ('+', 1, False)]
        self.names = [label for label, _, _ in spec]
        self.vals = [value for _, value, _ in spec]
        self.stationary = [point for _, _, point in spec]
class ZP:
    """Qualitative value space {0, +}: zero or positive.

    Zero is a point (stationary) value; '+' is an interval.
    """

    def __init__(self):
        # (label, numeric value, is-point-value) per state, in order.
        spec = [('0', 0, True), ('+', 1, False)]
        self.names = [label for label, _, _ in spec]
        self.vals = [value for _, value, _ in spec]
        self.stationary = [point for _, _, point in spec]
class ZPM:
    """Qualitative value space {0, +, m}: zero, positive, maximum.

    Zero and the maximum ('m') are point (stationary) values; '+' is the
    interval between them.
    """

    def __init__(self):
        # (label, numeric value, is-point-value) per state, in order.
        spec = [('0', 0, True), ('+', 1, False), ('m', 2, True)]
        self.names = [label for label, _, _ in spec]
        self.vals = [value for _, value, _ in spec]
        self.stationary = [point for _, _, point in spec]
class QSpace(object):
    """A quantity living in a qualitative value space (NZP, ZP or ZPM).

    Tracks the current index into the model's ordered value list and
    offers clamped one-step movement up or down that list.
    """

    def __init__(self, name, Qmodel, state):
        self.name = name
        self.q_model = Qmodel
        self.current_state = state
        # Total number of distinct qualitative values in the model.
        self.maximum = len(self.q_model.vals)

    def increase(self):
        """Step one value up the ladder, clamped at the top."""
        top = self.maximum - 1
        if self.current_state < top:
            self.current_state += 1

    def decrease(self):
        """Step one value down the ladder, clamped at the bottom."""
        if self.current_state > 0:
            self.current_state -= 1

    def setStateAs(self, q_state):
        """Copy another QSpace's position into this one."""
        # TODO add check if two states are the same
        self.current_state = q_state.current_state

    def getVal(self):
        """Numeric value of the current qualitative state."""
        return self.q_model.vals[self.current_state]

    def getName(self):
        """Display label of the current qualitative state."""
        return self.q_model.names[self.current_state]

    def isStationary(self):
        """True when the current state is a point (stationary) value."""
        return self.q_model.stationary[self.current_state]

    def __eq__(self, other):
        """Equal when the other QSpace holds the same numeric value."""
        if not isinstance(other, self.__class__):
            return False
        return self.getVal() == other.getVal()

    def __ne__(self, other):
        return not self.__eq__(other)
class State:
    """One qualitative system state of the container model.

    Holds magnitude/derivative pairs for the inflow, volume and outflow
    quantities, plus bookkeeping used by the state-graph generator.
    """

    def __init__(self, quantities):
        # quantities is a flat 6-item sequence, in this fixed order:
        # [inflow mag, inflow der, volume mag, volume der,
        #  outflow mag, outflow der]
        self.state = {
            'inflow': {'mag': quantities[0],
                       'der': quantities[1]},
            'volume': {'mag': quantities[2],
                       'der': quantities[3]},
            'outflow': {'mag': quantities[4],
                        'der': quantities[5]}
        }
        # Successor states, filled in during simulation.
        self.next_states = []
        self.quantities = quantities
        self.name = "0"
        self.desc = ""

    def __eq__(self, other):
        """States are equal when all their quantities compare equal.

        Bug fix: the original implicitly returned None for non-State
        operands (the isinstance branch had no else); now it explicitly
        returns False.  A length guard also prevents an IndexError when
        the quantity lists differ in length.
        """
        if not isinstance(other, self.__class__):
            return False
        if len(self.quantities) != len(other.quantities):
            return False
        return all(a == b for a, b in zip(self.quantities, other.quantities))

    def __ne__(self, other):
        return not self.__eq__(other)
class StateChange:
    """A human-readable description of a transition between two states."""

    def __init__(self, desc):
        # Correctly spelled attribute; the historical misspelling
        # 'desciption' is kept as an alias so existing callers still work.
        self.description = desc
        self.desciption = desc
def stationaryToIntervalChange(state_obj):
    """Return True when any quantity of the state sits on a point
    (stationary) value, i.e. a point-to-interval transition is possible.
    """
    return any(quantity.isStationary() for quantity in state_obj.quantities)
# Generate successor states produced by "flipping" the inflow derivative.
# The inflow tap is exogenous, so its derivative can change independently
# of the system; each guard below encodes one rule about when such a flip
# is (not) allowed.  The order of the if/return guards is load-bearing:
# the first matching rule decides the outcome.
def genFlipedInflow(state_obj):
    # Candidate successor states; each entry is a newState(...) dict.
    states = []
    # Steady inflow: it may always start increasing, and - if the tap is
    # actually open (magnitude non-zero) - it may also start decreasing.
    if state_obj.state['inflow']['der'].getVal() == 0:
        states.append(newState(state_obj, [('inflow', 'der', +1)],
                               desc="Id+", transition="increase"))
        if state_obj.state['inflow']['mag'].getVal() != 0:
            states.append(newState(state_obj, [('inflow', 'der', -1)],
                                   desc="Id-", transition="decrease"))
        return states
    # Inflow zero but already turning on: no flip is generated
    # (presumably to avoid immediately undoing the change - confirm).
    if (state_obj.state['inflow']['mag'].getVal() == 0
            and state_obj.state['inflow']['der'].getVal() == 1):
        return states
    # Inflow positive while outflow is steady and not at maximum:
    # no flip is generated in this configuration.
    if (state_obj.state['inflow']['mag'].getVal() == 1
            and state_obj.state['outflow']['der'].getVal() == 0
            and state_obj.state['outflow']['mag'].getVal() != 2):
        return states
    # Inflow decreasing while outflow sits at its maximum value:
    # no flip is generated.
    if (state_obj.state['inflow']['der'].getVal() == -1
            and state_obj.state['outflow']['mag'].getVal() == 2):
        return states
    # Decreasing inflow may flip back to increasing.
    if state_obj.state['inflow']['der'].getVal() == -1:
        states.append(newState(state_obj, [('inflow', 'der', +1)],
                               desc="Id+", transition="increase"))
        return states
    # Increasing inflow may flip to decreasing.
    if state_obj.state['inflow']['der'].getVal() == 1:
        states.append(newState(state_obj, [('inflow', 'der', -1)],
                               desc="Id-", transition="decrease"))
        return states
    return states
def newState(state_obj, change=(('inflow', 'der', 0),), desc="", transition=""):
    """Return a deep copy of *state_obj* with the given changes applied.

    Bug fix: the original used a mutable list as the default for
    ``change``; replaced with an equivalent immutable tuple to avoid the
    shared-mutable-default pitfall.  Callers passing their own lists are
    unaffected.

    Parameters:
        state_obj: the State to clone (never mutated).
        change: iterable of (quantity, field, delta) triples, where
            quantity is e.g. 'inflow', field is 'mag' or 'der', and
            delta is -1 (decrease), +1 (increase) or 0 (no-op).
        desc: short label for the transition (e.g. "Id+").
        transition: transition category (e.g. "increase", "time").

    Returns:
        dict with keys 'state' (the modified copy), 'desc' and
        'transition'.
    """
    new_state = copy.deepcopy(state_obj)
    for quantity, field, delta in change:
        if delta == -1:
            new_state.state[quantity][field].decrease()
        elif delta == 1:
            new_state.state[quantity][field].increase()
    return {'state': new_state, 'desc': desc, 'transition': transition}
def generateNextStates(state_obj):
    """Enumerate the qualitative successor states of *state_obj*.

    Point (immediate) changes are applied first; if none fire, exogenous
    inflow-derivative flips are considered; then the slower influence /
    proportionality rules fire depending on the inflow regime
    (increasing / steady / decreasing / zero).
    Returns a list of transition dicts as produced by newState().
    """
    state = state_obj.state
    new_states = []
    # imidiate changes
    if state['outflow']['mag'].getVal() == 0 and state['outflow']['der'].getVal() == 1:
        new_states.append(newState(state_obj,[('volume','mag',1),('outflow','mag',1)],
                          desc="Im+->Vd+,Od+", transition="time"))
        #new_states[-1]['state'].desc="Positive change in volume/outflow causes increase in magnitude of these quantities."
    if state['inflow']['mag'].getVal() == 0 and state['inflow']['der'].getVal() == 1:
        changes = [('inflow','mag',1)]
        desc = "Id+->Im+. "
        state_desc = "Positive change in inflow increases magnitude of inflow."
        if state['outflow']['der'].isStationary():
            changes.append(('outflow','der',1))
            changes.append(('volume','der',1))
            state_desc+=" Positive change in inflow magnitude causes to positively increase change of volume and outflow."
        new_states.append(newState(state_obj,changes, desc=desc+"Im+->Vd+,Od+", transition="time"))
        new_states[-1]['state'].desc=state_desc
    # Exogenous inflow flips are only offered when no immediate change fired.
    if len(new_states) == 0:
        new_states = new_states + genFlipedInflow(state_obj)
    # Changes which take long time:
    # increasing inflow volume
    if (state['inflow']['mag'].getVal() == 1 and state['inflow']['der'].getVal() == 1):
        # apply positive Infuence
        if state['outflow']['mag'].getVal() != 2:
            new_states.append(newState(state_obj,[('volume','der',+1),('outflow','der',+1)],
                              desc="E+->Vd+,Od+", transition="time"))
            new_states[-1]['state'].desc="Increasing inflow. Increasing derivation of Volume and Outflow."
        if state['outflow']['mag'].getVal() == 1 and state['outflow']['der'].getVal() == 1:
            # go to maximal state
            new_states.append(newState(state_obj,[('volume','mag',1),
                              ('volume','der',-1),('outflow','mag',1),('outflow','der',-1)],
                              desc="E+->Om+", transition="time"))
            new_states[-1]['state'].desc="Increasing inflow. Maximal capacity of container reached."
        # rate of changes between inflow and outflow- outflow is faster -> go back to steady
        if (state['outflow']['mag'].getVal() == 1
                and state['outflow']['der'].getVal() == state['inflow']['der'].getVal()):
            new_states.append(newState(state_obj,[('volume','der',-1),('outflow','der',-1)],
                              desc="Im<Om->Vd-,Od-", transition="time"))
            new_states[-1]['state'].desc="Increasing inflow. Inflow is increasing slower than Outflow. The volume is in positive steady state."
    # steady inflow volume
    if (state['inflow']['mag'].getVal() == 1 and state['inflow']['der'].getVal() == 0):
        change = -1* state['outflow']['der'].getVal()
        s = '+' if change >0 else '-' if change < 0 else '~'
        # NOTE(review): unlike every other call in this function, this
        # newState call passes no transition= argument (defaults to "") --
        # confirm whether that is intentional.
        new_states.append(newState(state_obj,
                          [('volume','der',change),('outflow','der',change)],
                          desc="E~->Vd"+s+',Od'+s))
        new_states[-1]['state'].desc="Positive steady inflow."
        if state['outflow']['der'].getVal() == 1:
            new_states.append(newState(state_obj,[('volume','mag',1),
                              ('volume','der',-1),('outflow','mag',1),('outflow','der',-1)],
                              desc="E~->Vm+,Om+", transition="time"))
            new_states[-1]['state'].desc="Positive steady inflow. Maximal capacity of container reached."
    # decreasing inflow volume
    if (state['inflow']['mag'].getVal() == 1 and state['inflow']['der'].getVal() == -1):
        # apply negative influence
        new_states.append(newState(state_obj,[('volume','der',-1),('outflow','der',-1)],
                          desc="E-->Vd-,Od-", transition="time"))
        # extreme no inflow volume left
        if state['outflow']['der'].getVal() == -1 and state['outflow']['mag'].getVal() < 2:
            new_states.append(newState(state_obj,[('inflow','der',+1),('inflow','mag',-1)],
                              desc="E-->Id0,Im0", transition="time"))
            new_states[-1]['state'].desc="Inflow is empty."
        # colapsing from maximum to plus
        if state['outflow']['mag'].getVal() == 2 and state['outflow']['der'].getVal() == -1:
            new_states.append(newState(state_obj,[('volume','mag',-1),('outflow','mag',-1)],
                              desc="E-->Vm-,Om-", transition="time"))
            new_states[-1]['state'].desc="Inflow is is slowing down what causes increase in outflow rate."
        # speed of decrease can be different in inflow and outflow -> go to steady outflow
        if (state['outflow']['der'].getVal() == state['inflow']['der'].getVal()
                and not state['outflow']['mag'].isStationary()):
            new_states.append(newState(state_obj,[('volume','der',+1),('outflow','der',+1)],
                              desc="E-->Vd-,Od-", transition="time"))
            new_states[-1]['state'].desc="Positive steady state"
    # no inflow volume
    if (state['inflow']['mag'].getVal() == 0 and state['inflow']['der'].getVal() == 0):
        if state['outflow']['mag'].getVal() > 0:
            new_states.append(newState(state_obj,
                              [('volume','der',-1),('outflow','der',-1)],
                              desc="E0->Vd-,Od-", transition="time"))
        if (state['outflow']['mag'].getVal() == 1 and state['outflow']['der'].getVal() == -1):
            new_states.append(newState(state_obj,[('volume','der',1),('outflow','der',1),
                              ('volume','mag',-1),('outflow','mag',-1)],
                              desc="E0->Vd+,Od+", transition="time"))
    # print('new states generated: ',len(new_states))
    return new_states
def printState(state_obj):
    """Pretty-print one state: its name, then (magnitude, derivative) names
    for inflow, volume and outflow, followed by a separator line."""
    quantities = state_obj.state
    print("State", state_obj.name)
    for key in ('inflow', 'volume', 'outflow'):
        print(quantities[key]['mag'].getName(), quantities[key]['der'].getName())
    print('----------------------')
def createEdge(source, target, desc, transition):
    """Build an edge record linking two states with its explanation text."""
    edge = dict(explanation=desc, source=source, target=target, transition=transition)
    return edge
def addNewState(edges, states, source, target, desc, transition):
    """Register *target* as a successor of *source*.

    Mutates source.next_states, *edges* and *states* in place; returns the
    (edges, states) pair for caller convenience.
    """
    source.next_states.append(target)
    new_edge = createEdge(source, target, desc, transition)
    edges.append(new_edge)
    states.append(target)
    return edges, states
def existingState(states, state):
    """Return the first element of *states* equal to *state*, else None."""
    return next((known for known in states if known == state), None)
#------------------------------------ VISUALIZATION -------------------------------
# returns the values for all variables in text format
def getStateText(state):
    """Return a multi-line node label: state name, then the magnitude and
    derivative names of inflow, volume and outflow (one quantity per line)."""
    rows = [str(state.name)]
    for key in ('inflow', 'volume', 'outflow'):
        mag_name = state.state[key]['mag'].getName()
        der_name = state.state[key]['der'].getName()
        rows.append(mag_name + " " + der_name)
    return "\n".join(rows)
# generates a visual (directed) graph of all states
def generateGraph(edgeList):
    """Render the state graph with pydot.

    Nodes are states (leaf states -- those with no successors -- get a
    distinct fill and border), edges are transitions colored green / red /
    black for increase / decrease / other.
    """
    graph = pydot.Dot(graph_type='digraph', center=True, size=15)
    for edgeObj in edgeList:
        transitionText = edgeObj['explanation']  # explanation for transition
        transitionType = edgeObj['transition']   # type of transition (+, -, or time)
        sourceState = edgeObj['source']          # source state (obj)
        targetState = edgeObj['target']          # target state (obj)
        if transitionType == "increase":
            edgeFillColor = '#00FF00'
        elif transitionType == "decrease":
            edgeFillColor = '#FF0000'
        else:
            # BUG FIX: was '#black', which is not a valid Graphviz color
            # (hex specs are '#RRGGBB'; named colors carry no '#').
            edgeFillColor = 'black'
        sourceStateText = getStateText(sourceState)  # all values of source state in text format
        targetStateText = getStateText(targetState)  # all values of target state in text format
        if len(targetState.next_states) == 0:
            # Leaf state: highlight it.
            nodeFillColor = '#81B2E0'
            nodeBorder = 2.8
        else:
            nodeFillColor = '#92E0DF'
            nodeBorder = 1.5
        sourceNode = pydot.Node(sourceStateText, shape='rectangle',
                                style="filled", fillcolor='#92E0DF', penwidth=1.5)
        graph.add_node(sourceNode)
        targetNode = pydot.Node(targetStateText, shape='rectangle',
                                style="filled", fillcolor=nodeFillColor, penwidth=nodeBorder)
        graph.add_node(targetNode)
        edge = pydot.Edge(sourceNode, targetNode, label=transitionText,
                          color=edgeFillColor, penwidth=2.25)
        graph.add_edge(edge)
    return graph
def decodeDesc(desc):
    """Expand a terse transition code (e.g. "Id+->Im+") into English prose.

    The substitution order is significant and preserved exactly: lower-case
    field letters first, then multi-character inflow codes, then the
    remaining single-character symbols.
    """
    substitutions = (
        ('d', "derivative] "),
        ('m', "magnitude] "),
        ('I', "[Inflow "),
        ('E+', "Inflow is increasing "),
        ('E-', "Inflow is decreasing "),
        ('E~', "Inflow is positive "),
        ('E0', "Inflow is closed "),
        (',', "and "),
        ('->', "implies that "),
        ('O', "[Outflow "),
        ('V', "[Volume "),
        ('+', "increases "),
        ('-', "decreases "),
        # ('~', "is steady ")  -- disabled in the original, kept disabled
        ('<', "is less than "),
        ('.', "\n "),
    )
    out = desc
    for code, text in substitutions:
        out = out.replace(code, text)
    return out
def printIntraState(state_obj):
    """Print one state's quantities plus a trend sentence per quantity."""
    state = state_obj.state
    printState(state_obj)
    print(state_obj.desc)
    # Only quantities whose magnitude sits in the "plus" interval (value 1)
    # get a trend sentence.
    for var in ['inflow', 'outflow', 'volume']:
        if state[var]['der'].getVal() == 1 and state[var]['mag'].getVal() == 1:
            print(var+ ' quantity increasing')
        if state[var]['der'].getVal() == 0 and state[var]['mag'].getVal() == 1:
            print(var+ ' quantity is steady')
        if state[var]['der'].getVal() == -1 and state[var]['mag'].getVal() == 1:
            print(var+ ' quantity decreasing')
    # NOTE(review): the string literal below is evaluated and discarded --
    # dead code used as a block comment; kept verbatim.
    '''
    if state_obj.desc == None or state_obj.desc == '':
        if state['inflow']['der'].getVal() == 0:
            print("Initial state. Inflow is empty.")
        if state['inflow']['der'].getVal() == 1:
            print("Increasing inflow.")
        if state['volume']['der'].getVal() == -1:
            print('Decreasing volume / outflow.')
        if state['volume']['der'].getVal() == 1:
            print('Increasing volume / outflow.')
        if state['volume']['der'].getVal() == 0:
            print('Steady volume / outflow.')
    # if state['inflow']['der'].getVal() == 1:
    #     print('Inflow is increasing')
    # if state['inflow']['der'].getVal() == -1:
    #     print('Inflow is decreasing')
    # if state['inflow']['der'].getVal() == 0 and state['inflow']['mag'].getVal() == 0:
    #     print('Inflow is positive without change')
    # if state['outflow']['mag'].getVal() == 2:
    #     print('Container is full.')
    # if state['outflow']['der'].getVal() == 1:
    #     print('')
    '''
    print('----------------------')
def printInterstate(name_a, name_b, desc):
    """Print one transition line: source -> target, the terse code, and its
    decoded English explanation, in fixed-width columns."""
    decoded = decodeDesc(desc)
    line = "{:<3}->{:<3}:{:<30}{:<100}".format(name_a, name_b, desc, decoded)
    print(line)
# --------------------------------------- MAIN --------------------------------------
# Quantity spaces: inflow over {0,+}, volume/outflow over {0,+,max};
# every derivative over {-,0,+}.  Second ctor argument is the value space,
# third the initial index into it.
inflow_mag = QSpace('inflow_mag', ZP(), 0)
inflow_der = QSpace('inflow_der', NZP(), 1)
volume_mag = QSpace('volume_mag', ZPM(), 0)
volume_der = QSpace('volume_der', NZP(), 1)
outflow_mag = QSpace('outflow_mag', ZPM(), 0)
outflow_der = QSpace('outflow_der', NZP(), 1)
initial_state = State(
    [inflow_mag, inflow_der,
     volume_mag, volume_der,
     outflow_mag, outflow_der])
states = [initial_state]
edges = []
# Breadth-first exploration of the qualitative state space.
fringe = queue.Queue()
fringe.put(initial_state)
iteration = 0
print("INTER-STATE TRACE")
dot_graph = None
while not fringe.empty():
    curr_state = fringe.get(block=False)
    new_states = generateNextStates(curr_state)
    for state_dict in new_states:
        same_state = existingState(states, state_dict['state'])
        if same_state is None:
            # Genuinely new state: name it by insertion order and enqueue it.
            state_dict['state'].name = str(len(states))
            edges, states = addNewState(edges, states,
                source=curr_state, target=state_dict['state'],
                desc=state_dict['desc'],transition=state_dict['transition'])
            fringe.put(state_dict['state'])
            printInterstate(curr_state.name,state_dict['state'].name,state_dict['desc'])
        elif curr_state != same_state:
            # Known state: only add the edge (avoid self-loops).
            curr_state.next_states.append(same_state)
            edges.append(createEdge(source=curr_state, target=same_state,
                desc=state_dict['desc'], transition=state_dict['transition']))
            printInterstate(curr_state.name,same_state.name,state_dict['desc'])
    # Regenerated every iteration; only the final graph is written below.
    dot_graph = generateGraph(edges)
    iteration+=1
    # print('************'+str(iteration)+'*****************')
    # input("Press Enter to continue...")
dot_graph.write('graph.dot')
dot_graph.write_png('TEST_graph.png')
print("\n")
print("INTRA-STATE TRACE")
for st in states:
    printIntraState(st)
    print("\n")
from fhwebscrapers.B3derivatives.curvasb3 import ScraperB3
from fhwebscrapers.CETIP.getcetipdata import CETIP
__all__ = ['ScraperB3', 'CETIP']
| 144 | 66 |
from .lite_data_store import LiteDataStore
| 43 | 15 |
from conf import settings
import pandas as pd
import numpy as np
import datetime
import os
def stringify_results(res, reg_conf, regression_key):
    """Render one regression run as a timestamped, human-readable report.

    Parameters
    ----------
    res : result object exposing sme/rsme/ame(label, apply_inverse) metrics
        and the datasets used for the run.
    reg_conf : dict of model hyper-parameters to list in the report.
    regression_key : name of the selected model.

    Returns
    -------
    (filename, content) -- a "<timestamp>-<model>.txt" file name and the
    formatted report body.

    BUG FIX: the report header read "PRAMETERS:"; corrected to "PARAMETERS:".
    ("RSME" labels are kept as-is since they mirror the res.rsme() method name.)
    """
    res_string = """
-------------------------------
{datetime}
SELECTED MODEL: {model}
Link Function (y-transform): {link}
Other Transformations (x-transform):
{transf}
PARAMETERS:
{params}
TRAIN DATA
> SME : {sme_train} ({sme_train_before})
> RSME: {rsme_train} ({rsme_train_before})
> AME : {ame_train} ({ame_train_before})
TEST DATA
> SME : {sme_test} ({sme_test_before})
> RSME: {rsme_test} ({rsme_test_before})
> AME : {ame_test} ({ame_test_before})
TEMPORAL VALIDATION (2017)
> SME : {sme_valid} ({sme_valid_before})
> RSME: {rsme_valid} ({rsme_valid_before})
> AME : {ame_valid} ({ame_valid_before})
Response Variable Stats (insured employment) -- train data
Stats:
{stats}
Temp. Validation RMSE / response_mean = {mean}
Temp. Validation RMSE / response_median = {median}
"""
    # Response Variable Stats
    stats = pd.DataFrame(res.datasets.get_train(True, True), columns=["response-variable"]).describe()
    # Stringify Parameters
    params = ""
    for param in reg_conf:
        params += "\t> " + param + ": " + str(reg_conf[param]) + "\n"
    # Stringify x-transforms
    other_transf = ""
    tranf_functions = res.datasets.transformations
    for transf in tranf_functions:
        other_transf += "\t> " + transf + ": " + str(tranf_functions[transf]) + "\n"
    # Format Content
    now = datetime.datetime.now()
    content = res_string.format(
        datetime=now.strftime("%Y/%m/%d %H:%M:%S"),
        model=regression_key,
        link=res.datasets.link,
        transf=other_transf,
        params=params,
        sme_train=res.sme(settings.ModelConf.labels.train, apply_inverse=True),
        sme_train_before=res.sme(settings.ModelConf.labels.train, apply_inverse=False),
        rsme_train=res.rsme(settings.ModelConf.labels.train, apply_inverse=True),
        rsme_train_before=res.rsme(settings.ModelConf.labels.train, apply_inverse=False),
        ame_train=res.ame(settings.ModelConf.labels.train, apply_inverse=True),
        ame_train_before=res.ame(settings.ModelConf.labels.train, apply_inverse=False),
        sme_test=res.sme(settings.ModelConf.labels.test, apply_inverse=True),
        sme_test_before=res.sme(settings.ModelConf.labels.test, apply_inverse=False),
        rsme_test=res.rsme(settings.ModelConf.labels.test, apply_inverse=True),
        rsme_test_before=res.rsme(settings.ModelConf.labels.test, apply_inverse=False),
        ame_test=res.ame(settings.ModelConf.labels.test, apply_inverse=True),
        ame_test_before=res.ame(settings.ModelConf.labels.test, apply_inverse=False),
        sme_valid=res.sme(settings.ModelConf.labels.validate, apply_inverse=True),
        sme_valid_before=res.sme(settings.ModelConf.labels.validate, apply_inverse=False),
        rsme_valid=res.rsme(settings.ModelConf.labels.validate, apply_inverse=True),
        rsme_valid_before=res.rsme(settings.ModelConf.labels.validate, apply_inverse=False),
        ame_valid=res.ame(settings.ModelConf.labels.validate, apply_inverse=True),
        ame_valid_before=res.ame(settings.ModelConf.labels.validate, apply_inverse=False),
        stats=str(stats).replace("\n", "\n\t"),
        mean=res.rsme(settings.ModelConf.labels.validate, apply_inverse=True) / stats.loc["mean"].values[0],
        median=res.rsme(settings.ModelConf.labels.validate, apply_inverse=True) / stats.loc["50%"].values[0]
    )
    filename = now.strftime("%Y-%m-%d-%H-%M-%S") + "-" + regression_key + ".txt"
    return filename, content
def logg_result(res, reg_conf, regression_key):
    """Render the run's results, echo them to stdout and persist them
    under <PROJECT_DIR>/logs/ with the generated timestamped file name."""
    filename, content = stringify_results(res, reg_conf, regression_key)
    print(content)
    log_path = os.path.join(settings.PROJECT_DIR, "logs", filename)
    with open(log_path, "w") as file:
        file.write(content)
def results_as_dict(res):
    """Summarise model performance as a nested dict.

    Returns lag columns used by the model, RSME/AME/percentage error per
    split (train/test/validate), and per-category error sums over the 2017
    validation data.
    """
    train_label = settings.ModelConf.labels.train
    test_label = settings.ModelConf.labels.test
    validate_label = settings.ModelConf.labels.validate
    def reverse_dict(d):
        # Invert a mapping (used to decode category-encoder integer codes).
        return {v: k for k, v in d.items()}
    def percentage_error(label, res):
        # Absolute error of the summed predictions, as % of the summed truth.
        original = sum(res.original_output(label, True))
        pred = sum(res.prediction(label, True))
        return 100 * np.abs(original - pred) / original
    vdf = res.data(validate_label).copy()
    vdf["prediction"] = res.prediction(validate_label, True)
    vdf["value"] = res.original_output(validate_label, True)
    vdf["abs_error"] = np.abs(vdf["prediction"] - vdf["value"])
    # Boolean mask for the most recent year+month in the validation frame.
    reference_index = ((vdf.year + vdf.month / 12) == (vdf.year + vdf.month / 12).max()).values
    # NOTE(review): the next statement's result is discarded -- dead code,
    # likely a leftover from interactive inspection.
    vdf[reference_index].head()
    categ = {}
    for sc in res.datasets.string_cols:
        # Decode integer category codes back to their original labels,
        # then aggregate errors per category value.
        vdf[sc] = vdf[sc].replace(reverse_dict(res.datasets.category_encoder[sc]))
        temp = vdf.groupby(sc)[["prediction", "value", "abs_error"]].sum()
        temp["percentage_error"] = 100 * temp["abs_error"] / temp["value"]
        categ[sc] = temp.T.to_dict()
    return {
        "model-desc": {
            "lags": [c for c in res.datasets.get_train().columns if "t-" in c]
        },
        "model-performance": {
            train_label: {
                "rsme": res.rsme(train_label, apply_inverse=True),
                "ame": res.ame(train_label, apply_inverse=True),
                "percentage-error": percentage_error(train_label, res)
            },
            test_label: {
                "rsme": res.rsme(test_label, apply_inverse=True),
                "ame": res.ame(test_label, apply_inverse=True),
                "percentage-error": percentage_error(test_label, res)
            },
            validate_label: {
                "rsme": res.rsme(validate_label, apply_inverse=True),
                "ame": res.ame(validate_label, apply_inverse=True),
                "percentage-error": percentage_error(validate_label, res)
            }
        },
        "validation-data-2017": categ
    }
| 6,040 | 2,040 |
# Scratch script experimenting with terminal capabilities (size, SIGHUP
# detection, in-place line rewriting with ANSI escape codes).
# print("aaaaaaaaaa bbbbbbbbbb")
# # print(chr(27) + "[2J")
import os
import sys
from enum import Enum
import signal
# NOTE(review): getOutputType is not defined or imported anywhere in this
# script -- this line raises NameError at runtime.
print(getOutputType())
# NOTE(review): everything below exit() is unreachable while it stands.
exit()
# import os
# os.system('cls' if os.name == 'nt' else 'clear')
size = os.get_terminal_size()
print(size[0])
# SIG_DFL means no custom handler was installed (e.g. not running under nohup).
if signal.getsignal(signal.SIGHUP) == signal.SIG_DFL:  # default action
    print("No SIGHUP handler")
else:
    print("In nohup mode")
import time
# Animated "Loading..." on a single line via carriage return.
for x in range (0,5):
    b = "Loading" + "." * x
    print (b, end="\r")
    time.sleep(1)
import sys
print("FAILED...")
sys.stdout.write("\033[F") #back to previous line
time.sleep(1)
sys.stdout.write("\033[K") #clear line
print("SUCCESS!")
import os
import time
import cv2
import random
import colorsys
import numpy as np
import tensorflow as tf
import pytesseract
import core.utils as utils
from core.config import cfg
import re
from PIL import Image
from polytrack.general import cal_dist
import itertools as it
import math
# import tensorflow as tf
# Module-level side effects: configure the first GPU (memory growth, visible
# devices) and load the saved YOLO model once at import time.
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
    # Grow GPU memory on demand instead of pre-allocating it all.
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
    tf.config.set_visible_devices(physical_devices[0:1], 'GPU')
from absl import app, flags, logging
from absl.flags import FLAGS
import core.utils as utils
from core.yolov4 import filter_boxes
from tensorflow.python.saved_model import tag_constants
from PIL import Image
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
from polytrack.config import pt_cfg
model_weights = './checkpoints/custom-416'
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
# Load the serving signature used by run_DL() below.
saved_model_loaded = tf.saved_model.load(model_weights, tags=[tag_constants.SERVING])
infer = saved_model_loaded.signatures['serving_default']
def dl_detections_process(bboxes):
    """Convert raw combined-NMS output into an (N, 6) detection array.

    Parameters
    ----------
    bboxes : tuple (out_boxes, out_scores, out_classes, num_boxes) as
        produced by run_DL().

    Returns
    -------
    numpy array with one row per kept detection:
    (coor0, coor1, coor2, coor3, class_name, score); only classes listed in
    pt_cfg.POLYTRACK.TRACKING_INSECTS are retained.
    """
    classes = utils.read_class_names(cfg.YOLO.CLASSES)
    allowed_classes = pt_cfg.POLYTRACK.TRACKING_INSECTS
    num_classes = len(classes)
    _dl_detections = np.zeros(shape=(0, 6))
    out_boxes, out_scores, out_classes, num_boxes = bboxes
    for i in range(num_boxes):
        # BUG FIX: valid indices are 0 .. num_classes-1.  The original guard
        # used '> num_classes', letting class id == num_classes through and
        # crashing on the classes[class_ind] lookup below.
        if int(out_classes[i]) < 0 or int(out_classes[i]) >= num_classes:
            continue
        coor = out_boxes[i]
        score = out_scores[i]
        class_ind = int(out_classes[i])
        class_name = classes[class_ind]
        if class_name not in allowed_classes:
            continue
        _dl_detections = np.vstack(
            [_dl_detections, (coor[0], coor[1], coor[2], coor[3], class_name, score)])
    return _dl_detections
def map_darkspots(__frame, _dark_spots):
    """Paint filled grey discs over recorded dark spots so they are ignored
    by later detection stages; returns the modified frame."""
    radius = int(pt_cfg.POLYTRACK.DL_DARK_SPOTS_RADIUS)
    for spot in _dark_spots:
        centre = (int(spot[0]), int(spot[1]))
        __frame = cv2.circle(__frame, centre, radius, (100, 100, 100), -1)
    return __frame
def run_DL(_frame):
    """Run the YOLO saved-model on a BGR frame and return filtered detections.

    The frame is converted to RGB, resized to the model input size,
    scaled to [0, 1], batched, pushed through the serving signature, then
    combined non-max suppression is applied and the result post-processed
    by dl_detections_process().
    """
    #if pt_cfg.POLYTRACK.DL_DARK_SPOTS:
    #dark_spots = pt_cfg.POLYTRACK.RECORDED_DARK_SPOTS
    #if len(dark_spots):
    #    _frame = map_darkspots(_frame, dark_spots)
    #else:
    #    pass
    # else:
    #     pass
    _frame = cv2.cvtColor(_frame, cv2.COLOR_BGR2RGB)
    image = Image.fromarray(_frame)  # NOTE(review): unused -- candidate for removal
    frame_size = _frame.shape[:2]    # NOTE(review): unused -- candidate for removal
    image_data = cv2.resize(_frame, (cfg.YOLO.INPUT_SIZE, cfg.YOLO.INPUT_SIZE))
    image_data = image_data / 255.
    image_data = image_data[np.newaxis, ...].astype(np.float32)
    batch_data = tf.constant(image_data)
    pred_bbox = infer(batch_data)
    # If the signature ever returned several tensors, only the last one's
    # boxes/pred_conf would survive this loop.
    for key, value in pred_bbox.items():
        boxes = value[:, :, 0:4]
        pred_conf = value[:, :, 4:]
    boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
        boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
        scores=tf.reshape(
            pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
        max_output_size_per_class=pt_cfg.POLYTRACK.MAX_OUTPUT_SIZE_PER_CLASS,
        max_total_size=pt_cfg.POLYTRACK.MAX_TOTAL_SIZE,
        iou_threshold=pt_cfg.POLYTRACK.DL_IOU_THRESHOLD,
        score_threshold=pt_cfg.POLYTRACK.DL_SCORE_THRESHOLD
    )
    # format bounding boxes from normalized ymin, xmin, ymax, xmax ---> xmin, ymin, xmax, ymax
    original_h, original_w, _ = _frame.shape
    bboxes = utils.format_boxes(boxes.numpy()[0], original_h, original_w)
    pred_bbox = [bboxes, scores.numpy()[0], classes.numpy()[0], valid_detections.numpy()[0]]
    # read in all class names from config
    class_names = utils.read_class_names(cfg.YOLO.CLASSES)
    _detections = dl_detections_process(pred_bbox)
    return _detections
#Calculate the area covered by the insect
def cal_bodyArea_DL(_x_TL, _y_TL, _x_BR, _y_BR):
    """Return the absolute area of the bounding box given its top-left and
    bottom-right corner coordinates."""
    width = _x_BR - _x_TL
    height = _y_BR - _y_TL
    return abs(width * height)
#Extract the data from result and calculate the center of gravity of the insect
def cal_CoG_DL(result):
    """Derive centre of gravity, body area and an inner radius from one box.

    *result* rows hold string-typed values (x_TL, y_TL, x_BR, y_BR, ...).
    Returns (x_cog, y_cog, body_area, radius).
    """
    x_tl, y_tl, x_br, y_br = (int(float(result[k])) for k in range(4))
    x_cog = int(round((x_tl + x_br) / 2))
    y_cog = int(round((y_tl + y_br) / 2))
    # Inner radius: corner-to-centre distance projected at 45 degrees.
    radius = round(cal_dist(x_tl, y_tl, x_cog, y_cog) * math.cos(math.radians(45)))
    body_area = cal_bodyArea_DL(x_tl, y_tl, x_br, y_br)
    return x_cog, y_cog, body_area, radius
#Detect insects in frame using Deep Learning
def detect_deep_learning(_frame, flowers=False):
    """Run the network on a frame and return verified detections.

    With flowers=False returns insect rows; with flowers=True flower rows.
    Duplicate suppression only runs when more than one detection exists.
    """
    raw_results = run_DL(_frame)
    detections = process_DL_results(raw_results, flowers)
    if len(detections) > 1:
        detections = verify_insects_DL(detections)
    return detections
def process_DL_results(_results, flowers):
    """Split raw detections into insect rows or flower rows.

    Insect rows (flowers=False): (x, y, body_area, species, confidence).
    Flower rows (flowers=True):  (x, y, radius, species, confidence).
    """
    _logDL = np.zeros(shape=(0, 5))  # x, y, area-or-radius, type, conf
    for result in _results:
        confidence = result[5]
        _species = result[4]
        is_flower = (_species == 'flower')
        if bool(flowers) != is_flower:
            # Keep flowers only when asked for them, insects otherwise.
            continue
        if is_flower:
            _x_DL, _y_DL, _, _radius = cal_CoG_DL(result)
            row = (float(_x_DL), float(_y_DL), float(_radius), _species, confidence)
        else:
            _x_DL, _y_DL, _body_area, _ = cal_CoG_DL(result)
            row = (float(_x_DL), float(_y_DL), float(_body_area), _species, confidence)
        _logDL = np.vstack([_logDL, row])
    return _logDL
# Calculate the distance between two coordinates
def cal_euclidean_DL(_insects_inFrame, _pair):
    """Euclidean distance between the centres of the two detections indexed
    by *_pair*; row coordinates may be stored as strings."""
    first = _insects_inFrame[_pair[0]]
    second = _insects_inFrame[_pair[1]]
    delta_x = float(first[0]) - float(second[0])
    delta_y = float(first[1]) - float(second[1])
    return np.sqrt(delta_x ** 2 + delta_y ** 2)
#Verify that there are no duplicate detections (The distance between two CoG are >= 20 pixels)
def verify_insects_DL(_insects_inFrame):
    """Suppress duplicate detections whose centres lie closer than 15 px.

    Conflicting pairs are resolved by evaluvate_conflict(), which keeps the
    higher-confidence detection of each pair.
    """
    conflicts = [
        pair
        for pair in it.combinations(np.arange(len(_insects_inFrame)), 2)
        if cal_euclidean_DL(_insects_inFrame, pair) < 15
    ]
    if conflicts:
        _insects_inFrame = evaluvate_conflict(conflicts, _insects_inFrame)
    return _insects_inFrame
#Evaluvate the confidence levels in DL and remove the least confidence detections
def evaluvate_conflict(_conflict_pairs, _insects_inFrame):
    """Resolve duplicate-detection conflicts by confidence.

    For every conflicting pair the lower-confidence detection (column 4) is
    marked for removal; ties keep the first of the pair.  Returns the frame
    array with the losing rows deleted.
    """
    losers = []
    for first, second in _conflict_pairs:
        if _insects_inFrame[first][4] >= _insects_inFrame[second][4]:
            losers.append(second)
        else:
            losers.append(first)
    unique_losers = list(dict.fromkeys(losers))  # de-duplicate, keep order
    return np.delete(_insects_inFrame, unique_losers, 0)
| 7,674 | 2,865 |
# Packaging script for the "imagechain" library.
from setuptools import setup
from codecs import open
from os import path
NAME_REPO="imagechain"
here = path.abspath(path.dirname(__file__))
# Reuse the README as the PyPI long description.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name=NAME_REPO,
    packages=[NAME_REPO],
    version='0.1',
    license='MIT',
    install_requires=[],  # no runtime dependencies declared
    author='p-geon',
    author_email='alchemic4s@gmail.com',
    url='https://github.com/p-geon/' + NAME_REPO,
    description='Image plotting & Image conversion',
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords='image plot',
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.7',
    ],
)
from django import forms
from .models import *
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.db import transaction
from bookstoreapp.models import *
#ordersystem
from django import forms
# from django.contrib.auth.models import User
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm
# Create your forms here.
# class BookForm(forms.ModelForm):
# class Meta:
# model = Books
# fields = ('file', 'image','author',"year_published",'title','price')
# Resolve the project's active user model once (honours a custom AUTH_USER_MODEL).
User = get_user_model()
#Authentication
# class CustomerSignUp(UserCreationForm):
# first_name= forms.CharField(label='First Name' ,error_messages={'required': 'Please enter your first name'})
# last_name= forms.CharField(label='Last Name',error_messages={'required': 'Please enter your last name'})
# email= forms.EmailField(label='Email Address' ,help_text='Format: 123@gmail.com, 456@yahoo.com',error_messages={'required': 'Please enter your email address'})
# class Meta(UserCreationForm.Meta):
# model = User
# fields=['first_name','last_name','username','email','password1','password2']
# @transaction.atomic
# def save(self):
# user = super().save(commit=False)
# user.is_customer=True
# user.save()
# customer = Customer.objects.create(user=user)
# customer.first_name = self.cleaned_data.get('first_name')
# customer.last_name = self.cleaned_data.get('last_name')
# customer.email = self.cleaned_data.get('email')
# return user
# class AuthorSignUp(UserCreationForm):
# first_name= forms.CharField(label='First Name' ,error_messages={'required': 'Please enter your first name'})
# last_name= forms.CharField(label='Last Name',error_messages={'required': 'Please enter your last name'})
# email= forms.EmailField(label='Email Address' ,help_text='Format: 123@gmail.com, 456@yahoo.com',error_messages={'required': 'Please enter your email address'})
# class Meta(UserCreationForm.Meta):
# model = User
# fields=['first_name','last_name','username','email','password1','password2']
# @transaction.atomic
# def save(self):
# user = super().save(commit=False)
# user.is_author=True
# user.save()
# author = Author.objects.create(user=user)
# author.first_name = self.cleaned_data.get('first_name')
# author.last_name = self.cleaned_data.get('last_name')
# author.email = self.cleaned_data.get('email')
# return user
#order system
class UserRegisterForm(UserCreationForm):
    """Sign-up form: the standard UserCreationForm plus a required email field."""

    email = forms.EmailField(max_length=254, help_text='Required. Enter a valid email address.')

    class Meta:
        model = User
        fields = ['username', 'email', 'password1', 'password2']
class ProductForm(forms.ModelForm):
    """Model form for creating/editing a Product (book) listing."""

    class Meta:
        model = Product
        fields = ('image','name','description', 'price', 'digital','author','year_published')
import sys, pygame,math
import numpy as np
from pygame import gfxdraw
import pygame_lib, nn_lib
import pygame.freetype
from pygame_lib import color
import random
import copy
import auto_maze
import node_vis | 207 | 61 |
import os
def parse_config_env(default_dict):
config_dict = {}
for key, value in default_dict.items():
config_dict[key] = os.environ.get(key, value)
return config_dict
# SMTP connection defaults; each key may be overridden by an identically
# named environment variable (environment values arrive as strings).
SMTP_KEYS = {
    "SMTP_HOST": "localhost",
    "SMTP_PORT": 25,
    "SMTP_FROM": "no-reply@example.com",
    "SMTP_USER": None,
    "SMTP_PASS": None,
    "SMTP_CERT": None,
}
# UAA (Cloud Foundry auth server) defaults, overridable the same way.
UAA_KEYS = {
    "UAA_BASE_URL": "https://uaa.bosh-lite.com",
    "UAA_CLIENT_ID": None,
    "UAA_CLIENT_SECRET": None,
}
# Resolved configuration dictionaries, built once at import time.
smtp = parse_config_env(SMTP_KEYS)
uaa = parse_config_env(UAA_KEYS)
| 563 | 245 |
#!/usr/bin/env python
# pylint: disable=redefined-outer-name,too-many-arguments,too-many-locals
"""The actual fixtures, you found them ;)."""
import logging
import itertools
from base64 import b64encode
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from ssl import create_default_context, SSLContext
from string import Template
from time import sleep, time
from typing import Dict, Generator, List, NamedTuple
import pytest
from docker import DockerClient, from_env
from lovely.pytest.docker.compose import Services
from _pytest.tmpdir import TempPathFactory
from .imagename import ImageName
from .utils import (
check_url_secure,
DOCKER_REGISTRY_SERVICE,
DOCKER_REGISTRY_SERVICE_PATTERN,
generate_cacerts,
generate_htpasswd,
generate_keypair,
get_docker_compose_user_defined,
get_embedded_file,
get_user_defined_file,
replicate_image,
start_service,
)
# Caching is needed, as singular-fixtures and list-fixtures will conflict at scale_factor=1
# This appears to only matter when attempting to start the docker secure registry service
# for the second time.
CACHE = {}  # keyed by helper-function name; holds lists of resolved artifacts
LOGGER = logging.getLogger(__name__)
class DockerRegistryCerts(NamedTuple):
    """Filesystem paths to the PKI material generated for a secure registry."""
    # pylint: disable=missing-class-docstring
    ca_certificate: Path
    ca_private_key: Path
    certificate: Path
    private_key: Path
class DockerRegistryInsecure(NamedTuple):
    """Handle for a running insecure (unauthenticated) docker registry service."""
    # pylint: disable=missing-class-docstring
    docker_client: DockerClient     # API client for interacting with the daemon
    docker_compose: Path            # compose file that defines the service
    endpoint: str                   # network endpoint of the registry service
    images: List[ImageName]         # images replicated into this registry
    service_name: str               # docker-compose service name
# Note: NamedTuple does not support inheritance :(
class DockerRegistrySecure(NamedTuple):
# pylint: disable=missing-class-docstring
auth_header: Dict[str, str]
cacerts: Path
certs: DockerRegistryCerts
docker_client: DockerClient
docker_compose: Path
endpoint: str
htpasswd: Path
images: List[ImageName]
password: str
service_name: str
ssl_context: SSLContext
username: str
@pytest.fixture(scope="session")
def docker_client() -> DockerClient:
    """Session-scoped Docker API client configured from the environment."""
    client = from_env()
    return client
def _docker_compose_insecure(
    *,
    docker_compose_files: List[str],
    scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """
    Provides the location of the docker-compose configuration file containing the insecure docker registry service.
    """
    # Results are memoized in the module-level CACHE so that the singular
    # and list fixtures resolve to the same paths.
    cache_key = _docker_compose_insecure.__name__
    result = CACHE.get(cache_key, [])
    for i in range(scale_factor):
        if i < len(result):
            # Already resolved by an earlier call (possibly at a lower scale).
            continue
        service_name = DOCKER_REGISTRY_SERVICE_PATTERN.format("insecure", i)
        # Prefer a user-supplied compose file; fall back to the embedded one.
        chain = itertools.chain(
            get_docker_compose_user_defined(docker_compose_files, service_name),
            # TODO: lovely-docker-compose uses the file for teardown ...
            get_embedded_file(
                tmp_path_factory, delete_after=False, name="docker-compose.yml"
            ),
        )
        for path in chain:
            # Take only the first candidate the chain yields.
            result.append(path)
            break
        else:
            # No compose file found anywhere; record a sentinel placeholder.
            LOGGER.warning("Unable to find docker compose for: %s", service_name)
            result.append("-unknown-")
    CACHE[cache_key] = result
    yield result
@pytest.fixture(scope="session")
def docker_compose_insecure(
    docker_compose_files: List[str], tmp_path_factory: TempPathFactory
) -> Generator[Path, None, None]:
    """
    Provides the location of the docker-compose configuration file containing the insecure docker registry service.
    """
    generator = _docker_compose_insecure(
        docker_compose_files=docker_compose_files,
        scale_factor=1,
        tmp_path_factory=tmp_path_factory,
    )
    # Delegate to the scale-aware helper and expose only the first entry.
    yield from (paths[0] for paths in generator)
@pytest.fixture(scope="session")
def docker_compose_insecure_list(
    docker_compose_files: List[str],
    pdrf_scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """
    Provides the location of the docker-compose configuration file containing the insecure docker registry service.
    """
    # Thin wrapper: the scale-aware helper does all of the work (and caches).
    yield from _docker_compose_insecure(
        docker_compose_files=docker_compose_files,
        scale_factor=pdrf_scale_factor,
        tmp_path_factory=tmp_path_factory,
    )
def _docker_compose_secure(
    *,
    docker_compose_files: List[str],
    scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """
    Provides the location of the templated docker-compose configuration file containing the secure docker registry
    service.
    """
    cache_key = _docker_compose_secure.__name__
    cached = CACHE.get(cache_key, [])
    # Only materialize entries beyond what an earlier invocation already cached.
    for index in range(len(cached), scale_factor):
        name = DOCKER_REGISTRY_SERVICE_PATTERN.format("secure", index)
        # Prefer a user-supplied compose file; fall back to the embedded template.
        candidates = itertools.chain(
            get_docker_compose_user_defined(docker_compose_files, name),
            get_embedded_file(
                tmp_path_factory, delete_after=False, name="docker-compose.yml"
            ),
        )
        located = next(candidates, None)
        if located is None:
            LOGGER.warning("Unable to find docker compose for: %s", name)
            cached.append("-unknown-")
        else:
            cached.append(located)
    CACHE[cache_key] = cached
    yield cached
@pytest.fixture(scope="session")
def docker_compose_secure(
    docker_compose_files: List[str], tmp_path_factory: TempPathFactory
) -> Generator[Path, None, None]:
    """
    Provides the location of the templated docker-compose configuration file containing the secure docker registry
    service.
    """
    # Single-instance convenience wrapper around the scaled helper.
    yield from (
        paths[0]
        for paths in _docker_compose_secure(
            docker_compose_files=docker_compose_files,
            scale_factor=1,
            tmp_path_factory=tmp_path_factory,
        )
    )
@pytest.fixture(scope="session")
def docker_compose_secure_list(
    docker_compose_files: List[str],
    pdrf_scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """
    Provides the locations of the templated docker-compose configuration files containing the secure docker registry
    services.
    """
    for paths in _docker_compose_secure(
        docker_compose_files=docker_compose_files,
        scale_factor=pdrf_scale_factor,
        tmp_path_factory=tmp_path_factory,
    ):
        yield paths
def _docker_registry_auth_header(
    *,
    docker_registry_password_list: List[str],
    docker_registry_username_list: List[str],
    scale_factor: int,
) -> List[Dict[str, str]]:
    """Provides HTTP basic authentication headers containing credentials for the secure docker registry services."""
    cache_key = _docker_registry_auth_header.__name__
    cached = CACHE.get(cache_key, [])
    # Only materialize entries beyond what an earlier invocation already cached.
    for index in range(len(cached), scale_factor):
        credentials = (
            f"{docker_registry_username_list[index]}:"
            f"{docker_registry_password_list[index]}"
        )
        token = b64encode(credentials.encode("utf-8")).decode("utf-8")
        cached.append({"Authorization": f"Basic {token}"})
    CACHE[cache_key] = cached
    return cached
@pytest.fixture(scope="session")
def docker_registry_auth_header(
    docker_registry_password: str, docker_registry_username: str
) -> Dict[str, str]:
    """Provides an HTTP basic authentication header containing credentials for the secure docker registry service."""
    headers = _docker_registry_auth_header(
        docker_registry_username_list=[docker_registry_username],
        docker_registry_password_list=[docker_registry_password],
        scale_factor=1,
    )
    return headers[0]
@pytest.fixture(scope="session")
def docker_registry_auth_header_list(
    docker_registry_password_list: List[str],
    docker_registry_username_list: List[str],
    pdrf_scale_factor: int,
) -> List[Dict[str, str]]:
    """Provides HTTP basic authentication headers containing credentials for the secure docker registry services."""
    headers = _docker_registry_auth_header(
        docker_registry_username_list=docker_registry_username_list,
        docker_registry_password_list=docker_registry_password_list,
        scale_factor=pdrf_scale_factor,
    )
    return headers
def _docker_registry_cacerts(
    *,
    docker_registry_certs_list: List[DockerRegistryCerts],
    pytestconfig: "_pytest.config.Config",
    scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """
    Provides the location of a temporary CA certificate trust store that contains the certificate of the secure docker
    registry service.
    """
    cache_key = _docker_registry_cacerts.__name__
    cached = CACHE.get(cache_key, [])
    # Only materialize entries beyond what an earlier invocation already cached.
    for index in range(len(cached), scale_factor):
        # A user-defined trust store wins; otherwise generate one from the CA certificate.
        candidates = itertools.chain(
            get_user_defined_file(pytestconfig, "cacerts"),
            generate_cacerts(
                tmp_path_factory,
                certificate=docker_registry_certs_list[index].ca_certificate,
            ),
        )
        located = next(candidates, None)
        if located is None:
            LOGGER.warning("Unable to find or generate cacerts!")
            cached.append("-unknown-")
        else:
            cached.append(located)
    CACHE[cache_key] = cached
    yield cached
@pytest.fixture(scope="session")
def docker_registry_cacerts(
    docker_registry_certs: DockerRegistryCerts,
    pytestconfig: "_pytest.config.Config",
    tmp_path_factory: TempPathFactory,
) -> Generator[Path, None, None]:
    """
    Provides the location of a temporary CA certificate trust store that contains the certificate of the secure docker
    registry service.
    """
    # Single-instance convenience wrapper around the scaled helper.
    yield from (
        paths[0]
        for paths in _docker_registry_cacerts(
            docker_registry_certs_list=[docker_registry_certs],
            pytestconfig=pytestconfig,
            scale_factor=1,
            tmp_path_factory=tmp_path_factory,
        )
    )
@pytest.fixture(scope="session")
def docker_registry_cacerts_list(
    docker_registry_certs_list: List[DockerRegistryCerts],
    pdrf_scale_factor: int,
    pytestconfig: "_pytest.config.Config",
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """
    Provides the locations of temporary CA certificate trust stores containing the certificates of the secure docker
    registry services.
    """
    for paths in _docker_registry_cacerts(
        docker_registry_certs_list=docker_registry_certs_list,
        pytestconfig=pytestconfig,
        scale_factor=pdrf_scale_factor,
        tmp_path_factory=tmp_path_factory,
    ):
        yield paths
def _docker_registry_certs(
    *, scale_factor: int, tmp_path_factory: TempPathFactory
) -> Generator[List[DockerRegistryCerts], None, None]:
    """Provides the location of temporary certificate and private key files for the secure docker registry service."""
    # TODO: Augment to allow for reading certificates from /test ...
    cache_key = _docker_registry_certs.__name__
    cached = CACHE.get(cache_key, [])
    # Only materialize entries beyond what an earlier invocation already cached.
    for index in range(len(cached), scale_factor):
        scratch = tmp_path_factory.mktemp(__name__)
        keypair = generate_keypair()
        certs = DockerRegistryCerts(
            ca_certificate=scratch.joinpath(f"{DOCKER_REGISTRY_SERVICE}-ca-{index}.crt"),
            ca_private_key=scratch.joinpath(f"{DOCKER_REGISTRY_SERVICE}-ca-{index}.key"),
            certificate=scratch.joinpath(f"{DOCKER_REGISTRY_SERVICE}-{index}.crt"),
            private_key=scratch.joinpath(f"{DOCKER_REGISTRY_SERVICE}-{index}.key"),
        )
        certs.ca_certificate.write_bytes(keypair.ca_certificate)
        certs.ca_private_key.write_bytes(keypair.ca_private_key)
        certs.certificate.write_bytes(keypair.certificate)
        certs.private_key.write_bytes(keypair.private_key)
        cached.append(certs)
    CACHE[cache_key] = cached
    yield cached
    # Teardown: best-effort removal of the generated PEM files.
    for certs in cached:
        for pem in (
            certs.ca_certificate,
            certs.ca_private_key,
            certs.certificate,
            certs.private_key,
        ):
            pem.unlink(missing_ok=True)
@pytest.fixture(scope="session")
def docker_registry_certs(
    tmp_path_factory: TempPathFactory,
) -> Generator[DockerRegistryCerts, None, None]:
    """Provides the location of temporary certificate and private key files for the secure docker registry service."""
    # Single-instance convenience wrapper around the scaled helper.
    yield from (
        certs_list[0]
        for certs_list in _docker_registry_certs(
            scale_factor=1, tmp_path_factory=tmp_path_factory
        )
    )
@pytest.fixture(scope="session")
def docker_registry_certs_list(
    pdrf_scale_factor: int, tmp_path_factory: TempPathFactory
) -> Generator[List[DockerRegistryCerts], None, None]:
    """Provides the locations of temporary certificate and private key files for the secure docker registry services."""
    for certs_list in _docker_registry_certs(
        scale_factor=pdrf_scale_factor, tmp_path_factory=tmp_path_factory
    ):
        yield certs_list
def _docker_registry_htpasswd(
    *,
    docker_registry_password_list: List[str],
    docker_registry_username_list: List[str],
    pytestconfig: "_pytest.config.Config",
    scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """Provides the location of the htpasswd file for the secure registry service."""
    cache_key = _docker_registry_htpasswd.__name__
    cached = CACHE.get(cache_key, [])
    # Only materialize entries beyond what an earlier invocation already cached.
    for index in range(len(cached), scale_factor):
        # A user-defined htpasswd wins; otherwise generate one from the credentials.
        candidates = itertools.chain(
            get_user_defined_file(pytestconfig, "htpasswd"),
            generate_htpasswd(
                tmp_path_factory,
                username=docker_registry_username_list[index],
                password=docker_registry_password_list[index],
            ),
        )
        located = next(candidates, None)
        if located is None:
            LOGGER.warning("Unable to find or generate htpasswd!")
            cached.append("-unknown-")
        else:
            cached.append(located)
    CACHE[cache_key] = cached
    yield cached
@pytest.fixture(scope="session")
def docker_registry_htpasswd(
    docker_registry_password: str,
    docker_registry_username: str,
    pytestconfig: "_pytest.config.Config",
    tmp_path_factory: TempPathFactory,
) -> Generator[Path, None, None]:
    """Provides the location of the htpasswd file for the secure registry service."""
    # Single-instance convenience wrapper around the scaled helper.
    yield from (
        paths[0]
        for paths in _docker_registry_htpasswd(
            docker_registry_password_list=[docker_registry_password],
            docker_registry_username_list=[docker_registry_username],
            pytestconfig=pytestconfig,
            scale_factor=1,
            tmp_path_factory=tmp_path_factory,
        )
    )
@pytest.fixture(scope="session")
def docker_registry_htpasswd_list(
    docker_registry_password_list: List[str],
    docker_registry_username_list: List[str],
    pdrf_scale_factor: int,
    pytestconfig: "_pytest.config.Config",
    tmp_path_factory: TempPathFactory,
) -> Generator[List[Path], None, None]:
    """Provides the locations of the htpasswd files for the secure registry services."""
    for paths in _docker_registry_htpasswd(
        docker_registry_password_list=docker_registry_password_list,
        docker_registry_username_list=docker_registry_username_list,
        pytestconfig=pytestconfig,
        scale_factor=pdrf_scale_factor,
        tmp_path_factory=tmp_path_factory,
    ):
        yield paths
def _docker_registry_insecure(
    *,
    docker_client: DockerClient,
    docker_compose_insecure_list: List[Path],
    docker_services: Services,
    request,
    scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[DockerRegistryInsecure], None, None]:
    """
    Provides the endpoint of a local, mutable, insecure, docker registry.

    Instantiates ``scale_factor`` registry services, reusing any entries already
    cached by a previous invocation; marked images are replicated into the first
    instance only.
    """
    cache_key = _docker_registry_insecure.__name__
    result = CACHE.get(cache_key, [])
    for i in range(scale_factor):
        # Entries below len(result) were populated by an earlier invocation.
        if i < len(result):
            continue
        service_name = DOCKER_REGISTRY_SERVICE_PATTERN.format("insecure", i)
        tmp_path = tmp_path_factory.mktemp(__name__)
        # Create an insecure registry service from the docker compose template ...
        path_docker_compose = tmp_path.joinpath(f"docker-compose-{i}.yml")
        template = Template(docker_compose_insecure_list[i].read_text("utf-8"))
        path_docker_compose.write_text(
            template.substitute(
                {
                    "CONTAINER_NAME": service_name,
                    # Note: Needed to correctly populate the embedded, consolidated, service template ...
                    "PATH_CERTIFICATE": "/dev/null",
                    "PATH_HTPASSWD": "/dev/null",
                    "PATH_KEY": "/dev/null",
                }
            ),
            "utf-8",
        )
        LOGGER.debug("Starting insecure docker registry service [%d] ...", i)
        LOGGER.debug("  docker-compose : %s", path_docker_compose)
        LOGGER.debug("  service name   : %s", service_name)
        # Blocks until the service answers health checks, then returns "host:port".
        endpoint = start_service(
            docker_services,
            docker_compose=path_docker_compose,
            service_name=service_name,
        )
        LOGGER.debug("Insecure docker registry endpoint [%d]: %s", i, endpoint)
        images = []
        # Replicate marked images into the first instance only; the remaining
        # instances start empty.
        if i == 0:
            LOGGER.debug("Replicating images into %s [%d] ...", service_name, i)
            images = _replicate_images(docker_client, endpoint, request)
        result.append(
            DockerRegistryInsecure(
                docker_client=docker_client,
                docker_compose=path_docker_compose,
                endpoint=endpoint,
                images=images,
                service_name=service_name,
            )
        )
    CACHE[cache_key] = result
    yield result
@pytest.fixture(scope="session")
def docker_registry_insecure(
    docker_client: DockerClient,
    docker_compose_insecure: Path,
    docker_services: Services,
    request,
    tmp_path_factory: TempPathFactory,
) -> Generator[DockerRegistryInsecure, None, None]:
    """Provides the endpoint of a local, mutable, insecure, docker registry."""
    # Single-instance convenience wrapper around the scaled helper.
    yield from (
        registries[0]
        for registries in _docker_registry_insecure(
            docker_client=docker_client,
            docker_compose_insecure_list=[docker_compose_insecure],
            docker_services=docker_services,
            request=request,
            scale_factor=1,
            tmp_path_factory=tmp_path_factory,
        )
    )
@pytest.fixture(scope="session")
def docker_registry_insecure_list(
    docker_client: DockerClient,
    docker_compose_insecure_list: List[Path],
    docker_services: Services,
    pdrf_scale_factor: int,
    request,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[DockerRegistryInsecure], None, None]:
    """Provides the endpoints of local, mutable, insecure, docker registries."""
    for registries in _docker_registry_insecure(
        docker_client=docker_client,
        docker_compose_insecure_list=docker_compose_insecure_list,
        docker_services=docker_services,
        request=request,
        scale_factor=pdrf_scale_factor,
        tmp_path_factory=tmp_path_factory,
    ):
        yield registries
def _docker_registry_password(*, scale_factor: int) -> List[str]:
    """Provides the password to use for authentication to the secure registry service."""
    cache_key = _docker_registry_password.__name__
    cached = CACHE.get(cache_key, [])
    while len(cached) < scale_factor:
        # The timestamp suffix keeps each generated password unique; the sleep
        # guarantees successive timestamps differ.
        cached.append(f"pytest.password.{time()}")
        sleep(0.05)
    CACHE[cache_key] = cached
    return cached
@pytest.fixture(scope="session")
def docker_registry_password() -> str:
    """Provides the password to use for authentication to the secure registry service."""
    passwords = _docker_registry_password(scale_factor=1)
    return passwords[0]
@pytest.fixture(scope="session")
def docker_registry_password_list(pdrf_scale_factor: int) -> List[str]:
    """Provides the passwords to use for authentication to the secure registry services."""
    passwords = _docker_registry_password(scale_factor=pdrf_scale_factor)
    return passwords
def _docker_registry_secure(
    *,
    docker_client: DockerClient,
    docker_compose_secure_list: List[Path],
    docker_registry_auth_header_list: List[Dict[str, str]],
    docker_registry_cacerts_list: List[Path],
    docker_registry_certs_list: List[DockerRegistryCerts],
    docker_registry_htpasswd_list: List[Path],
    docker_registry_password_list: List[str],
    docker_registry_ssl_context_list: List[SSLContext],
    docker_registry_username_list: List[str],
    docker_services: Services,
    request,
    scale_factor: int,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[DockerRegistrySecure], None, None]:
    """
    Provides the endpoint of a local, mutable, secure, docker registry.

    Instantiates ``scale_factor`` TLS + htpasswd protected registry services,
    reusing any entries already cached by a previous invocation; marked images
    are replicated into the first instance only.
    """
    cache_key = _docker_registry_secure.__name__
    result = CACHE.get(cache_key, [])
    for i in range(scale_factor):
        # Entries below len(result) were populated by an earlier invocation.
        if i < len(result):
            continue
        service_name = DOCKER_REGISTRY_SERVICE_PATTERN.format("secure", i)
        tmp_path = tmp_path_factory.mktemp(__name__)
        # Create a secure registry service from the docker compose template ...
        path_docker_compose = tmp_path.joinpath(f"docker-compose-{i}.yml")
        template = Template(docker_compose_secure_list[i].read_text("utf-8"))
        path_docker_compose.write_text(
            template.substitute(
                {
                    "CONTAINER_NAME": service_name,
                    "PATH_CERTIFICATE": docker_registry_certs_list[i].certificate,
                    "PATH_HTPASSWD": docker_registry_htpasswd_list[i],
                    "PATH_KEY": docker_registry_certs_list[i].private_key,
                }
            ),
            "utf-8",
        )
        LOGGER.debug("Starting secure docker registry service [%d] ...", i)
        LOGGER.debug("  docker-compose : %s", path_docker_compose)
        LOGGER.debug(
            "  ca certificate : %s", docker_registry_certs_list[i].ca_certificate
        )
        LOGGER.debug("  certificate    : %s", docker_registry_certs_list[i].certificate)
        LOGGER.debug("  htpasswd       : %s", docker_registry_htpasswd_list[i])
        LOGGER.debug("  private key    : %s", docker_registry_certs_list[i].private_key)
        LOGGER.debug("  password       : %s", docker_registry_password_list[i])
        LOGGER.debug("  service name   : %s", service_name)
        LOGGER.debug("  username       : %s", docker_registry_username_list[i])
        # Health check must authenticate and trust the generated CA certificate.
        check_server = partial(
            check_url_secure,
            auth_header=docker_registry_auth_header_list[i],
            ssl_context=docker_registry_ssl_context_list[i],
        )
        endpoint = start_service(
            docker_services,
            check_server=check_server,
            docker_compose=path_docker_compose,
            service_name=service_name,
        )
        LOGGER.debug("Secure docker registry endpoint [%d]: %s", i, endpoint)
        # DUCK PUNCH: Inject the secure docker registry credentials into the docker client ...
        docker_client.api._auth_configs.add_auth(  # pylint: disable=protected-access
            endpoint,
            {
                "password": docker_registry_password_list[i],
                "username": docker_registry_username_list[i],
            },
        )
        images = []
        # Replicate marked images into the first instance only; the remaining
        # instances start empty.
        if i == 0:
            LOGGER.debug("Replicating images into %s [%d] ...", service_name, i)
            images = _replicate_images(docker_client, endpoint, request)
        result.append(
            DockerRegistrySecure(
                auth_header=docker_registry_auth_header_list[i],
                cacerts=docker_registry_cacerts_list[i],
                certs=docker_registry_certs_list[i],
                docker_client=docker_client,
                docker_compose=path_docker_compose,
                endpoint=endpoint,
                htpasswd=docker_registry_htpasswd_list[i],
                password=docker_registry_password_list[i],
                images=images,
                service_name=service_name,
                ssl_context=docker_registry_ssl_context_list[i],
                username=docker_registry_username_list[i],
            )
        )
    CACHE[cache_key] = result
    yield result
@pytest.fixture(scope="session")
def docker_registry_secure(
    docker_client: DockerClient,
    docker_compose_secure: Path,
    docker_registry_auth_header: Dict[str, str],
    docker_registry_cacerts: Path,
    docker_registry_certs: DockerRegistryCerts,
    docker_registry_htpasswd: Path,
    docker_registry_password: str,
    docker_registry_ssl_context: SSLContext,
    docker_registry_username: str,
    docker_services: Services,
    request,
    tmp_path_factory: TempPathFactory,
) -> Generator[DockerRegistrySecure, None, None]:
    """Provides the endpoint of a local, mutable, secure, docker registry."""
    # Single-instance convenience wrapper around the scaled helper.
    yield from (
        registries[0]
        for registries in _docker_registry_secure(
            docker_client=docker_client,
            docker_compose_secure_list=[docker_compose_secure],
            docker_registry_auth_header_list=[docker_registry_auth_header],
            docker_registry_cacerts_list=[docker_registry_cacerts],
            docker_registry_certs_list=[docker_registry_certs],
            docker_registry_htpasswd_list=[docker_registry_htpasswd],
            docker_registry_password_list=[docker_registry_password],
            docker_registry_ssl_context_list=[docker_registry_ssl_context],
            docker_registry_username_list=[docker_registry_username],
            docker_services=docker_services,
            request=request,
            scale_factor=1,
            tmp_path_factory=tmp_path_factory,
        )
    )
@pytest.fixture(scope="session")
def docker_registry_secure_list(
    docker_client: DockerClient,
    docker_compose_secure_list: List[Path],
    docker_registry_auth_header_list: List[Dict[str, str]],
    docker_registry_cacerts_list: List[Path],
    docker_registry_certs_list: List[DockerRegistryCerts],
    docker_registry_htpasswd_list: List[Path],
    docker_registry_password_list: List[str],
    docker_registry_ssl_context_list: List[SSLContext],
    docker_registry_username_list: List[str],
    docker_services: Services,
    pdrf_scale_factor: int,
    request,
    tmp_path_factory: TempPathFactory,
) -> Generator[List[DockerRegistrySecure], None, None]:
    """Provides the endpoints of local, mutable, secure, docker registries."""
    for registries in _docker_registry_secure(
        docker_client=docker_client,
        docker_compose_secure_list=docker_compose_secure_list,
        docker_registry_auth_header_list=docker_registry_auth_header_list,
        docker_registry_cacerts_list=docker_registry_cacerts_list,
        docker_registry_certs_list=docker_registry_certs_list,
        docker_registry_htpasswd_list=docker_registry_htpasswd_list,
        docker_registry_password_list=docker_registry_password_list,
        docker_registry_ssl_context_list=docker_registry_ssl_context_list,
        docker_registry_username_list=docker_registry_username_list,
        docker_services=docker_services,
        request=request,
        scale_factor=pdrf_scale_factor,
        tmp_path_factory=tmp_path_factory,
    ):
        yield registries
def _docker_registry_ssl_context(
    *, docker_registry_cacerts_list: List[Path], scale_factor: int
) -> List[SSLContext]:
    """
    Provides an SSLContext referencing the temporary CA certificate trust store that contains the certificate of the
    secure docker registry service.
    """
    cache_key = _docker_registry_ssl_context.__name__
    cached = CACHE.get(cache_key, [])
    # Only materialize entries beyond what an earlier invocation already cached.
    for index in range(len(cached), scale_factor):
        cafile = str(docker_registry_cacerts_list[index])
        cached.append(create_default_context(cafile=cafile))
    CACHE[cache_key] = cached
    return cached
@pytest.fixture(scope="session")
def docker_registry_ssl_context(docker_registry_cacerts: Path) -> SSLContext:
    """
    Provides an SSLContext referencing the temporary CA certificate trust store that contains the certificate of the
    secure docker registry service.
    """
    contexts = _docker_registry_ssl_context(
        docker_registry_cacerts_list=[docker_registry_cacerts], scale_factor=1
    )
    return contexts[0]
@pytest.fixture(scope="session")
def docker_registry_ssl_context_list(
    docker_registry_cacerts_list: List[Path],
    pdrf_scale_factor: int,
) -> List[SSLContext]:
    """
    Provides SSLContexts referencing the temporary CA certificate trust stores of the secure docker registry services.
    """
    contexts = _docker_registry_ssl_context(
        docker_registry_cacerts_list=docker_registry_cacerts_list,
        scale_factor=pdrf_scale_factor,
    )
    return contexts
def _docker_registry_username(*, scale_factor: int) -> List[str]:
    """Retrieve the name of the user to use for authentication to the secure registry service."""
    cache_key = _docker_registry_username.__name__
    cached = CACHE.get(cache_key, [])
    while len(cached) < scale_factor:
        # The timestamp suffix keeps each generated username unique; the sleep
        # guarantees successive timestamps differ.
        cached.append(f"pytest.username.{time()}")
        sleep(0.05)
    CACHE[cache_key] = cached
    return cached
@pytest.fixture(scope="session")
def docker_registry_username() -> str:
    """Retrieve the name of the user to use for authentication to the secure registry service."""
    usernames = _docker_registry_username(scale_factor=1)
    return usernames[0]
@pytest.fixture(scope="session")
def docker_registry_username_list(
    pdrf_scale_factor: int,
) -> List[str]:
    """Retrieve the names of the users to use for authentication to the secure registry services."""
    usernames = _docker_registry_username(scale_factor=pdrf_scale_factor)
    return usernames
@pytest.fixture(scope="session")
def pdrf_scale_factor() -> int:
    """Provides the number of enumerated instances to be instantiated."""
    default_scale = 1
    return default_scale
def _replicate_images(
    docker_client: DockerClient, endpoint: str, request
) -> List[ImageName]:
    """
    Replicates all marked images to a docker registry service at a given endpoint.

    Args:
        docker_client: Docker client with which to replicate the marked images.
        endpoint: The endpoint of the docker registry service.
        request: The pytest requests object from which to retrieve the marks.

    Returns: The list of images that were replicated.
    """
    # NOTE(review): distutils is deprecated (PEP 632); strtobool should
    # eventually be replaced with a local helper.
    always_pull = strtobool(str(request.config.getoption("--always-pull", True)))
    images = request.config.getoption("--push-image", [])
    # images.extend(request.node.get_closest_marker("push_image", []))

    # * Split ',' separated lists
    # * Remove duplicates - see conftest.py::pytest_collection_modifyitems()
    # * Sort so that the replication order (and returned list) is deterministic;
    #   iterating an unordered set made it vary between runs.
    names = [image for i in images for image in i.split(",")]
    images = [ImageName.parse(image) for image in sorted(set(names))]
    for image in images:
        LOGGER.debug("- %s", image)
        try:
            replicate_image(docker_client, image, endpoint, always_pull=always_pull)
        except Exception as exception:  # pylint: disable=broad-except
            # Best-effort: a failed replication is logged, not fatal.
            LOGGER.warning(
                "Unable to replicate image '%s': %s", image, exception, exc_info=True
            )
    return images
| 31,253 | 9,473 |
import socket
import threading
import FetchData
class TCPserver():
    """Threaded TCP server that answers each client request with a database lookup.

    Listens on ``server_ip``:``server_port``; each accepted connection is served
    on its own daemon thread.
    """

    def __init__(self):
        # Bind address; localhost only, so the service is not exposed externally.
        self.server_ip = "localhost"
        self.server_port = 9998

    def main(self):
        """Bind, listen, and dispatch one handler thread per accepted client."""
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Allow immediate rebinding after a restart (avoids "Address already in use").
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server.bind((self.server_ip, self.server_port))
        server.listen(6)
        print(f'Server listen on {self.server_ip} : Port:{self.server_port}')
        while True:
            client, address = server.accept()
            print(f'[+] Accepted conneciton from {address[0]} : {address[1]}')
            # Daemon threads do not block interpreter shutdown (e.g. Ctrl-C).
            client_handler = threading.Thread(
                target=self.handle_client, args=(client,), daemon=True
            )
            client_handler.start()

    def handle_client(self, client_socket):
        """Receive one query, resolve it against the database, and send the reply."""
        with client_socket as sock:
            request = sock.recv(1024)
            if not request:
                # Peer closed the connection without sending any data.
                return
            to_find = request.decode()
            print('[*] Received Data From Cleint:', to_find)
            reply = self.toFind(to_find)
            # sendall (not send) guarantees the whole payload is transmitted.
            sock.sendall(bytes(reply, 'utf-8'))

    def toFind(self, toFindInDatabase):
        """Look up *toFindInDatabase* via FetchData and return the result string."""
        db = FetchData.DatabaseClass(toFindInDatabase)
        return db.databaseMethod()
if __name__ == "__main__":
    # TCPserver.main() already loops forever accepting clients; the previous
    # outer `while True` restart loop was redundant and, after a crash, would
    # fail anyway while the port was still bound.
    server = TCPserver()
    server.main()
#source code: https://github.com/alvarobartt/trendet
import psycopg2, psycopg2.extras
import os
import glob
import csv
import time
import datetime
import string
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import patches
from matplotlib.pyplot import figure
from datetime import timedelta, date
from math import ceil, sqrt
from statistics import mean
from unidecode import unidecode
# transform array to rectangle shape
# transform array to rectangle shape
def trans2rect(arr):
    """Run-length encode *arr* into (value, start_index, width) tuples.

    Each tuple describes one maximal run of equal consecutive elements:
    the repeated value, the index at which the run starts, and its length.
    Returns an empty list for empty input (previously raised IndexError).
    """
    if len(arr) == 0:
        return []
    tarr = []
    trend = arr[0]
    width = 1
    day = 0
    for elm in arr[1:]:
        if elm == trend:
            width += 1
        else:
            # Close the current run and start a new one at the next index.
            tarr.append((trend, day, width))
            trend = elm
            day += width
            width = 1
    tarr.append((trend, day, width))
    return tarr
def date_range(start_date, end_date):
    """Yield every date from *start_date* up to, but excluding, *end_date*."""
    total_days = (end_date - start_date).days
    for offset in range(total_days):
        yield start_date + timedelta(days=offset)
def identify_df_trends(df, column, window_size=5, identify='both'):
    """
    Identify up and/or down trends in ``df[column]``.

    A trend is a run of more than ``window_size`` consecutive rows over which the
    running mean of the values keeps moving in one direction. Detected ranges are
    written back into new 'Up Trend' / 'Down Trend' label columns; where an up
    and a down trend overlap, only the longer one is kept.

    Args:
        df (:obj:`pandas.DataFrame`): dataframe containing the data to be analysed.
        column (:obj:`str`): name of the column from where trends are going to be identified.
        window_size (:obj:`int`, optional): number of rows from where market behaviour is considered a trend.
        identify (:obj:`str`, optional): which trends to identify: 'both', 'up' or 'down'.

    Returns:
        :obj:`pandas.DataFrame`: the input dataframe with labeled trend ranges added.

    Raises:
        ValueError: raised if any of the introduced arguments errored.
    """
    # ---- argument validation -------------------------------------------------
    if df is None or not isinstance(df, pd.DataFrame):
        raise ValueError("df argument is mandatory and needs to be a `pandas.DataFrame`.")
    if column is None:
        raise ValueError("column parameter is mandatory and must be a valid column name.")
    if not isinstance(column, str):
        raise ValueError("column argument needs to be a `str`.")
    if column not in df.columns:
        raise ValueError("introduced column does not match any column from the specified `pandas.DataFrame`.")
    if df[column].dtype not in ['int64', 'float64']:
        raise ValueError("supported values are just `int` or `float`, and the specified column of the "
                         "introduced `pandas.DataFrame` is " + str(df[column].dtype))
    if not isinstance(window_size, int):
        raise ValueError('window_size must be an `int`')
    if window_size < 3:
        raise ValueError('window_size must be an `int` equal or higher than 3!')
    if not isinstance(identify, str) or identify not in ['both', 'up', 'down']:
        raise ValueError('identify should be a `str` contained in [both, up, down]!')

    # BUG FIX: trends were previously always computed on df['close'] regardless
    # of the `column` argument; use the requested column instead.
    objs = list()
    up_trend = {
        'name': 'Up Trend',
        # Negated so that rising values appear as a falling sequence to the
        # shared "falling run" detector below.
        'element': np.negative(df[column])
    }
    down_trend = {
        'name': 'Down Trend',
        'element': df[column]
    }
    if identify in ('both', 'up'):
        objs.append(up_trend)
    if identify in ('both', 'down'):
        objs.append(down_trend)

    results = dict()
    for obj in objs:
        mov_avg = None
        values = list()
        trends = list()
        from_trend = 0
        for index, value in enumerate(obj['element'], 0):
            if mov_avg and mov_avg > value:
                # Still falling: extend the current run.
                values.append(value)
                mov_avg = mean(values)
            elif mov_avg and mov_avg < value:
                # Reversal: record the run if it lasted long enough.
                if len(values) > window_size:
                    min_value = min(values)
                    for counter, item in enumerate(values, 0):
                        if item == min_value:
                            break
                    to_trend = from_trend + counter
                    trend = {
                        'from': df.index.tolist()[from_trend],
                        'to': df.index.tolist()[to_trend],
                    }
                    trends.append(trend)
                mov_avg = None
                values = list()
            else:
                # Start (or restart) a run at the current row.
                from_trend = index
                values.append(value)
                mov_avg = mean(values)
        results[obj['name']] = trends

    # ---- overlap resolution: keep the longer of two overlapping trends -------
    if identify == 'both':
        up_trends = list()
        for up in results['Up Trend']:
            flag = True
            for down in results['Down Trend']:
                if (down['from'] <= up['from'] <= down['to']) or (down['from'] <= up['to'] <= down['to']):
                    if (up['to'] - up['from']) <= (down['to'] - down['from']):
                        flag = False
            for other_up in results['Up Trend']:
                if (other_up['from'] < up['from'] < other_up['to']) or (other_up['from'] < up['to'] < other_up['to']):
                    if (up['to'] - up['from']) < (other_up['to'] - other_up['from']):
                        flag = False
            if flag is True:
                up_trends.append(up)
        labels = [letter for letter in string.printable[:len(up_trends)]]
        for up_trend, label in zip(up_trends, labels):
            for index, row in df[up_trend['from']:up_trend['to']].iterrows():
                df.loc[index, 'Up Trend'] = label
        down_trends = list()
        for down in results['Down Trend']:
            flag = True
            for up in results['Up Trend']:
                if (up['from'] <= down['from'] <= up['to']) or (up['from'] <= down['to'] <= up['to']):
                    if (up['to'] - up['from']) >= (down['to'] - down['from']):
                        flag = False
            for other_down in results['Down Trend']:
                if (other_down['from'] < down['from'] < other_down['to']) or (other_down['from'] < down['to'] < other_down['to']):
                    if (other_down['to'] - other_down['from']) > (down['to'] - down['from']):
                        flag = False
            if flag is True:
                down_trends.append(down)
        labels = [letter for letter in string.printable[:len(down_trends)]]
        for down_trend, label in zip(down_trends, labels):
            for index, row in df[down_trend['from']:down_trend['to']].iterrows():
                df.loc[index, 'Down Trend'] = label
        return df
    elif identify == 'up':
        up_trends = results['Up Trend']
        up_labels = [letter for letter in string.printable[:len(up_trends)]]
        for up_trend, up_label in zip(up_trends, up_labels):
            for index, row in df[up_trend['from']:up_trend['to']].iterrows():
                df.loc[index, 'Up Trend'] = up_label
        return df
    elif identify == 'down':
        down_trends = results['Down Trend']
        down_labels = [letter for letter in string.printable[:len(down_trends)]]
        for down_trend, down_label in zip(down_trends, down_labels):
            for index, row in df[down_trend['from']:down_trend['to']].iterrows():
                df.loc[index, 'Down Trend'] = down_label
        return df
# Driver script: for each trading day in the range, load one market index from
# PostgreSQL, normalize the close price, detect trends, and save a labeled plot.
# SECURITY NOTE(review): eval() on auth.txt executes arbitrary code from that
# file; consider json/ast.literal_eval instead. The file handle is also never
# closed explicitly.
conn = psycopg2.connect(**eval(open('auth.txt').read()))
cmd = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
start_date = date(2010, 3, 25)
end_date = date(2010, 3, 26)
# sampling window
window_size = 5
for single_date in date_range(start_date, end_date):
    #smp no volume
    #cmd.execute('select * from market_index where mid = 3 and dt=%(dt)s',dict(dt=single_date.strftime("%Y-%m-%d")))
    #smp with volume
    cmd.execute('select * from market_index where mid = 1 and dt=%(dt)s',dict(dt=single_date.strftime("%Y-%m-%d")))
    recs = cmd.fetchall()
    if recs == []:
        continue;
    df = pd.DataFrame(recs, columns = recs[0].keys())
    # NOTE(review): sort_values() without inplace=True returns a new frame that
    # is discarded here — df is NOT actually sorted; confirm whether the query
    # already returns rows ordered by dt.
    df.sort_values(by='dt')
    # with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    #     print(df)
    close_price = df['close'].values
    maxprice = max(close_price)
    minprice = min(close_price)
    # Min-max normalize to (0, 1]; the +0.01 offset prevents values equal to 0
    # (the trend detector treats 0 as "no moving average").
    # prevent from equal to 0
    df['close'] = (df['close']-minprice)/(maxprice - minprice)+0.01
    close_price = df['close'].values
    # close_price = close_price.tolist()
    # df_trend = df.copy()
    # df_trend['Up Trend'] = np.nan
    # df_trend['Down Trend'] = np.nan
    df_trend = identify_df_trends(df, 'close', window_size=window_size, identify='both')
    # with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    #     print(df_trend)
    df.reset_index(inplace=True)
    figure(num=None, figsize=(48, 10), dpi=180, facecolor='w', edgecolor='k')
    ax = sns.lineplot(x=df.index, y=df['close'])
    ax.set(xlabel='minute')
    # a/b flag that no Up/Down trend column was produced (bare except catches
    # the resulting KeyError); the corresponding shading block is then skipped.
    a=0
    b=0
    try:
        labels = df_trend['Up Trend'].dropna().unique().tolist()
    except:
        df_trend['Up Trend'] = np.nan
        a=1
    if a == 0:
        for label in labels:
            # Shade each detected up-trend range in red.
            ax.axvspan(df[df['Up Trend'] == label].index[0], df[df['Up Trend'] == label].index[-1], alpha=0.2, color='red')
    try:
        labels = df_trend['Down Trend'].dropna().unique().tolist()
    except:
        df_trend['Down Trend'] = np.nan
        b=1
    if b == 0:
        for label in labels:
            # Shade each detected down-trend range in green.
            ax.axvspan(df[df['Down Trend'] == label].index[0], df[df['Down Trend'] == label].index[-1], alpha=0.2, color='green')
    plt.savefig('date='+single_date.strftime("%m-%d-%Y")+'_window={}.png'.format(window_size))
| 11,122 | 3,438 |
class AbstractSummarizerAlgo(object):
    """Interface that every text summarization algorithm implements.

    Concrete subclasses must override :meth:`run`.
    """

    def run(self, text, percentage):
        # Abstract hook: concrete summarizers supply the implementation.
        raise NotImplementedError('Subclasses must override run()!')
| 257 | 67 |
import requests
import json

# Build a pairwise driving-distance matrix (in meters, per the API's
# distance.value field) between the addresses listed in scored_output.json.
with open("scored_output.json") as fh:
    content = json.load(fh)
matrix = [[0 for i in range(len(content))] for j in range(len(content))]
mapping = {}
# NOTE(review): hard-coded Google API key checked into source — rotate it and
# load it from an environment variable or config file instead.
API_KEY = "AIzaSyA3kdX2kwoRQpkmui8GtloGvGQB-rn1tMU"
for i, origin in enumerate(content):
    mapping[i] = origin
    for j, destination in enumerate(content):
        print(i, j)
        # only query California-to-California address pairs with usable addresses
        if origin[0] == ',' or destination[0] == ',' or origin[-2:] != destination[-2:] or origin[-2:] != 'CA':
            continue
        # BUG FIX: pass addresses via `params` so special characters in them
        # (&, #, +, ...) are URL-encoded instead of corrupting the query string
        # that raw string concatenation produced.
        response = requests.get(
            "https://maps.googleapis.com/maps/api/distancematrix/json",
            params={
                "units": "imperial",
                "origins": origin,
                "destinations": destination,
                "key": API_KEY,
            },
            timeout=30,
        )
        try:
            matrix[i][j] = json.loads(response.content)["rows"][0]["elements"][0]["distance"]["value"]
        except (KeyError, IndexError, ValueError):
            # no route / malformed response: leave the distance at 0 (best-effort)
            continue
data = {
    'mapping': mapping,
    'matrix': matrix
}
with open("dmatrix.json", "w") as fh:
    json.dump(data, fh)
| 978 | 326 |
'''
__author__: Ellen Wu (modified by Jiaming Shen)
__description__: A bunch of utility functions
__latest_update__: 08/31/2017
'''
from collections import defaultdict
import set_expan
import eid_pair_TFIDF_selection
import extract_seed_edges
import extract_entity_pair_skipgrams
def loadEidToEntityMap(filename):
    """Load a TSV file of (entity name, eid) rows into two lookup maps.

    Returns (eid2ename, ename2eid); entity names are lower-cased in the
    reverse map.
    """
    eid_to_name = {}
    name_to_eid = {}
    with open(filename, 'r') as fin:
        for raw_line in fin:
            fields = raw_line.strip('\r\n').split('\t')
            entity_name, eid = fields[0], int(fields[1])
            eid_to_name[eid] = entity_name
            name_to_eid[entity_name.lower()] = eid
    return eid_to_name, name_to_eid
def loadFeaturesAndEidMap(filename):
    """Load a TSV of (eid, feature) rows into two set-valued indexes.

    Returns (featuresetByEid, eidsByFeature): eid -> set of features and
    feature -> set of eids.
    """
    features_by_eid = defaultdict(set)
    eids_by_feature = defaultdict(set)
    with open(filename, 'r') as fin:
        for raw_line in fin:
            fields = raw_line.strip('\r\n').split('\t')
            eid, feature = int(fields[0]), fields[1]
            features_by_eid[eid].add(feature)
            eids_by_feature[feature].add(eid)
    return features_by_eid, eids_by_feature
def loadFeaturesAndEidPairMap(filename):
    """Load a TSV of (eid1, eid2, feature) rows into two set-valued indexes.

    Returns (featuresetByEidPair, eidPairsByFeature): (eid1, eid2) -> set of
    features and feature -> set of (eid1, eid2) pairs.
    """
    features_by_pair = defaultdict(set)
    pairs_by_feature = defaultdict(set)
    with open(filename, 'r') as fin:
        for raw_line in fin:
            fields = raw_line.strip('\r\n').split('\t')
            eid_pair = (int(fields[0]), int(fields[1]))
            feature = fields[2]
            features_by_pair[eid_pair].add(feature)
            pairs_by_feature[feature].add(eid_pair)
    return features_by_pair, pairs_by_feature
def loadWeightByEidAndFeatureMap(filename, idx = -1):
    '''Load the (eid, feature) -> strength map from a TSV file.

    :param filename: path to the TSV file (eid, feature, ..., weight)
    :param idx: index of the weight column, default is the last column
    :return: dict mapping (eid, feature) to a float weight
    '''
    weights = {}
    with open(filename, 'r') as fin:
        for raw_line in fin:
            fields = raw_line.strip('\r\n').split('\t')
            key = (int(fields[0]), fields[1])
            weights[key] = float(fields[idx])
    return weights
def loadWeightByEidPairAndFeatureMap(filename, idx = -1):
    '''Load the ((eid1, eid2), feature) -> strength map from a TSV file.

    :param filename: path to the TSV file (eid1, eid2, feature, ..., weight)
    :param idx: index of the weight column, default is the last column
    :return: dict mapping ((eid1, eid2), feature) to a float weight
    '''
    weights = {}
    with open(filename, 'r') as fin:
        for raw_line in fin:
            fields = raw_line.strip('\r\n').split('\t')
            key = ((int(fields[0]), int(fields[1])), fields[2])
            weights[key] = float(fields[idx])
    return weights
#!/usr/bin/env pytest-3
import pytest
# Exercice: iter
def multiples_of(n):
    """Yield 0, n, 2n, 3n, ... forever."""
    current = 0
    while True:
        yield current
        current = current + n
# test
def test_iter():
    """Exercise multiples_of: values are correct and the generator resumes
    where the previous iteration stopped."""
    stream = multiples_of(3)
    for idx, value in enumerate(stream):
        assert idx * 3 == value
        if idx >= 100:
            break
    # the generator kept its state: it continues from the 101st multiple
    for idx, value in enumerate(stream):
        assert (idx + 101) * 3 == value
        if idx >= 100:
            break
    stream = multiples_of(4)
    for idx, value in enumerate(stream):
        assert idx * 4 == value
        if idx >= 100:
            break
| 534 | 197 |
from onegov.gis.forms.fields import CoordinatesField
from onegov.gis.forms.widgets import CoordinatesWidget
__all__ = ['CoordinatesField', 'CoordinatesWidget']
| 161 | 48 |
"""Release Planning."""
import argparse
import github3
import logging
import os
import sys
import traceback
from pds_github_util.issues.utils import get_labels, is_theme
from pds_github_util.zenhub.zenhub import Zenhub
from pds_github_util.utils import GithubConnection, addStandardArguments
from pkg_resources import resource_string
from jinja2 import Template
from yaml import FullLoader, load
# PDS Github Org
GITHUB_ORG = 'NASA-PDS'
# reStructuredText snippet for one repository's summary section. The nine '{}'
# slots are filled (see main) with: repo name, '#' underline, description,
# user-guide URL, then the repo html_url five more times for the link table.
REPO_INFO = ('\n--------\n\n'
             '{}\n'
             '{}\n\n'
             '*{}*\n\n'
             '.. list-table:: \n'
             ' :widths: 15 15 15 15 15 15\n\n'
             ' * - `User Guide <{}>`_\n'
             ' - `Github Repo <{}>`_\n'
             ' - `Issue Tracking <{}/issues>`_ \n'
             ' - `Backlog <{}/issues?q=is%3Aopen+is%3Aissue+label%3Abacklog>`_ \n'
             ' - `Stable Release <{}/releases/latest>`_ \n'
             ' - `Dev Release <{}/releases>`_ \n\n')
# Quiet github3 logging
logger = logging.getLogger('github3')
logger.setLevel(level=logging.WARNING)
# Enable logging; `logger` is then rebound to this module's own logger
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def append_to_project(proj, output):
    """Append *output* to proj['output'], creating the key on first use.

    :param proj: project dict (mutated in place)
    :param output: string fragment to accumulate
    """
    if 'output' in proj:  # idiom: membership test on the dict, not .keys()
        proj['output'] += output
    else:
        proj['output'] = output
def get_project(projects, gh_issue, labels):
    """Return the project whose key appears among *labels*.

    Raises Exception when no label matches a known project key.
    """
    matching = set(projects.keys()) & set(labels)
    if not matching:
        raise Exception(f"Unknown project for theme '{gh_issue.title}': {labels}")
    return projects[list(matching)[0]]
def main():
    """Generate the Release Plan RST document for a PDS build.

    Collects release themes from Zenhub/GitHub for the requested build
    number, renders them through ``plan.template.rst`` and writes the
    result to ``plan.rst``. Exits with status 1 on missing API tokens or
    any unexpected error.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
                                     description=__doc__)
    addStandardArguments(parser)
    parser.add_argument('--github_token',
                        help='github API token')
    parser.add_argument('--zenhub_token',
                        help='zenhub API token')
    parser.add_argument('--build_number',
                        help='build number',
                        required=True)
    # NOTE(review): the four date arguments below are parsed but the rendered
    # dates actually come from the projects_config key_dates section — confirm
    # whether these CLI options are still needed.
    parser.add_argument('--delivery_date',
                        help='EN delivery to I&T date',
                        required=True)
    parser.add_argument('--trr_date',
                        help='EN TRR date',
                        required=True)
    parser.add_argument('--ddr_date',
                        help='EN DDR date',
                        required=True)
    parser.add_argument('--release_date',
                        help='EN release date',  # BUG FIX: help text was a copy-paste of the DDR text
                        required=True)
    parser.add_argument('--projects_config',
                        help='Path to config file with project information',
                        required=True)
    args = parser.parse_args()

    # set output filename
    output_fname = 'plan.rst'

    # get github token or throw error
    github_token = args.github_token or os.environ.get('GITHUB_TOKEN')
    if not github_token:
        logger.error('github API token must be provided or set as environment'
                     ' variable (GITHUB_TOKEN).')
        sys.exit(1)

    # get zenhub token or throw error
    # BUG FIX: this previously read args.github_token, so --zenhub_token was ignored
    zenhub_token = args.zenhub_token or os.environ.get('ZENHUB_TOKEN')
    if not zenhub_token:
        logger.error('zenhub API token must be provided or set as environment'
                     ' variable (ZENHUB_TOKEN).')
        sys.exit(1)

    try:
        gh = GithubConnection.getConnection(token=github_token)
        org = gh.organization(GITHUB_ORG)
        repos = org.repositories()
        issues = []
        repo_dict = {}
        zen = Zenhub(zenhub_token)
        for repo in repos:
            # the release issue list only needs to be fetched once
            if not issues:
                issues = zen.get_issues_by_release(repo.id, f'B{args.build_number}')
            repo_dict[repo.id] = {'repo': repo,
                                  'issues': []}
        # Build up dictionary of repos + issues in release
        for issue in issues:
            repo_dict[issue['repo_id']]['issues'].append(issue['issue_number'])
        # Load project info and key dates from the YAML config
        with open(args.projects_config) as _file:
            _conf = load(_file, Loader=FullLoader)
        projects = _conf['projects']
        key_dates = _conf['key_dates']
        # Loop through repos, accumulating RST output per repo
        plan_output = ''
        ddwg_plans = ''
        for repo_id in repo_dict:
            r = repo_dict[repo_id]['repo']
            issues = repo_dict[repo_id]['issues']
            repo_output = ''
            if issues:
                for issue_num in issues:
                    gh_issue = gh.issue(org.login, repo_dict[repo_id]['repo'].name, issue_num)
                    zen_issue = zen.issue(repo_id, issue_num)
                    # we only want release themes in the plan (is_epic + label:theme)
                    labels = get_labels(gh_issue)
                    # Custom handling for pds4-information-model SCRs
                    if 'CCB-' in gh_issue.title:
                        ddwg_plans += f'* `{r.name}#{issue_num} <{gh_issue.html_url}>`_ **{gh_issue.title}**\n'
                    elif is_theme(labels, zen_issue):
                        repo_output += f'* `{r.name}#{issue_num} <{gh_issue.html_url}>`_ **{gh_issue.title}**\n'
                        # nested bullet list of the theme's epic children
                        for child in zen.get_epic_children(gh, org, repo_id, issue_num):
                            child_repo = child['repo']
                            child_issue = child['issue']
                            repo_output += f' * `{child_repo.name}#{child_issue.number} <{child_issue.html_url}>`_ {child_issue.title}\n'
            repo_info = REPO_INFO.format(r.name,
                                         '#' * len(r.name),
                                         r.description,
                                         r.homepage or r.html_url + '#readme',
                                         r.html_url,
                                         r.html_url,
                                         r.html_url,
                                         r.html_url,
                                         r.html_url)
            # only output the repo header when the repo contributed themes
            if repo_output:
                plan_output += repo_info
                plan_output += repo_output
        with open(output_fname, 'w') as f_out:
            template_kargs = {
                'output': output_fname,
                'build_number': args.build_number,
                'scr_date': key_dates['scr_date'],
                'doc_update_date': key_dates['doc_update_date'],
                'delivery_date': key_dates['delivery_date'],
                'trr_date': key_dates['trr_date'],
                'beta_test_date': key_dates['beta_test_date'],
                'dldd_int_date': key_dates['dldd_int_date'],
                'doc_review_date': key_dates['doc_review_date'],
                'ddr_date': key_dates['ddr_date'],
                'release_date': key_dates['release_date'],
                'pds4_changes': ddwg_plans,
                'planned_changes': plan_output
            }
            template = Template(resource_string(__name__, 'plan.template.rst').decode("utf-8"))
            rst_str = template.render(template_kargs)
            f_out.write(rst_str)
    except Exception:
        traceback.print_exc()
        sys.exit(1)
    logger.info('SUCCESS: Release Plan generated successfully.')
if __name__ == '__main__':
main()
| 8,468 | 2,531 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import AbstractUser
from django.db import models
# Create your models here.
class User(AbstractUser):
    """
    Custom user model extending Django's AbstractUser with profile fields.
    (Original Turkish docstring: "Bu model kullanıcılar için kullanılacaktır."
    — "This model will be used for users.")
    """
    # postal address, optional
    adres = models.CharField(max_length=255, null=True, blank=True)
    # phone number, optional
    telefon = models.CharField(max_length=255, null=True, blank=True)
    # biography text; NOTE(review): no blank=True, so it is required in forms — confirm intended
    biografi = models.TextField()
    # external profile links; NOTE(review): also required (no blank=True) — confirm intended
    github = models.URLField()
    linkedin = models.URLField()
    # profile image, uploaded under a dated folder, with a placeholder default
    resim = models.ImageField(upload_to='KullaniciResimleri/%Y/%m/%d', default='KullaniciResimleri/resim.png')
"""
Aravind Veerappan
BNFO 601 - Exam 2
Question 2. Protein BLAST
"""
import math
from PAM import PAM
class BLAST(object):
    """Gapped protein BLAST over a single query/target pair.

    Seeds are word_size-length words whose PAM-scored match to a target word
    exceeds `threshold`; each seed is extended forward and backward with a
    sparse, Smith-Waterman-style dynamic program using gap_open/gap_extend
    penalties. Written for Python 2 (print statements, xrange).
    """
    FORWARD = 1  # These are class variables shared by all instances of the BLAST class
    BACKWARD = -1
    ROW = (0, 1)
    COLUMN = (1, 0)
    def __init__(self, query=None, target=None, word_size=3, gap_open=-10, gap_extend=-4, threshold=10, PAM=None):
        self.query = query  # This is the string corresponding to the query sequence
        self.target = target  # This is the string corresponding to the target sequence
        self.word_size = word_size  # Size of the seed word for initiating extensions
        self.word_score = None  # something different required for PBLAST!
        self.gap_open = gap_open
        self.gap_extend = gap_extend
        self.querylen = len(query)
        self.targetlen = len(target)
        self.blast_table = {}  # Our main dynamic programming table containing scores
        self.traceback_table = {}  # A corresponding table for recording the tracebacks
        self.target_index = {}
        self.threshold = threshold  # Neighborhood threshold value for scoring
        self.PAM = PAM  # PAM table
        return
    def score(self):  # This method performs BLAST scoring and returns a string describing the resulting alignment
        """Run the seeded search; print each alignment and return a list of
        (score, query_alignment, target_alignment, query_begin, target_begin)."""
        result_summary = []  # A list, for now, that will store results of the alignments
        if not self.target_index:  # if this is the first time scoring we should index the target
            for i in xrange(len(self.target) - self.word_size + 1):
                word = self.target[i: i + self.word_size]
                if word in self.target_index:
                    self.target_index[word].append(i)  # A dict of lists is an efficient structure for this index.
                    # The list items are word coordinates in the target.
                else:
                    self.target_index[word] = [i]
        # print self.target_index
        ## First we must iterate through words in the query:
        query_position = 0
        while query_position < self.querylen - self.word_size + 1:
            # print "Query position is", query_position
            query_word = self.query[query_position:query_position + self.word_size]
            # lookup scores for each AA pair from PAM table
            for target_word in self.target_index.keys():
                score = 0
                for i in range(len(target_word)):
                    score += self.PAM[target_word[i], query_word[i]]
                # If the calculated score is higher than the neighborhood threshold value then extend the alignment
                # and set the starting word score equal to the calculated score
                if score > self.threshold:
                    self.word_score = score
                    for target_position in self.target_index[target_word]:
                        print "Searching for seed", query_word, "at target position", target_position
                        # print "Extending forward"
                        forward_score, forward_extension_q, forward_extension_t = \
                            self._extend_alignment(query_position, target_position, self.FORWARD)
                        # print "Extending backwards"
                        backward_score, backward_extension_q, backward_extension_t = \
                            self._extend_alignment(query_position, target_position, self.BACKWARD)
                        q_result = backward_extension_q[:-1] + query_word + forward_extension_q[1:]
                        t_result = backward_extension_t[:-1] + query_word + forward_extension_t[1:]
                        # Note that the last character of a backward extension, and the zeroth character of a forward
                        # extension overlap with the query word and should therefore be discarded - thus the slice notation.
                        score = forward_score + backward_score - self.word_score
                        # We need to make sure that we don't double count the seed score!
                        # calculate e-value
                        # e_value = self.querylen * self.targetlen * math.e ** (math.log(1 / 4) * score)
                        # calculate bit score
                        # bit_score = (-math.log(1 / 4) * score - math.log(1)) / math.log(2)
                        query_begin = query_position - len(backward_extension_q) + 2
                        target_begin = target_position - len(backward_extension_t) + 2
                        # result_summary.append((e_value, bit_score, score, q_result, t_result, query_begin, target_begin))
                        result_summary.append((score, q_result, t_result, query_begin, target_begin))
                        alignment_string = '\nAlignment had a score of ' + str(score) + ' and is:\n\nTarget:\t' + \
                            str(target_begin) + '\t' + str(t_result) + '\n\t\t\t'
                        for k in xrange(len(t_result)):  # t and q alignments should be the same length!
                            if t_result[k] == q_result[k]:
                                alignment_string += '|'
                                # Only put a bar if the two characters are identical at this position
                            else:
                                alignment_string += ' '  # otherwise just insert a space
                        alignment_string += '\nQuery:\t' + str(query_begin) + '\t' + str(q_result) + '\n'
                        print alignment_string
                        # The above statements just concatenate together a multi-line string that will correctly display
                        # the best alignment when it is subsequently printed.
            query_position += 1
        return result_summary
    def _extend_alignment(self, query_start, target_start, direction):
        """ This private method attempts to extend an alignment in the forward and backward direction
        depending on the value of the direction flag, which here takes the value 1 (for forward extension) or
        -1 for backward.For clarity these constants are defined by the class variables self.FORWARD and self.BACKWARD
        """
        self.high_score = self.word_score
        # highest scores encountered so far will always initially be the word_score * match_reward
        self.high_q_pos = self.high_t_pos = 0
        if direction == self.FORWARD:  # We start with the 0,0 position representing the last character
            query_start += self.word_size - 1  # of the seed word for forward extensions.
            target_start += self.word_size - 1  # For backward extensions, leave it as it is (i.e. zeroth character)
        self.blast_table = dict()
        # The BLAST table is a dict of tuples. Each tuple represents a (query, target) position
        # this sparse representation will be much more efficient than using a 2D list
        self.blast_table[0, 0] = self.high_score  # initialize the top left corner with the word score
        self.high_q_pos = 0
        self.high_t_pos = 0
        self.traceback_table[0, 0] = (1, 1)
        # There is no traceback path for the origin, but the program logic elsewhere dictates that we provide one
        cur_t_pos = 1  # we are going to score the edges first (top and left), which can *only* ever be gaps back
        # to the origin. i.e. the question of matching or not matching is completely irrelevant here.
        # We start by scoring the top edge, beginning with position 1..
        cur_score = max(0, self.blast_table[(0, 0)] + self.gap_open)  # first one always a gap open
        while cur_score:  # only keep going as long as we have non-zero values
            self.blast_table[(0, cur_t_pos)] = cur_score  # only record non-zero values
            self.traceback_table[(0, cur_t_pos)] = (0, 1)  # record a target gap in the traceback table
            cur_score = max(0, self.blast_table[(0, cur_t_pos)] + self.gap_extend)  # any subsequent are extends
            cur_t_pos += 1
        cur_t_pos = 0  # Now we do the same thing for the left edge as we just did for the top edge
        cur_q_pos = 1
        cur_score = max(0, self.blast_table[(0, 0)] + self.gap_open)  # first one always a gap open
        while cur_score:  # only keep going as long as we have non-zero values
            self.blast_table[(cur_q_pos, 0)] = cur_score  # only record non-zero values
            self.traceback_table[(cur_q_pos, 0)] = (1, 0)  # record a query gap in the traceback table
            cur_score = max(0, self.blast_table[(cur_q_pos, 0)] + self.gap_extend)
            cur_t_pos += 1  # NOTE(review): the left edge walks the query axis, so this looks like it should be cur_q_pos += 1 — confirm
        # print "blast table 0,0 is", self.blast_table[0, 0], "and high score is", self.high_score
        # alright, finished with edges. Note that high scores can NEVER occur in an edge so these were not considered.
        # Henceforth, however, we will need to think about this.
        cur_t_pos = 0  # Start at the first position
        cur_q_pos = 0
        # Now we will score the table, proceeding according to the algorithm description: first incrementing along
        # the diagonal, then scoring the adjacent row, then the column below
        # Unlike Smith Waterman, the matrix is no longer of defined size, so we need to use while loops instead of for
        while True:  # I think it's cleaner to affirmatively break out of this main loop. Too bad Python has no do-while
            cur_t_pos += 1  # Advance along the diagonal by incrementing
            cur_q_pos += 1  # Remember, these refer to coordinates in our table, not in the actual target or query
            # Probably we need to do some bounds checking here too with respect to absolute position in the query and
            # target similar to what is done in the _fill_in_row_or_column method
            # print "Beginning row starting at", cur_q_pos, cur_t_pos, "of the blast table"
            max_in_row = self._fill_in_row_or_column(cur_q_pos, cur_t_pos, query_start, target_start,
                                                     direction, self.ROW)
            # print "Max in row was ", max_in_row
            # print "Beginning column starting at", cur_q_pos, cur_t_pos, "of the blast table"
            max_in_column = self._fill_in_row_or_column(cur_q_pos, cur_t_pos, query_start,
                                                        target_start, direction, self.COLUMN)
            # print "Max in column was ", max_in_column
            if not max(max_in_row, max_in_column):
                break  # If the maximum value we encounter in both the rows and columns is zero, we are done building
        # print "Finished building a matrix"
        best_q_alignment = []  # best partial alignment for the query sequence
        best_t_alignment = []  # best partial alignment for the target sequence
        ## Now we can go ahead and produce an output string corresponding to the best alignment
        cur_q_pos = self.high_q_pos  # our approach is start at the high scoring box, and to trace our way back
        cur_t_pos = self.high_t_pos
        while cur_q_pos >= 0 and cur_t_pos >= 0 and self.blast_table.setdefault((cur_q_pos, cur_t_pos), 0):
            q_offset, t_offset = self.traceback_table[cur_q_pos, cur_t_pos]
            # unpack the offset tuples stored in the traceback table
            if q_offset:
                try:
                    best_q_alignment.append(self.query[query_start + cur_q_pos * direction])
                except IndexError:
                    print "YO!", query_start, cur_q_pos, direction, query_start + cur_q_pos * direction
                    print "Best_q_alignment", best_q_alignment
                    quit()
            else:
                best_q_alignment.append('-')  # if the value is a zero, we are gapping!
            if t_offset:
                best_t_alignment.append(self.target[target_start + cur_t_pos * direction])
            else:
                best_t_alignment.append('-')  # if the value is a zero, we are gapping, now the other way
            cur_q_pos -= q_offset  # Note that we are subtracting positively valued offsets.
            cur_t_pos -= t_offset  # This design choice makes later printing a traceback table a lot prettier.
        # Alternatively, we could have built our alignments by adding things at the beginning using statements like
        # best_t_alignment.insert(0,'-') etc. But in Python inserting items at the beginning of a list is much slower
        # than appending at the end. We are better off appending at the end, then reversing the whole mess when done.
        # print "Returning information about a partial alignment", self.high_score, best_q_alignment, best_t_alignment
        # flip 'em both once we are done, since we built them "end-to-beginning". Note that we don't need to flip
        # sequences corresponding to backwards extensions!
        if direction == self.FORWARD:
            best_q_alignment.reverse()
            best_t_alignment.reverse()
        return self.high_score, ''.join(best_q_alignment), ''.join(best_t_alignment)
    def _fill_in_row_or_column(self, cur_q_pos, cur_t_pos, query_start, target_start, direction, row_or_column):
        """This private method will fill in a row or column, depending on the tuple passed in the row_or_column argument
        Each row or column is filled in until a zero-valued result is obtained.
        """
        # print "filling in a row or column"
        max_in_current_row_or_column = 0
        q_add, t_add = row_or_column
        # These variables will control whether we fill in a row or a column. If the argument row_or_column = (0,1)
        # we will end filling in a row. If the argument is assigned (1,0) we will fill a column
        while True:
            query_position = query_start + cur_q_pos * direction  # remember, direction here is either -1 or 1
            target_position = target_start + cur_t_pos * direction  # so is a positive or negative offset multiplier
            # query and target position variables here refer to the actual (absolute) position within the query
            # and target sequences respectively
            if (query_position < 0) or (target_position < 0):
                # print "Ran out of query or target sequence while attempting backwards extension"
                break  # we can go no further
            if (query_position >= self.querylen) or (target_position >= self.targetlen):
                # print "Ran out of q or t while attempting forwards extension", query_position, target_position
                break  # again, we can go no further
            q_char = self.query[query_position]
            t_char = self.target[target_position]
            # print "comparing", q_char, query_position, "to", t_char, target_position
            # use PAM table to find the increment
            increment = self.PAM[(q_char, t_char)]
            match_score = self.blast_table[(cur_q_pos - 1, cur_t_pos - 1)] + increment
            # improvement for later - decide whether to apply gap opening or gap extension penalties
            # for the moment just set gap increment to the gap_open value
            increment = self.gap_open
            # scores associated with gapping in either the target or query
            target_gap_score = self.blast_table.setdefault((cur_q_pos, cur_t_pos - 1), 0) + increment
            query_gap_score = self.blast_table.setdefault((cur_q_pos - 1, cur_t_pos), 0) + increment
            best_score = max(
                (0, (0, 0)),  # a 0 score will never have a traceback
                (match_score, (1, 1)),  # A match corresponds to a -1,-1 traceback
                (target_gap_score, (0, 1)),  # A target gap corresponds to a 0, -1 traceback
                (query_gap_score, (1, 0))  # A query gap corresponds to a -1, 0 traceback
            )
            if not best_score[0]:
                break
            self.blast_table[cur_q_pos, cur_t_pos] = best_score[0]
            # The first element in the tuple is the actual score to be recorded
            # print "Recording", best_score[0], "at position", cur_q_pos, cur_t_pos
            self.traceback_table[cur_q_pos, cur_t_pos] = best_score[1]
            # The traceback offsets associated with the score are in a tuple as described earlier
            if best_score[0] >= self.high_score:
                # This represents the "high road" approach. "low road" would simply be >
                self.high_score = best_score[0]  # record the new high score
                self.high_q_pos = cur_q_pos  # also record the i and j positions associated with that score
                self.high_t_pos = cur_t_pos
            if best_score[0] > max_in_current_row_or_column:
                max_in_current_row_or_column = best_score[0]
                # The maximum in a particular row or column is different from the overall high score! We actually
                # only care if this value is non-zero, as this will tell us that another iteration along the diagonal is
                # required.
            cur_t_pos += t_add  # We end up adding either a zero or a one to these depending on
            cur_q_pos += q_add  # whether we are filling in a row or a column, setting us up for the next iteration
        return max_in_current_row_or_column
    def __str__(self):
        """ This is a "special method attribute" overwriting the __str__ method defined in object.
        __str__ controls what the string representation of objects of the BLAST class will look like.
        It is invoked by print statements, which will print the return value. The bad news is that the routine here
        was more-or-less just lifted from the old Smith Waterman program. However, BLAST uses a fundamentally
        different sort of data structure for representing the blast and traceback tables.
        Can you fix this method so that it does something useful?
        """
        # NOTE(review): assumes dense (i, j) keys; the sparse tables built by
        # _extend_alignment will raise KeyError for unvisited cells — confirm.
        lineout = 'Scoring table:\n\t' + '\t'.join(self.target) + '\n'
        # The above is just a fancy looking way to break the target string into tab-delimited individual characters
        for i in xrange(self.querylen):
            lineout += self.query[i] + "\t"
            for j in xrange(self.targetlen):
                lineout += str(self.blast_table[i, j]) + "\t"
            lineout += '\n'
        lineout += '\n\nTraceback table:\n\t' + '\t'.join(self.target) + '\n'
        for i in xrange(self.querylen):
            lineout += self.query[i] + "\t"
            for j in xrange(self.targetlen):
                lineout += ''.join([str(k) for k in self.traceback_table[i, j]]) + "\t"
                # just prettying up the traceback tuples
            lineout += '\n'
        return lineout
# MAIN PROGRAM
# Two 40-residue protein fragments to align against each other
# (presumably homologous segments — names are just labels here; confirm).
numbat = 'LVSMLESYVAAPDLILLDIMMPGMDGLELGGMDGGKPILT'
quoll = 'DDMEVIGTAYNPDVLVLDIIMPHLDGLAVAAMEAGRPLIS'
# calculate PAM120 matrix
A = PAM(N=120)
PAM1 = A.Build_PAMN()
B = BLAST(numbat, quoll, PAM=PAM1)
print B.score()  # Python 2 print: runs the search and prints all alignments
| 19,404 | 5,707 |
class Solution(object):
    def isHappy(self, n):
        """
        Return True if n is a "happy number": repeatedly replacing n by the
        sum of the squares of its digits eventually reaches 1. Cycle
        detection via a set of previously seen values.
        :type n: int
        :rtype: bool
        """
        seen = set()
        while n != 1:
            if n in seen:
                # we are in a cycle that never reaches 1
                return False
            seen.add(n)
            digit_square_sum = 0  # renamed from `sum`, which shadowed the builtin
            while n > 0:
                digit = n % 10
                # BUG FIX: use floor division — `n /= 10` yields a float under
                # Python 3 true division and breaks the digit loop.
                n //= 10
                digit_square_sum += digit * digit
            n = digit_square_sum
        return True
# Read a line from the user and print two character-class checks.
n = input('Digite um algo: ')  # Portuguese prompt: "Type something: "
print(n.isalpha())  # True if non-empty and every character is alphabetic
print(n.isupper())  # True if all cased characters are uppercase (and at least one is cased)
| 68 | 28 |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Use the README as the package's long description on PyPI.
with open('README.md') as f:
    readme = f.read()

setup(
    name='ilias2nbgrader',
    version='0.4.3',
    license='MIT',
    url='https://github.com/DigiKlausur/ilias2nbgrader',
    description='Exchange submissions and feedbacks between ILIAS and nbgrader',
    long_description=readme,
    long_description_content_type="text/markdown",
    author='Tim Metzler',
    author_email='tim.metzler@h-brs.de',
    packages=find_packages(exclude=('tests', 'docs')),
    install_requires=[
        "rapidfuzz",
        "nbformat"
    ],
    include_package_data = True,  # ship files listed in MANIFEST.in / package_data
    zip_safe=False,
    test_suite='tests',
    tests_require=['pytest-cov']
)
| 717 | 257 |
from __future__ import absolute_import, print_function
from numba import jit
import numpy as np
# from externals.six.moves import range
def bayes_boot_probs(n):
    """Draw Bayesian bootstrap case weights.

    Parameters
    ----------
    n : int
        Number of Bayesian bootstrap samples

    Returns
    -------
    1d array-like
        Positive sampling probabilities that sum to one
    """
    weights = np.random.exponential(scale=1.0, size=n)
    return weights / weights.sum()
@jit(nopython=True, cache=True, nogil=True)
def auc_score(y_true, y_prob):
    """Compute the area under the ROC curve by rank counting.

    Sorts labels by predicted score, then for each positive counts the
    negatives ranked below it; the normalized total is the AUC.

    Parameters
    ----------
    y_true : 1d array-like
        Class labels; assumed numeric 0/1 (the code computes 1 - y_true[i]) — TODO confirm.
    y_prob : 1d array-like
        Predicted scores/probabilities for the positive class.

    Returns
    -------
    auc : float
        Area under the ROC curve. NOTE(review): divides by
        nfalse * (n - nfalse) — degenerate (all-one-class) input divides by zero.
    """
    y_true, n = y_true[np.argsort(y_prob)], len(y_true)
    nfalse, auc = 0, 0.0
    for i in range(n):
        nfalse += 1 - y_true[i]  # running count of negatives seen so far
        auc += y_true[i] * nfalse  # each positive scores one per negative below it
    auc /= (nfalse * (n - nfalse))
    return auc
def logger(name, message):
    """Print a message with style "[NAME] message".

    Parameters
    ----------
    name : str
        Short title of the message (upper-cased on output), e.g. train or test
    message : str
        Main description to be displayed in the terminal

    Returns
    -------
    None
    """
    print('[{}] {}'.format(name.upper(), message))
def estimate_margin(y_probs, y_true):
    """Estimate the margin function of a forest ensemble.

    The margin of a sample is P(true class) minus the largest probability
    assigned to any *other* class (similar to margin in R's randomForest).

    Parameters
    ----------
    y_probs : 2d array-like
        Row-wise predicted class distributions (one column per class)
    y_true : 1d array-like
        True class labels (integer column indices)

    Returns
    -------
    margin : 1d array
        Per-sample estimated margins
    """
    n_samples, n_classes = y_probs.shape
    rows = np.arange(n_samples, dtype=int)

    # probability assigned to each sample's true class
    true_probs = y_probs[rows, y_true]

    # best probability among the remaining classes, found by masking out the
    # true-class column one row at a time
    rival_probs = np.zeros(n_samples)
    for row in rows:
        hide_true = np.arange(n_classes) == y_true[row]
        best_rival = np.ma.array(y_probs[row, :], mask=hide_true).argmax()
        rival_probs[row] = y_probs[row, best_rival]

    # Margin is P(y == j) - max(P(y != j))
    return true_probs - rival_probs
| 2,343 | 760 |
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.views.generic import ListView
from .models import Post , Comment
from .forms import EmailPostForm , CommentForm , SearchForm
from django.core.mail import send_mail
from taggit.models import Tag
from django.db.models import Count
from django.contrib.postgres.search import SearchVector , SearchQuery , SearchRank , TrigramSimilarity
def post_list(request, tag_slug=None):
    """Render the paginated list of published posts, optionally filtered by tag."""
    tag = None
    object_list = Post.published.all()
    if tag_slug:
        tag = get_object_or_404(Tag, slug=tag_slug)
        object_list = object_list.filter(tags__in=[tag])
    paginator = Paginator(object_list, 1)  # one post per page
    page = request.GET.get('page')
    try:
        posts = paginator.page(page)
    except PageNotAnInteger:
        # non-integer page parameter: deliver the first page
        posts = paginator.page(1)
    except EmptyPage:
        # page out of range: deliver the last page of results
        posts = paginator.page(paginator.num_pages)
    context = {'page': page,
               'posts': posts,
               'tag': tag}
    return render(request, 'blog/post/list.html', context)
def post_detail(request, year, month, day, post):
    """Show one published post with its active comments, a comment form,
    and up to four posts sharing the most tags."""
    post = get_object_or_404(Post, slug=post,
                             status='published',
                             publish__year=year,
                             publish__month=month,
                             publish__day=day)
    # Only comments flagged as active are displayed.
    comments = post.comments.filter(active=True)
    new_comment = None
    if request.method == 'POST':
        # A comment was submitted with this request.
        comment_form = CommentForm(data=request.POST)
        if comment_form.is_valid():
            # Build the Comment without saving, attach the current post,
            # then persist it.
            new_comment = comment_form.save(commit=False)
            new_comment.post = post
            new_comment.save()
    else:
        comment_form = CommentForm()
    # Recommend published posts that share the most tags with this one.
    tag_ids = post.tags.values_list('id', flat=True)
    similar_posts = (Post.published.filter(tags__in=tag_ids)
                     .exclude(id=post.id)
                     .annotate(same_tags=Count('tags'))
                     .order_by('-same_tags', '-publish')[:4])
    context = {
        'post': post,
        'comments': comments,
        'new_comment': new_comment,
        'comment_form': comment_form,
        'similar_posts': similar_posts,
    }
    return render(request, 'blog/post/detail.html', context)
class PostListView(ListView):
    # Class-based alternative to the post_list() view: a paginated list
    # of published posts rendered with the same template.
    queryset = Post.published.all()
    context_object_name = 'posts'  # template variable holding the page's posts
    paginate_by = 3  # posts per page (NOTE(review): post_list() paginates by 1)
    template_name = 'blog/post/list.html'
def post_share(request, post_id):
    """Share a published post by e-mail using EmailPostForm."""
    post = get_object_or_404(Post, id=post_id, status='published')
    sent = False
    if request.method == 'POST':
        form = EmailPostForm(request.POST)
        if form.is_valid():
            # Compose subject/body from the validated form fields.
            cd = form.cleaned_data
            post_url = request.build_absolute_uri(post.get_absolute_url())
            subject = '{} ({}) recommends you reading" {}" '.format(
                cd['name'], cd['email'], post.title)
            message = 'Read "{}" at {}\n\n{}\'s comments: {}'.format(
                post.title, post_url, cd['name'], cd['comments'])
            send_mail(subject, message, 'admin@arasch.ir', [cd['to']])
            sent = True
    else:
        form = EmailPostForm()
    context = {'post': post, 'form': form, 'sent': sent}
    return render(request, 'blog/post/share.html', context)
def post_search(request):
    """Trigram-similarity search over post titles (weighted full-text
    vectors are also annotated for ranking)."""
    form = SearchForm()
    query = None
    results = []
    if 'query' in request.GET:
        form = SearchForm(request.GET)
        if form.is_valid():
            query = form.cleaned_data['query']
            # Title matches carry a higher weight than body matches.
            search_vector = (SearchVector('title', weight='A')
                             + SearchVector('body', weight='B'))
            search_query = SearchQuery(query)
            results = (Post.objects
                       .annotate(similarity=TrigramSimilarity('title', query),
                                 search=search_vector,
                                 rank=SearchRank(search_vector, search_query))
                       .filter(similarity__gt=0.3)
                       .order_by('-similarity'))
    context = {'form': form, 'query': query, 'results': results}
    return render(request, 'blog/post/search.html', context)
from django.utils import timezone
from django.db.models import Q
from celery.decorators import task, periodic_task
from celery.utils.log import get_task_logger
from celery.task.schedules import crontab
from accounts.models.user_profile import ClubUserProfile
from management.models.activity_apply import ActivityApplication
from accounts.models.messages import Messages
from StudentAssociation.utils import message_service
from .utils import send_email
logger = get_task_logger(__name__)
@task(name='celery_send_email')
def celery_send_email(subject, to_email, msg):
    """Celery task: deliver an e-mail through the shared send_email helper.

    Returns whatever send_email returns.
    """
    logger.info("Send Email")
    return send_email(subject, to_email, msg)
@task(name="send_inner_message")
def send_inner_message(content, next_url, to_user, msg_type):
pass
@periodic_task(run_every=crontab(minute=2, hour='8-10'))
def send_msg_to_notice_check():
    """Periodic reminder task for pending activity applications.

    For every application still missing at least one approval (teacher,
    association, or xuegong) that has been pending for a full day or
    more, send an SMS reminder to the relevant reviewer.  Each reminder
    is sent at most once, tracked via the send_ass / send_tea / send_xue
    flags on the application.
    """
    pending = ActivityApplication.objects.filter(
        Q(approved_teacher=False) | Q(approved_association=False)
        | Q(approved_xuegong=False))
    for ap in pending:
        # `elapsed` renamed from `re`, which shadowed the stdlib module name.
        elapsed = timezone.now() - ap.apply_time
        if elapsed.days < 1:
            continue
        if not ap.approved_association and not ap.send_ass:
            phone_number = ClubUserProfile.objects.filter(job="活动管理")[0].phone_number
            content = "您有一个来自 " + ap.main_club.name + " 活动申请,等待你进行审核哦,请登录社团管理系统进行查看。"
            flag, status = message_service(phone_number=phone_number, message=content)
            if flag:
                ap.send_ass = True
        if not ap.approved_teacher and not ap.send_tea:
            phone_number = ClubUserProfile.objects.filter(job="指导老师", club=ap.main_club)[0].phone_number
            content = "您所管理的社团: " + ap.main_club.name + " ,有一个活动申请等待您的审核,请登录社团管理系统进行查看。"
            flag, status = message_service(phone_number=phone_number, message=content)
            if flag:
                ap.send_tea = True
        # BUG FIX: the original condition tested `ap.send_xue` twice; the
        # first operand must be the xuegong approval flag, mirroring the
        # two branches above.
        if not ap.approved_xuegong and not ap.send_xue:
            phone_number = ClubUserProfile.objects.filter(job="学工处老师")[0].phone_number
            content = "您有一个来自 " + ap.main_club.name + " 活动申请,等待你进行审核哦,请登录社团管理系统进行查看。"
            flag, status = message_service(phone_number=phone_number, message=content)
            if flag:
                ap.send_xue = True
        ap.save()
    return True
| 2,424 | 845 |
import threading
from datetime import datetime
from io import BytesIO
# Serializes camera captures across threads.
capture_lock = threading.Lock()


def take_picture(camera):
    """Capture one annotated JPEG frame and return it as bytes.

    The camera is rotated 180 degrees and stamped with the current
    timestamp; the capture itself runs under a module-level lock so
    concurrent callers cannot interleave captures.
    """
    buffer = BytesIO()
    camera.rotation = 180
    camera.annotate_text = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    with capture_lock:
        camera.capture(buffer, 'jpeg', resize=(720, 480))
    try:
        return buffer.getvalue()
    finally:
        buffer.close()
| 433 | 147 |
import pytest
from tests.tf_tests.functional import BaseFunctionalTest, TENSORFLOW_SUPPORTED, TENSORFLOW_AVAILABLE, MODEL, DATA
class TestTFInference(BaseFunctionalTest):
    """Functional smoke tests for deeplite's TensorFlow inference helpers.

    Each test imports its target lazily inside the test body, so module
    collection does not require the inference package up front.
    """
    def test_get_acc(self):
        # Presumably accuracy is reported on a 0-100 scale; the fixture
        # model is expected to score below a perfect 100 -- TODO confirm.
        from deeplite.tf_profiler.tf_inference import get_accuracy
        assert get_accuracy(MODEL, DATA['test']) < 100
    def test_get_topk(self):
        # Default call returns two entries; topk=1 narrows it to one.
        from deeplite.tf_profiler.tf_inference import get_topk
        assert len(get_topk(MODEL, DATA['test'])) == 2
        assert len(get_topk(MODEL, DATA['test'], topk=1)) == 1
    def test_get_missclass(self):
        # The fixture model is expected to misclassify at least one sample.
        from deeplite.tf_profiler.tf_inference import get_missclass
        assert get_missclass(MODEL, DATA['test']) > 0
| 692 | 260 |
# Copyright (c) 2008-2011, Jan Gasthaus
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy.random as R
from collections import deque
from utils import *
from numpy import *
class TransitionKernel(object):
    """Abstract base class for transition kernels over component parameters.

    A kernel holds a reference to the model it operates on and a tuple of
    kernel-specific parameters; concrete subclasses implement the walk.
    """

    def __init__(self, model, params):
        self.params = params
        self.model = model

    def p_walk(self, old_mu, old_lam, mu, lam, tau=None):
        raise NotImplementedError

    def walk(self, params, tau=None, p_old=None):
        raise NotImplementedError

    def walk_with_data(self, params, data, tau=None):
        """Sample from the walk given some observation.

        Fallback implementation: ignore the data, sample from the
        unconditional walk, and return weight 1.
        """
        return (self.walk(params, tau), 1)

    def walk_backwards(self, params, tau=None):
        # The generic walk is time-symmetric, so simply reuse it.
        return self.walk(params, tau)
class MetropolisWalk(TransitionKernel):
    """Gaussian random-walk Metropolis kernel on (mu, lam).

    ``self.params[0]`` and ``self.params[1]`` are the walk standard
    deviations for the mean and precision respectively.
    """

    def walk(self, params, tau=None, p_old=None):
        """Propose a random-walk step and accept/reject it with the
        Metropolis rule under the model's parameter prior.

        ``p_old`` may be passed in as the (non-log) prior probability of
        ``params`` to avoid recomputation.
        """
        # BUG FIX: identity comparison instead of `p_old == None`, which
        # would broadcast element-wise if an ndarray were ever passed.
        if p_old is None:
            p_old = exp(self.model.p_log_prior_params(params))
        # random walk on mean and precision
        n_mu = params.mu + self.params[0] * R.standard_normal(self.model.dims)
        n_lam = params.lam + self.params[1] * R.standard_normal(self.model.dims)
        # keep precision values that are about to become non-positive unchanged
        if self.model.dims > 1:
            idx = n_lam <= 0
            n_lam[idx] = params.lam[idx]
        else:
            if n_lam <= 0:
                n_lam = params.lam
        # Metropolis update rule: on rejection keep the old parameters
        new_params = self.model.get_storage(n_mu, n_lam)
        p_new = exp(self.model.p_log_prior_params(new_params))
        if R.rand() > p_new / p_old:  # not accepted -> keep old values
            new_params = params
        return new_params
class CaronIndependent(TransitionKernel):
    """Auxiliary-variable transition kernel in the style of Caron et al.

    One step samples ``num_aux`` pseudo-observations from the current
    parameters (precision scaled by ``rho``), then redraws the
    parameters from the conjugate Normal-Gamma posterior given those
    pseudo-observations and, optionally, an actual data point.
    """

    def __init__(self, model, params):
        TransitionKernel.__init__(self, model, params)
        self.num_aux = params[0]
        self.rho = params[1]
        self.D = model.params.mu0.shape[0]
        n0 = self.model.params.n0
        mu0 = self.model.params.mu0
        alpha = self.model.params.a
        beta = self.model.params.b
        # Constants precomputed from the hyperparameters.
        self.beta_up = n0 / (2 * (n0 + 1))
        self.np = n0 + 1
        self.mu_up1 = (n0 * mu0) / self.np
        self.mu_up2 = self.np * (alpha + 0.5)
        self.mu_up3 = 2 * alpha + 1
        self.gam_up = alpha + 0.5

    def walk(self, params, tau=None):
        return self.__general_walk(params, data=None, tau=tau)

    def __general_walk(self, params, data=None, tau=None):
        """General version of the random walk allowing for an arbitrary
        number of auxiliary variables and/or data points.
        """
        return self.sample_posterior(self.sample_aux(params, tau), data, tau)

    def p_log_posterior(self, params, aux_vars, data=None):
        """Log density of ``params`` under the posterior given the
        auxiliary variables and, optionally, one data point."""
        n0 = self.model.params.n0
        mu0 = self.model.params.mu0
        alpha = self.model.params.a
        beta = self.model.params.b
        num_aux = aux_vars.shape[1]
        # BUG FIX: `data != None` broadcasts element-wise for ndarrays and
        # raises inside `if`; use identity checks instead.
        if data is not None:
            nn = num_aux / self.rho + 1
        else:
            nn = num_aux / self.rho
        if data is not None:
            aux_vars = c_[aux_vars, data]
        data_mean = mean(aux_vars, 1)
        # make data_mean a rank-2 D-by-1 array so we can use broadcasting
        data_mean.shape = (data_mean.shape[0], 1)
        nvar = sum((aux_vars - data_mean) ** 2, 1)
        data_mean.shape = (data_mean.shape[0],)
        mu_star = (n0 * mu0 + nn * data_mean) / (n0 + nn)
        beta_star = beta + 0.5 * nvar + (nn * n0 * (mu0 - data_mean) ** 2) / (2 * (n0 + nn))
        p1 = sum(logpgamma(params.lam, alpha + 0.5 * nn, beta_star))
        p2 = sum(logpnorm(params.mu, mu_star, (nn + n0) * params.lam))
        return p1 + p2

    def p_posterior(self, params, aux_vars, data=None):
        return exp(self.p_log_posterior(params, aux_vars, data))

    def p_log_aux_vars(self, params, aux_vars):
        """Log density of the auxiliary variables given ``params``."""
        mu = params.mu
        lam = params.lam * self.rho
        lnp = 0
        for n in range(aux_vars.shape[1]):
            lnp += sum(logpnorm(aux_vars[:, n], mu, lam))
        return lnp

    def sample_posterior(self, aux_vars, data=None, tau=None):
        """Sample from the posterior given the auxiliary variables and data."""
        n0 = self.model.params.n0
        mu0 = self.model.params.mu0
        alpha = self.model.params.a
        beta = self.model.params.b
        num_aux = aux_vars.shape[1]
        # BUG FIX: identity checks instead of `data != None` (see above).
        if data is not None:
            nn = num_aux / self.rho + 1
        else:
            nn = num_aux / self.rho
        if data is not None:
            aux_vars = c_[aux_vars, data]
        data_mean = mean(aux_vars, 1)
        # make data_mean a rank-2 D-by-1 array so we can use broadcasting
        data_mean.shape = (data_mean.shape[0], 1)
        nvar = sum((aux_vars - data_mean) ** 2, 1)
        data_mean.shape = (data_mean.shape[0],)
        mu_star = (n0 * mu0 + nn * data_mean) / (n0 + nn)
        beta_star = beta + 0.5 * nvar + (nn * n0 * (mu0 - data_mean) ** 2) / (2 * (n0 + nn))
        n_lam = rgamma(alpha + 0.5 * nn, beta_star)
        n_mu = rnorm(mu_star, (nn + n0) * n_lam)
        return self.model.get_storage(n_mu, n_lam)

    def sample_aux(self, params, tau=None):
        """Sample auxiliary variables given the current state."""
        return rnorm_many(params.mu, params.lam * self.rho, self.num_aux)

    def walk_with_data(self, params, data, tau=None):
        """One kernel step conditioned on ``data``; returns the new
        parameters and an importance-weight correction factor."""
        aux_vars = self.sample_aux(params, tau)
        params = self.sample_posterior(aux_vars, data, tau)
        p1 = self.p_posterior(params, aux_vars, None)
        p2 = self.p_posterior(params, aux_vars, data)
        return (params, p1 / p2)
class Model(object):
    # Marker base class for observation models; concrete models such as
    # DiagonalConjugate define the actual interface.
    pass
class DiagonalConjugate(Model):
    """Normal-Gamma conjugate observation model with diagonal precision.

    Caches posterior sufficient statistics via :meth:`set_data` and
    delegates parameter random walks to a transition kernel.
    """

    def __init__(self, hyper_params, kernelClass=MetropolisWalk,
                 kernelParams=(0.1, 0.001)):
        self.params = hyper_params
        self.dims = self.params.dims
        self.empty = True
        self.kernel = kernelClass(self, kernelParams)
        # Expose the kernel's walk operations directly on the model.
        self.walk = self.kernel.walk
        self.walk_with_data = self.kernel.walk_with_data

    def set_data(self, data):
        """Cache posterior sufficient statistics of `data`.

        `data` is either a single D-dimensional point (1-d array) or a
        D-by-N array of N points.
        """
        if len(data.shape) <= 1:
            # just one data point
            self.mean = data
            self.nvar = zeros_like(data)
            self.nk = 1
            self.nn = self.params.n0 + 1
            self.mun = (self.params.n0 * self.params.mu0 + self.mean) / self.nn
            self.bn = self.params.b + 0.5 / self.nn * self.params.n0 * \
                (self.params.mu0 - self.mean) ** 2
            self.ibn = 1 / self.bn
        else:
            self.mean = mean(data, 1)
            # Per-dimension sum of squared deviations from the sample mean.
            # BUG FIX: the original referenced the undefined name
            # `samplemean` and did not reduce over the sample axis; this
            # mirrors the analogous computation in CaronIndependent.
            self.nvar = sum((data - self.mean[:, newaxis]) ** 2, 1)
            self.nk = data.shape[1]
            self.nn = self.params.n0 + self.nk
            self.mun = (self.params.n0 * self.params.mu0 +
                        self.nk * self.mean) / (self.nn)
            self.bn = (self.params.b + 0.5 * self.nvar +
                       0.5 / self.nn * self.nk * self.params.n0 *
                       (self.params.mu0 - self.mean) ** 2)
            self.ibn = 1 / self.bn
        self.empty = False

    def p_log_likelihood(self, x, params):
        """Compute log p(x|params)."""
        return sum(logpnorm(x, params.mu, params.lam))

    def p_likelihood(self, x, params):
        return exp(self.p_log_likelihood(x, params))

    def p_log_predictive(self, x):
        """Compute log p(x|z): Student-t predictive, or the prior
        predictive when no data has been set."""
        if self.empty:
            p = self.p_log_prior(x)
        else:
            p = sum(logpstudent(
                x,
                self.mun,
                self.nn * (self.params.a + 0.5 * self.nk) / (self.nn + 1) * self.ibn,
                2 * self.params.a + self.nk))
        return p

    def p_predictive(self, x):
        return exp(self.p_log_predictive(x))

    def p_log_posterior_mean(self, mu, lam):
        """Compute log p(mu|z)."""
        if self.empty:
            p = 0
        else:
            p = sum(logpnorm(mu, self.mun, lam * self.nn))
        return p

    def p_log_posterior_precision(self, lam):
        """Compute log p(lam|z)."""
        if self.empty:
            p = 0
        else:
            p = sum(logpgamma(lam, self.params.a + 0.5 * self.nk, self.bn))
        return p

    def p_log_posterior(self, params):
        return (self.p_log_posterior_mean(params.mu, params.lam) +
                self.p_log_posterior_precision(params.lam))

    def p_posterior(self, params):
        return exp(self.p_log_posterior(params))

    def p_log_prior(self, x):
        r"""Compute log p(x) (i.e. \int p(x|theta)p(theta) dtheta)."""
        return sum(logpstudent(x, self.params.mu0,
                               self.params.n0 / (self.params.n0 + 1) * self.params.a / self.params.b,
                               2. * self.params.a))

    def p_prior(self, x):
        return exp(self.p_log_prior(x))

    def p_log_prior_params(self, params):
        return (
            sum(logpnorm(params.mu, self.params.mu0, self.params.n0 * params.lam))
            + sum(logpgamma(params.lam, self.params.a, self.params.b)))

    def p_prior_params(self, params):
        return exp(self.p_log_prior_params(params))

    def sample_posterior(self):
        """Draw (mu, lam) from the posterior (prior when no data set)."""
        if self.empty:
            return self.sample_prior()
        lam = rgamma(self.params.a + 0.5 * self.nk, self.bn)
        mu = rnorm(self.mun, lam * self.nn)
        return self.get_storage(mu, lam)

    def sample_prior(self):
        """Draw (mu, lam) from the Normal-Gamma prior."""
        lam = rgamma(self.params.a, self.params.b)
        mu = rnorm(self.params.mu0, self.params.n0 * lam)
        return self.get_storage(mu, lam)

    def sample_Uz(self, mu, lam, data, num_sir_samples=10):
        """Sample from p(U|U_old,z)=p(U|U_old)p(z|U)/Z via SIR.

        BUG FIXES vs. the original: the kernel walk is called through
        ``self.walk`` with a parameter-storage object (the bare name
        ``walk`` and the raw ``(mu, lam)`` arguments were NameErrors /
        signature mismatches), and ``p_old`` is passed as a prior
        *probability*, which is what MetropolisWalk.walk expects.
        """
        old_params = self.get_storage(mu, lam)
        if self.empty:
            return (self.walk(old_params), 1)
        # SIR: sample from P(U|U_old), compute weights P(x|U), then
        # sample from the discrete distribution.
        mu_samples = zeros((self.dims, num_sir_samples))
        lam_samples = zeros((self.dims, num_sir_samples))
        sir_weights = zeros(num_sir_samples)
        p_old = self.p_prior_params(old_params)
        for s in range(num_sir_samples):
            tmp = self.walk(old_params, p_old=p_old)
            mu_samples[:, s] = tmp.mu
            lam_samples[:, s] = tmp.lam
            sir_weights[s] = self.p_posterior(tmp)
        sir_weights = sir_weights / sum(sir_weights)
        s = rdiscrete(sir_weights)
        new_mu = mu_samples[:, s]
        new_lam = lam_samples[:, s]
        weight = sir_weights[s]
        return (self.get_storage(new_mu, new_lam), weight)

    def get_storage(self, mu=None, lam=None):
        """Get a new parameter storage object."""
        return DiagonalConjugateStorage(mu, lam)
class DiagonalConjugateHyperParams(object):
    """Hyperparameters of the diagonal Normal-Gamma conjugate model.

    a, b : Gamma shape/rate parameters for the precision lam.
    mu0, n0 : prior mean and pseudo-count for the mean mu.
    If ``dims`` is given, scalar a/b/mu0 are broadcast to arrays of that
    length; otherwise they must already be (numpy) arrays of equal shape.
    """
    def __init__(self, a, b, mu0, n0, dims=None):
        if dims is not None:
            self.a = ones(dims) * a
            self.b = ones(dims) * b
            self.mu0 = ones(dims) * mu0
        else:
            self.a = a
            self.b = b
            self.mu0 = mu0
        self.n0 = n0
        # BUG FIX: the Python-2-only statement form `raise ValueError, "..."`
        # is replaced by the call form, valid on both Python 2 and 3.
        if self.a.shape != self.b.shape:
            raise ValueError("shape mismatch: a.shape: " + str(a.shape) +
                             "b.shape: " + str(b.shape))
        elif self.a.shape != self.mu0.shape:
            raise ValueError("shape mismatch: a.shape: " + str(a.shape) +
                             "mu0.shape: " + str(mu0.shape))
        if len(self.a.shape) != 0:
            self.dims = self.a.shape[0]
        else:
            self.dims = 1

    def compute_stats(self):
        """Return a summary string with the implied prior moments of mu
        and lam."""
        e_mu = self.mu0
        v_mu = self.b / (self.n0 * (self.a - 1))
        e_lam = self.a / self.b
        v_lam = self.a / (self.b ** 2)
        out = ("E[mu] = %.3f, V[mu] = %.3f, E[lam] = %.3f, V[lam] = %.3f"
               % (e_mu, v_mu, e_lam, v_lam))
        return out

    def __str__(self):
        out = ['Model hyperparameters:\n']
        out.append('a: ' + str(self.a) + '\n')
        out.append('b: ' + str(self.b) + '\n')
        out.append('mu0: ' + str(self.mu0) + '\n')
        out.append('n0: ' + str(self.n0) + '\n')
        return ''.join(out)
class DiagonalConjugateStorage(object):
    """Value object holding the parameter values of a single component.

    Attributes:
        mu: component mean (or None)
        lam: component (diagonal) precision (or None)
    """

    def __init__(self, mu=None, lam=None):
        self.mu = mu
        self.lam = lam

    def __str__(self):
        return 'mu: %s\nlambda: %s' % (self.mu, self.lam)

    # repr and str render identically, as in the original.
    __repr__ = __str__
class Particle(object):
    """The Particle class stores the state of the particle filter / Gibbs
    sampler.

    If ``copy`` is given, a shallow copy of that particle is made (array
    fields are copied, storage objects are shallow-copied); otherwise a
    fresh, empty state for T time steps is allocated.
    """
    def __init__(self,T,copy=None,storage_class=FixedSizeStoreRing,
                 max_clusters=100):
        # NOTE(review): `copy != None` would be more idiomatic as
        # `copy is not None`.
        if copy != None:
            # Copy constructor branch: duplicate arrays, shallow-copy the
            # per-time-step storage objects so history is shared.
            self.T = copy.T
            self.c = copy.c.copy()
            self.d = copy.d.copy()
            self.K = copy.K
            self.max_clusters = copy.max_clusters
            self.mstore = copy.mstore.shallow_copy()
            self.lastspike = copy.lastspike.shallow_copy()
            self.U = copy.U.shallow_copy()
            self.birthtime = copy.birthtime.shallow_copy()
            self.deathtime = copy.deathtime.shallow_copy()
            self.storage_class = copy.storage_class
        else:
            self.T = T
            self.storage_class = storage_class
            self.max_clusters = max_clusters
            # allocation variables for all time steps
            self.c = -1*ones(T,dtype=int16)
            # death times of allocation variables (assume they
            # don't die until they do)
            self.d = T * ones(T,dtype=uint32)
            # total number of clusters in this particle up to the current time
            self.K = 0
            # array to store class counts at each time step
            self.mstore = self.storage_class(T,dtype=int32,
                    max_clusters=self.max_clusters)
            # storage object for the spike time of the last spike associated
            # with each cluster for each time step.
            self.lastspike = self.storage_class(T,dtype=float64,
                    max_clusters=self.max_clusters)
            # Parameter values of each cluster 1...K at each time step 1...T
            self.U = self.storage_class(T,dtype=object,
                    max_clusters=self.max_clusters)
            # vector to store the birth times of clusters
            self.birthtime = ExtendingList()
            # vector to store the death times of allocation variables
            # (0 if not dead)
            self.deathtime = ExtendingList()
    def shallow_copy(self):
        """Make a shallow copy of this particle.

        In essence, copies of lists are created, but the list contents are not
        copied. This is useful for making copies of particles during
        resampling, such that the resulting particles share the same history,
        but can be moved forward independently.
        """
        return Particle(self.T,self)
    def __str__(self):
        # Human-readable dump of all state fields.
        out = []
        out.append('c: ' + str(self.c)+'\n')
        out.append('d: ' + str(self.d)+'\n')
        out.append('K: ' + str(self.K)+'\n')
        out.append('mstore: ' + str(self.mstore)+'\n')
        out.append('lastspike: ' + str(self.lastspike)+'\n')
        out.append('U: ' + str(self.U)+'\n')
        return ''.join(out)
    __repr__ = __str__
class GibbsState():
    """Class representing the state of the Gibbs sampler. This is similar to
    a particle in many respects. However, as in the Gibbs sampler we only
    need to hold one state object in memory at any given time, we can trade
    off speed and memory consumption differently.

    If a particle object is passed to the constructor it will be used to
    initialize the state.
    """
    def __init__(self,particle=None,model=None,max_clusters=100):
        self.max_clusters = max_clusters
        # NOTE(review): `!= None` comparisons would be more idiomatic as
        # `is not None`.
        if particle != None and model != None:
            self.from_particle(particle,model)
        else:
            self.__empty_state()
    def from_particle(self,particle,model):
        """Construct state from the given particle object."""
        self.T = particle.T
        # allocation variables for all time steps
        self.c = particle.c.copy()
        # death times of allocation variables
        self.d = particle.d.copy()
        # make sure the maximum death time is T
        self.d[self.d>self.T] = self.T
        # total number of clusters in the current state
        self.K = particle.K
        # array to store class counts at each time step
        self.mstore = zeros((self.max_clusters,self.T),dtype=int32)
        self.lastspike = zeros((self.max_clusters,self.T),dtype=float64)
        self.U = empty((self.max_clusters,self.T),dtype=object)
        # auxiliary variables per (time, cluster); dimensions come from
        # the model's transition kernel
        self.aux_vars = zeros(
                (self.T,
                 self.max_clusters,
                 model.kernel.D,
                 model.kernel.num_aux))
        # unpack the particle's per-time-step storage into dense arrays
        for t in range(self.T):
            m = particle.mstore.get_array(t)
            n = m.shape[0]
            self.mstore[0:n,t] = m
            m = particle.lastspike.get_array(t)
            n = m.shape[0]
            self.lastspike[0:n,t] = m
            m = particle.U.get_array(t)
            n = m.shape[0]
            self.U[0:n,t] = m
        # vector to store the birth times of clusters
        self.birthtime = particle.birthtime.to_array(self.max_clusters,
                dtype=int32)
        # vector to store the death times of clusters (0 if not dead)
        self.deathtime = particle.deathtime.to_array(self.max_clusters,
                dtype=int32)
        self.deathtime[self.deathtime==0] = self.T
        # determine active clusters
        active = where(sum(self.mstore,1)>0)[0]
        # compute free labels
        self.free_labels = deque(reversed(list(set(range(self.max_clusters))
            -set(active))))
        # all clusters must have parameters from time 0 to their death
        # -> sample them from their birth backwards
        for c in active:
            # NOTE(review): `t` here is left over from the loop above
            # (always T-1), so this debug message reports a stale time.
            logging.debug("sampling params for cluster %i at time %i"
                    % (c,t))
            for tau in reversed(range(0,self.birthtime[c])):
                self.aux_vars[tau,c,:,:] = model.kernel.sample_aux(
                        self.U[c,tau+1])
                self.U[c,tau] = model.kernel.sample_posterior(
                        self.aux_vars[tau,c,:,:])
        self.initialize_aux_variables(model)
    def initialize_aux_variables(self,model):
        """Sample initial value for the auxiliary variables given the rest of
        of the state. This is done by sampling forward in time."""
        for t in range(self.T):
            active = where(self.mstore[:,t]>0)[0]
            for c in active:
                if t >= self.birthtime[c]:
                    self.aux_vars[t,c,:,:] = model.kernel.sample_aux(
                            self.U[c,t])
    def __empty_state(self):
        """Set all fields to represent an empty state."""
        pass
    def check_consistency(self,data_time):
        """Check consistency of the Gibbs sampler state.

        In particular, perform the following checks:
        1) if m(c,t) > 0 then U(c,t) != None
        2) m(c,birth:death-1)>0 and m(c,0:birth)==0 and m(c,death:T)==0
        3) m matches the information in c and deathtime
        4) check that lastspike is correct
        """
        errors = 0
        # check 1) we have parameter values for all non-empty clusters
        idx = where(self.mstore>0)
        if any(isNone(self.U[idx])):
            logging.error("Consitency error: Some needed parameters are None!"+
                    str(where(isNone(self.U[idx]))))
            errors += 1
        # check 1b) we need parameter values from 0 to the death of each cluster
        active = where(sum(self.mstore,1)>0)[0]
        for c in active:
            d = self.deathtime[c]
            if any(isNone(self.U[c,0:d])):
                logging.error("Consitency error: Parameters not avaliable " +
                        "from the start")
        # check 2) There are no "re-births", assuming birthtime and deathtime
        # are correct
        active = where(sum(self.mstore,1)>0)[0]
        for c in active:
            # the death time of _cluster_ c is the first zero after its birth
            birth = self.birthtime[c]
            active_birth_to_end = where(self.mstore[c,birth:]==0)[0]
            if active_birth_to_end.shape[0] == 0:
                death = self.T
            else:
                death = birth + active_birth_to_end[0]
            if death != self.deathtime[c]:
                logging.error("deatime does not contain the first zero after "+
                        "birth of cluster %i (%i!=%i)" %
                        (c,self.deathtime[c],death))
            if (any(self.mstore[c,birth:death]==0)):
                logging.error(("Consistency error: mstore 0 while " +
                        "cluster %i is alive") % c)
            if any(self.mstore[c,0:birth]>0):
                logging.error(("Consistency error: mstore > 0 while " +
                        "cluster %i is not yet born") % c)
            if any(self.mstore[c,death:]>0):
                logging.error(("Consistency error: mstore > 0 while "
                        "cluster %i is already dead!") % c)
        # check 3) we can reconstruct mstore from c and d
        new_ms = self.reconstruct_mstore(self.c,self.d)
        if any(self.mstore != new_ms):
            logging.error("Consitency error: Cannot reconstruct " +
                    "mstore from c and d")
        # check 4)
        # lastspike[c,t] is supposed to contain the last spike time for all
        # clusters _after_ the observation at time t
        lastspike = zeros(self.max_clusters)
        for t in range(self.T):
            lastspike[self.c[t]] = data_time[t]
            if any(self.lastspike[:,t]!=lastspike):
                logging.error("Consistency error:lastspike incorrect at " +
                        "time %i" % t)
                logging.error(str(where(self.lastspike[:,t]!=lastspike)))
    def reconstruct_lastspike(self,data_time):
        # Rebuild lastspike by scanning forward over the observations.
        lastspike = zeros(self.max_clusters)
        for t in range(self.T):
            lastspike[self.c[t]] = data_time[t]
            self.lastspike[:,t] = lastspike
    def reconstruct_mstore(self,c,d):
        # Rebuild the running per-cluster counts from allocations c and
        # death times d: carry counts forward, add the new allocation,
        # then subtract allocations whose death time is t.
        new_ms = zeros_like(self.mstore)
        for t in range(self.T):
            if t > 0:
                new_ms[:,t] = new_ms[:,t-1]
            new_ms[c[t],t] += 1
            dying = where(d == t)[0]
            for tau in dying:
                new_ms[c[tau],t] -= 1
        return new_ms
    def __str__(self,include_U=True):
        # Human-readable dump; U can be large, so it is optional.
        out = []
        out.append('c: ' + str(self.c)+'\n')
        out.append('d: ' + str(self.d)+'\n')
        out.append('K: ' + str(self.K)+'\n')
        out.append('mstore: ' + str(self.mstore)+'\n')
        out.append('lastspike: ' + str(self.lastspike)+'\n')
        if include_U:
            out.append('U: ' + str(self.U)+'\n')
        return ''.join(out)
| 24,372 | 8,208 |
# Generated by Django 3.2.6 on 2021-09-24 07:53
from django.db import migrations
class Migration(migrations.Migration):
    # Removes the `html_fragment` field from the DocumentSort model.

    dependencies = [
        ("documents", "0006_auto_20210924_0613"),
    ]
    operations = [
        migrations.RemoveField(
            model_name="documentsort",
            name="html_fragment",
        ),
    ]
| 343 | 127 |
# Copyright 2019 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configparser
import logging
import yaml
from airflow import AirflowException
from airflow.plugins_manager import AirflowPlugin
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
import kubernetes
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_config_map import V1ConfigMap
from kubernetes.client.models.v1_object_meta import V1ObjectMeta
from shipyard_airflow.conf import config
from shipyard_airflow.control.helpers.action_helper import \
get_deployment_status
from shipyard_airflow.plugins.xcom_puller import XcomPuller
from shipyard_airflow.common.document_validators.document_validation_utils \
import DocumentValidationUtils
from shipyard_airflow.plugins.deckhand_client_factory import \
DeckhandClientFactory
from shipyard_airflow.common.document_validators.errors import \
DocumentNotFoundError
LOG = logging.getLogger(__name__)
# Variable to hold details about how the Kubernetes ConfigMap is stored
CONFIG_MAP_DETAILS = {
'api_version': 'v1',
'kind': 'ConfigMap',
'pretty': 'true'
}
class DeploymentStatusOperator(BaseOperator):
    """Deployment status operator

    Update Kubernetes with the deployment status of this dag's action
    """
    @apply_defaults
    def __init__(self, shipyard_conf, main_dag_name, force_completed=False,
                 *args, **kwargs):
        super(DeploymentStatusOperator, self).__init__(*args, **kwargs)
        # Path to the shipyard configuration file.
        self.shipyard_conf = shipyard_conf
        # Name of the main DAG; used to locate this action's XCom data.
        self.main_dag_name = main_dag_name
        # When True, the deployment is reported as completed regardless
        # of the individual step outcomes.
        self.force_completed = force_completed
        self.xcom_puller = None

    def execute(self, context):
        """Execute the main code for this operator.

        Create a ConfigMap with the deployment status of this dag's action
        """
        LOG.info("Running deployment status operator")
        self.xcom_puller = XcomPuller(self.main_dag_name, context['ti'])

        # Required for the get_deployment_status helper to function properly
        config.parse_args(args=[], default_config_files=[self.shipyard_conf])

        # First we need to check if the concurrency check was successful as
        # this operator is expected to run even if upstream steps fail
        if not self.xcom_puller.get_concurrency_status():
            msg = "Concurrency check did not pass, so the deployment status " \
                  "will not be updated"
            LOG.error(msg)
            raise AirflowException(msg)

        deployment_status_doc, revision_id = self._get_status_and_revision()
        deployment_version_doc = self._get_version_doc(revision_id)

        # Merge the status and version documents into one payload.
        full_data = {
            'deployment': deployment_status_doc,
            **deployment_version_doc
        }
        config_map_data = {'release': yaml.safe_dump(full_data)}
        self._store_as_config_map(config_map_data)

    def _get_status_and_revision(self):
        """Retrieve the deployment status information from the appropriate
        helper function

        :return: dict with the status of the deployment
        :return: revision_id of the action
        """
        action_info = self.xcom_puller.get_action_info()
        deployment_status = get_deployment_status(
            action_info,
            force_completed=self.force_completed)
        revision_id = action_info['committed_rev_id']
        return deployment_status, revision_id

    def _get_version_doc(self, revision_id):
        """Retrieve the deployment-version document from Deckhand

        :param revision_id: the revision_id of the docs to grab the
                            deployment-version document from

        :return: deployment-version document returned from Deckhand
        """
        # Read and parse shipyard.conf
        config = configparser.ConfigParser()
        config.read(self.shipyard_conf)
        doc_name = config.get('document_info', 'deployment_version_name')
        doc_schema = config.get('document_info', 'deployment_version_schema')

        dh_client = DeckhandClientFactory(self.shipyard_conf).get_client()
        dh_tool = DocumentValidationUtils(dh_client)
        try:
            deployment_version_doc = dh_tool.get_unique_doc(
                revision_id=revision_id,
                schema=doc_schema,
                name=doc_name)
            return deployment_version_doc
        except DocumentNotFoundError:
            # A missing version document is not fatal; the status is
            # still stored, just without version information.
            LOG.info("There is no deployment-version document in Deckhand "
                     "under the revision '{}' with the name '{}' and schema "
                     "'{}'".format(revision_id, doc_name, doc_schema))
            return {}

    def _store_as_config_map(self, data):
        """Store given data in a Kubernetes ConfigMap

        :param dict data: The data to store in the ConfigMap
        """
        LOG.info("Storing deployment status as Kubernetes ConfigMap")
        # Read and parse shipyard.conf
        config = configparser.ConfigParser()
        config.read(self.shipyard_conf)
        name = config.get('deployment_status_configmap', 'name')
        namespace = config.get('deployment_status_configmap', 'namespace')

        k8s_client = self._get_k8s_client()
        cfg_map_obj = self._create_config_map_object(name, namespace, data)
        cfg_map_naming = "(name: {}, namespace: {})".format(name, namespace)
        # Try to update an existing ConfigMap first; fall back to
        # creating it when the patch returns 404 (not found).
        try:
            LOG.info("Updating deployment status config map {}, "
                     .format(cfg_map_naming))
            k8s_client.patch_namespaced_config_map(
                name,
                namespace,
                cfg_map_obj,
                pretty=CONFIG_MAP_DETAILS['pretty'])
        except ApiException as err:
            if err.status != 404:
                raise
            # ConfigMap still needs to be created
            LOG.info("Deployment status config map does not exist yet")
            LOG.info("Creating deployment status config map {}".format(
                cfg_map_naming))
            k8s_client.create_namespaced_config_map(
                namespace,
                cfg_map_obj,
                pretty=CONFIG_MAP_DETAILS['pretty'])

    @staticmethod
    def _get_k8s_client():
        """Create and return a Kubernetes client

        :returns: A Kubernetes client object
        :rtype: kubernetes.client
        """
        # Note that we are using 'in_cluster_config'
        LOG.debug("Loading Kubernetes config")
        kubernetes.config.load_incluster_config()
        LOG.debug("Creating Kubernetes client")
        return kubernetes.client.CoreV1Api()

    @staticmethod
    def _create_config_map_object(name, namespace, data):
        """Create/return a Kubernetes ConfigMap object out of the given data

        :param dict data: The data to put into the config map

        :returns: A config map object made from the given data
        :rtype: V1ConfigMap
        """
        LOG.debug("Creating Kubernetes config map object")
        metadata = V1ObjectMeta(
            name=name,
            namespace=namespace
        )
        return V1ConfigMap(
            api_version=CONFIG_MAP_DETAILS['api_version'],
            kind=CONFIG_MAP_DETAILS['kind'],
            data=data,
            metadata=metadata
        )
class DeploymentStatusOperatorPlugin(AirflowPlugin):
    """Creates DeploymentStatusOperatorPlugin in Airflow."""
    # Registers DeploymentStatusOperator under the plugin name below.
    name = "deployment_status_operator"
    operators = [DeploymentStatusOperator]
| 8,061 | 2,208 |
import numpy as np
from optparse import OptionParser
from sigvisa.treegp.gp import GP, GPCov
from sigvisa import Sigvisa
from sigvisa.source.event import get_event
from sigvisa.treegp.cover_tree import VectorTree
import pyublas
def main():
    """Assign each event to its nearest cluster center and write one
    evid file per cluster (``<out_prefix>_NNN``)."""
    parser = OptionParser()
    s = Sigvisa()
    # NOTE(review): cursor is never used below -- presumably kept for the
    # side effect of opening the DB connection; confirm before removing.
    cursor = s.dbconn.cursor()
    parser.add_option("--evid_file", dest="evid_file", default=None, type="str", help="file containing evids to cluster")
    parser.add_option("--clusters_file", dest="clusters_file", default=None, type="str", help="file containing cluster centers")
    parser.add_option("--out_prefix", dest="out_prefix", default="cluster_evids", type="str", help="prefix for output evid files")
    parser.add_option("--dummy", dest="dummy", default=False, action="store_true", help="don't actually write any files, just print cluster sizes")
    (options, args) = parser.parse_args()

    evids = np.array([int(evid) for evid in np.loadtxt(options.evid_file)])
    evs = [get_event(evid) for evid in evids]
    # One row per event: (lon, lat, depth).
    X = np.array([(ev.lon, ev.lat, ev.depth) for ev in evs])

    cluster_centers = np.loadtxt(options.clusters_file)
    # Distance metric over lon/lat/depth, used only to find the nearest
    # center for each event.
    cluster_metric = GPCov(wfn_str="se", dfn_str="lld", dfn_params=(1.0, 1.0), wfn_params=(1.0,))
    cluster_tree = VectorTree(cluster_centers, 1, *cluster_metric.tree_params())
    cluster_distances = cluster_tree.kernel_matrix(pyublas.why_not(X),
                                                   pyublas.why_not(cluster_centers), True)
    # Each event goes to the center at minimum distance.
    cluster_assignments = np.argmin(cluster_distances, axis=1)
    n_clusters = len(cluster_centers)
    for i in range(n_clusters):
        idxs = cluster_assignments == i
        cluster_evids = evids[idxs]
        out_fname = options.out_prefix + "_%03d" % i
        distances = cluster_distances[idxs, i]
        mind, maxd, meand = np.min(distances), np.max(distances), np.mean(distances)
        if not options.dummy:
            np.savetxt(out_fname, cluster_evids, fmt="%d")
        # NOTE(review): "%.f" prints the mean with zero decimals while
        # min/max use "%.1f" -- confirm that is intentional.
        print "wrote", len(cluster_evids), "events to", out_fname, "distance to center min %.1f max %.1f mean %.f" % (mind, maxd, meand)

if __name__ == "__main__":
    main()
| 2,162 | 752 |
# =================================================
# GUI program to analyse STEM images of filamentous structures: TRACKING
# -----------------------------------------------------------------------------
# Version 1.0
# Created: November 7th, 2017
# Last modification: January 8th, 2019
# author: @Cristina_MT
# =================================================
from sys import platform as sys_pf
import tkinter as tk
from tkinter import ttk, filedialog
import time
import numpy as np
from PIL import Image
if sys_pf == 'darwin':
import matplotlib
matplotlib.use('TkAgg')
from wintrack import WindowTracking
class fiAS(tk.Frame):
    # Main application frame for the tracking GUI; window behaviour is
    # added by calling WindowTracking.__init__ explicitly below.
    def __init__(self, master=None):
        # Class-level flag read by the launcher code below to choose the
        # initial window geometry.
        fiAS.controlanchor = 0
        tk.Frame.__init__(self, master)
        self.grid(sticky='nsew')
        # NOTE(review): fiAS does not inherit from WindowTracking, yet its
        # __init__ is invoked on self -- presumably it only attaches
        # widgets/attributes; confirm this is intentional.
        WindowTracking.__init__(self)

app = fiAS()
app.master.title('fiAS Tracking v1.0 (January 2019)')
# Pick the window size according to the anchor flag (which may have been
# changed by WindowTracking.__init__).
if fiAS.controlanchor == 0: app.master.geometry('800x600+50+50')
elif fiAS.controlanchor == 1: app.master.geometry('900x550+50+50')
app.mainloop()
| 1,048 | 378 |
import random

# Baseline value for each of the 18 positions -- looks like golf pars
# (3/4/5 over 18 holes); inferred from the 'b'/'B' markers below, confirm.
p = [4, 3, 4, 4, 5, 3, 5, 4, 4, 5, 4, 4, 3, 4, 5, 4, 3, 4]
# Display substitution indexed by (rolled - baseline + 1):
# one under -> 'b', equal -> keep the numeric value (0 sentinel), one over -> 'B'.
b = ['b', 0, 'B']
# f[round][player] = [total score, per-position display values].
f = [{i: [0, 0] for i in range(4)} for z in range(3)]
# Winner (unique lowest total) of the previous round; None before round 1.
w = None
for r in range(3):
    c = True
    a = [0, 1, 2, 3]
    m = None
    # Re-roll all four players until the minimum total is unique.
    while c:
        # Each player's value per position is within +/-1 of the baseline.
        t = [map(lambda x: random.randint(x-1, x+1), p) for i in range(4)]
        s = [sum(i) for i in t]
        # Convert each rolled value to its display form via table b.
        g = [[l if b[l-p[i]+1] == 0 else b[l-p[i]+1] for i, l in enumerate(l)] for l in t]
        m = min(s)
        if s.count(m) == 1:
            c = False
    if w is not None:
        # The previous round's winner is handed the worst (highest) total
        # this round; remove that score before assigning the rest.
        l = max(s)
        i = s.index(l)
        f[r][w] = [l, g[i]]
        del s[i]
        del g[i]
        a.remove(w)
    for i in range(len(a)):
        f[r][a[i]] = [s[i], g[i]]
    # NOTE(review): after the del above, indices of s no longer align with
    # player numbers -- this index is into the reduced list, so w may
    # identify the wrong player in rounds 2 and 3; confirm intent.
    w = s.index(min(s))
# Print per-round standings, best (lowest) total first.
for r in f:
    print "Round %d" % (f.index(r)+1)
    for p, q in sorted(r.iteritems(), key=lambda (x, y): y[0]):
        print "Player %d: %s - %d" % ((p+1), reduce(lambda x, y: '{} {}'.format(x, y), q[1]), q[0])
| 946 | 463 |
"""Util module tests
"""
import os.path
from unittest import TestCase
import mupub
from clint.textui.validators import ValidationError
from .tutils import PREFIX
# Fixture piece locations under the shared test PREFIX tree.
_SIMPLE_PATH = os.path.join(PREFIX, 'SorF', 'O77', 'sorf-o77-01',)
_LYS_PATH = os.path.join(PREFIX, 'PaganiniN', 'O1', 'Caprice_1',)
class UtilsTest(TestCase):
    """Tests for mupub.utils helpers."""

    def test_find(self):
        """Find files (for zipping ly files)."""
        here = os.getcwd()
        try:
            os.chdir(_SIMPLE_PATH)
            flist = mupub.utils.find_files('.')
            self.assertEqual(len(flist), 2)
        finally:
            # Always restore the working directory for later tests.
            os.chdir(here)

    def test_resolve(self):
        """Resolving file input: base name and input file are detected."""
        here = os.getcwd()
        try:
            for test_path in [_SIMPLE_PATH, _LYS_PATH,]:
                os.chdir(test_path)
                base, infile = mupub.utils.resolve_input()
                self.assertEqual(base, os.path.basename(test_path))
                self.assertIsNotNone(infile)
        finally:
            os.chdir(here)

    def test_bools(self):
        """BooleanValidator accepts y/n answers and raises on anything else."""
        boolv = mupub.utils.BooleanValidator('some message')
        boolv_nom = mupub.utils.BooleanValidator()
        self.assertTrue(boolv('y'), 'y is True')
        self.assertFalse(boolv('n'), 'n is False')
        self.assertTrue(not boolv_nom('N'), 'not N is True')
        with self.assertRaises(ValidationError):
            if boolv('x'):
                # Bug fix: TestCase has no assertFail(); the unconditional
                # failure method is fail().
                self.fail('should not be here!')
| 1,479 | 496 |
"""CLI handling for `routemaster`."""
import logging
import yaml
import click
import layer_loader
from routemaster.app import App
from routemaster.cron import CronThread
from routemaster.config import ConfigError, load_config
from routemaster.server import server
from routemaster.middleware import wrap_application
from routemaster.validation import ValidationError, validate_config
from routemaster.gunicorn_application import GunicornWSGIApplication
logger = logging.getLogger(__name__)
@click.group()
@click.option(
    '-c',
    '--config-file',
    'config_files',
    help="Path to the service config file.",
    type=click.File(encoding='utf-8'),
    required=True,
    multiple=True,
)
@click.pass_context
def main(ctx, config_files):
    """Shared entrypoint configuration."""
    # The schedule library logs every poll; silence it.
    logging.getLogger('schedule').setLevel(logging.CRITICAL)
    # Multiple -c files are combined by layer_loader -- presumably later
    # files override earlier ones; confirm layer_loader semantics.
    # NOTE(review): bare yaml.load without an explicit Loader is deprecated
    # and can construct arbitrary objects; consider yaml.safe_load if the
    # configs never need custom tags -- confirm.
    config_data = layer_loader.load_files(
        config_files,
        loader=yaml.load,
    )
    try:
        config = load_config(config_data)
    except ConfigError:
        logger.exception("Configuration Error")
        # Exit non-zero so callers can detect the bad configuration.
        click.get_current_context().exit(1)
    # Make the App available to subcommands via the click context.
    ctx.obj = App(config)
    _validate_config(ctx.obj)
@main.command()
@click.pass_context
def validate(ctx):
    """
    Entrypoint for validation of configuration files.

    Validation is done by the main handler in order to cover all code paths,
    so this function is a stub so that `serve` does not have to be called.
    """
    # Intentionally empty: loading + validation already ran in main().
    pass
@main.command()
@click.option(
    '-b',
    '--bind',
    help="Bind address and port.",
    type=str,
    default='[::]:2017',
)
@click.option(
    '--debug/--no-debug',
    help="Enable debugging mode.",
    default=False,
)
@click.option(
    '--workers',
    help="Number of gunicorn workers to run.",
    type=int,
    default=1,
)
@click.pass_context
def serve(ctx, bind, debug, workers):  # pragma: no cover
    """Entrypoint for serving the Routemaster HTTP service."""
    # App constructed in main() and stored on the click context.
    app = ctx.obj
    server.config.app = app
    if debug:
        server.config['DEBUG'] = True
    # Background cron thread runs alongside the HTTP workers.
    cron_thread = CronThread(app)
    cron_thread.start()
    wrapped_server = wrap_application(app, server)

    def post_fork():
        # Passed to gunicorn as its post_fork hook -- presumably runs in
        # each worker after fork so each gets freshly initialised state;
        # confirm against GunicornWSGIApplication.
        app.initialise()
        app.logger.init_flask(server)

    try:
        instance = GunicornWSGIApplication(
            wrapped_server,
            bind=bind,
            debug=debug,
            workers=workers,
            post_fork=post_fork,
        )
        instance.run()
    finally:
        # Stop the cron thread even if gunicorn exits with an error.
        cron_thread.stop()
def _validate_config(app: App):
    """Validate the loaded configuration, exiting the CLI on failure."""
    try:
        validate_config(app, app.config)
    except ValidationError as exc:
        logger.exception(f"Validation Error: {exc}")
        ctx = click.get_current_context()
        ctx.exit(1)
| 2,736 | 842 |
import datetime
import logging
import moto
import pytest
from .. import s3_cleanup
class TestBucketsMoreThanTTL:
    """Buckets older than the configured TTL get deleted."""

    @pytest.fixture
    def test_class(self):
        with moto.mock_s3():
            whitelist = {}
            # ttl of -1 makes every bucket immediately exceed its TTL.
            settings = {
                "general": {"dry_run": False},
                "services": {"s3": {"buckets": {"clean": True, "ttl": -1}}},
            }
            execution_log = {"AWS": {}}
            test_class = s3_cleanup.S3Cleanup(
                logging, whitelist, settings, execution_log
            )
            yield test_class

    def test(self, test_class):
        # create test bucket
        test_class.client_s3.create_bucket(Bucket="test")

        # validate bucket creation
        response = test_class.client_s3.list_buckets()
        assert response["Buckets"][0]["Name"] == "test"

        # run the bucket cleanup
        test_class.buckets()

        # validate bucket deletion
        response = test_class.client_s3.list_buckets()
        assert response["Buckets"] == []
class TestBucketsLessThanTTL:
    """Buckets younger than the configured TTL are kept."""

    @pytest.fixture
    def test_class(self):
        with moto.mock_s3():
            whitelist = {}
            # Large ttl: no bucket should be old enough to clean.
            settings = {
                "general": {"dry_run": False},
                "services": {"s3": {"buckets": {"clean": True, "ttl": 5000}}},
            }
            execution_log = {"AWS": {}}
            test_class = s3_cleanup.S3Cleanup(
                logging, whitelist, settings, execution_log
            )
            yield test_class

    def test(self, test_class):
        # create test bucket
        test_class.client_s3.create_bucket(Bucket="test")

        # validate bucket creation
        response = test_class.client_s3.list_buckets()
        assert response["Buckets"][0]["Name"] == "test"

        # run the bucket cleanup
        test_class.buckets()

        # validate the bucket was retained (TTL not exceeded)
        response = test_class.client_s3.list_buckets()
        assert response["Buckets"][0]["Name"] == "test"
class TestBucketsWhitelist:
    """Whitelisted buckets survive cleanup even when past their TTL."""

    @pytest.fixture
    def test_class(self):
        with moto.mock_s3():
            # "test" is whitelisted, so it must never be deleted.
            whitelist = {"s3": {"bucket": ["test"]}}
            settings = {
                "general": {"dry_run": False},
                "services": {"s3": {"buckets": {"clean": True, "ttl": -1}}},
            }
            execution_log = {"AWS": {}}
            test_class = s3_cleanup.S3Cleanup(
                logging, whitelist, settings, execution_log
            )
            yield test_class

    def test(self, test_class):
        # create test bucket
        test_class.client_s3.create_bucket(Bucket="test")

        # validate bucket creation
        response = test_class.client_s3.list_buckets()
        assert response["Buckets"][0]["Name"] == "test"

        # run the bucket cleanup
        test_class.buckets()

        # validate the whitelisted bucket was retained
        response = test_class.client_s3.list_buckets()
        assert response["Buckets"][0]["Name"] == "test"
| 2,988 | 912 |
"""
When a widget is positioned with sticky,
the size of the widget itself is just big
enough to contain any text and other
contents inside of it. It won’t fill the
entire grid cell. In order to fill the
grid, you can specify "ns" to force the
widget to fill the cell in the vertical
direction, or "ew" to fill the cell in the
vertical direction. To fill the entire
cell, set sticky to "nsew". The following
example illustrates each of these options:
"""
import tkinter as tk
window = tk.Tk()
window.rowconfigure(0, minsize=50)
window.columnconfigure([0, 1, 2, 3], minsize=50)
label1 = tk.Label(text="1", bg="black", fg="white")
label2 = tk.Label(text="2", bg="black", fg="white")
label3 = tk.Label(text="3", bg="black", fg="white")
label4 = tk.Label(text="4", bg="black", fg="white")
label1.grid(row=0, column=0)
label2.grid(row=0, column=1, sticky="ew")
label3.grid(row=0, column=2, sticky="ns")
label4.grid(row=0, column=3, sticky="nsew")
window.mainloop() | 965 | 345 |
#!/usr/bin/env python
'''
**************************************************************************
* This class performs most of the graph manipulations.
* @authors Benjamin Renoust, Guy Melancon
* @created May 2012
**************************************************************************
'''
import json
import sys
from tulip import *
import entanglementAnalysisLgt
import entanglementSynchronization
import harmonizedLayout
'''
This class stores the graphs, and performs the manipulations on it.
I guess we want in the future to propose only one graph per session, and maybe store different graphs.
'''
class graphManager():
root_deprecated = 0
graph_deprecated = 0
substrate = 0
catalyst = {}
'''
This method converts a graph to a JSON string, given some parameters:
graph, the graph to convert (if null, substrate is considered)
properties, a map of the properties that should be included in the JSON {nodes|links:[{name:xxx,type:yyy}]}
nodeList, a selection of nodes (array)
edgeList, a selection of edges (array)
Returns the new JSON string.
The method can then restrict the amount of information dumped in the JSON to only what it is passed beforehand.
Default is 'baseID', 'x', 'y', 'source', 'target'.
Extra data can be passed through the 'data' member of 'properties'
'''
def graphToJSON(self, graph=0, properties={}, nodeList=0, edgeList=0):
    # Serialize a graph (default: the substrate) to a d3-style JSON string
    # with "nodes" and "links" arrays. `properties` can request extra node
    # and edge properties to include, plus an optional 'data' payload.
    if not graph:
        graph = self.substrate
    if not nodeList:
        nodeList = graph.getNodes()
    if not edgeList:
        edgeList = graph.getEdges()
    nList = {}
    eList = {}
    bID = graph.getDoubleProperty("baseID")
    if not properties:
        # Default dump: position and baseID only.
        vLayout = graph.getLayoutProperty("viewLayout")
        nList = {"nodes":[{"id":n.id,"x":vLayout[n][0],"y":vLayout[n][1], "baseID":bID[n]} for n in nodeList]}
        eList = {"links":[{"source":bID[graph.source(e)], "target":bID[graph.target(e)], "value":1, "baseID":bID[e]} for e in edgeList]}
        nList.update(eList)
        return json.dumps(nList)
    else:
        if 'nodes' in properties:
            nodesProp = properties['nodes']
            # Map requested property names to their tulip property
            # interfaces, keyed by type.
            propInterface = {}
            for k in nodesProp:
                if 'type' in k and 'name' in k:
                    if k['type'] == 'bool':
                        propInterface[k['name']] = graph.getBooleanProperty(k['name'])
                    if k['type'] == 'float':
                        propInterface[k['name']] = graph.getDoubleProperty(k['name'])
                    if k['type'] == 'string':
                        propInterface[k['name']] = graph.getStringProperty(k['name'])
            vLayout = graph.getLayoutProperty("viewLayout")
            # Collect every requested property value for one element.
            getValue = lambda x: {prop:propInterface[prop][x] for prop in propInterface}
            nList = []
            for n in nodeList:
                v = {"x":vLayout[n][0],"y":vLayout[n][1], "baseID":bID[n]}
                v.update(getValue(n))
                nList.append(v)
            nList = {"nodes":nList}
        else:
            # NOTE(review): vLayout is not assigned on this path (only in
            # the 'nodes' branch above) -- this line would raise NameError
            # if reached with properties lacking 'nodes'; confirm callers
            # always pass 'nodes'.
            nList = {"nodes":[{"x":vLayout[n][0],"y":vLayout[n][1], "baseID":bID[n]} for n in nodeList]}
        # Edge properties may be requested under 'edges' or 'links'.
        edgesProp = None
        if 'edges' in properties:
            edgesProp = properties['edges']
        if 'links' in properties:
            edgesProp = properties['links']
        if edgesProp:
            propInterface = {}
            for k in edgesProp:
                if 'type' in k and 'name' in k:
                    if k['type'] == 'bool':
                        propInterface[k['name']] = graph.getBooleanProperty(k['name'])
                    if k['type'] == 'float':
                        propInterface[k['name']] = graph.getDoubleProperty(k['name'])
                    if k['type'] == 'string':
                        propInterface[k['name']] = graph.getStringProperty(k['name'])
            vLayout = graph.getLayoutProperty("viewLayout")
            getValue = lambda x: {prop:propInterface[prop][x] for prop in propInterface}
            eList = []
            for e in edgeList:
                v = {"source":bID[graph.source(e)], "target":bID[graph.target(e)], "value":1, "baseID":bID[e]}
                v.update(getValue(e))
                eList.append(v)
            eList = {"links":eList}
        else:
            eList = {"links":[{"source":bID[graph.source(e)], "target":bID[graph.target(e)], "value":1, "baseID":bID[e]} for e in edgeList]}
        nList.update(eList)
        # Optional free-form payload forwarded verbatim to the client.
        if 'data' in properties.keys():
            nList.update({'data':properties['data']})
        return json.dumps(nList)
'''
This method applies an inducedSubGraph algorithm to a selection of nodes of a given graph.
jsonGraph, the JSON graph object of the selection
target, the graph to target ('substrate' or 'catalyst')
Returns the induced subgraph
'''
def inducedSubGraph(self, jsonGraph, target, multiplex_property="descriptors"):
    # Restrict the target graph ('substrate' or 'catalyst') to the nodes
    # whose baseID appears in the JSON selection, deleting everything else
    # in place, and return the pruned graph.
    nodeList = []
    graphNList = []
    for n in jsonGraph[u'nodes']:
        nodeList.append(n[u'baseID'])
    graph = self.substrate
    if target == 'catalyst':
        graph = self.catalyst[multiplex_property]
    baseIDP = graph.getDoubleProperty("baseID")
    for n in graph.getNodes():
        if baseIDP[n] in nodeList:
            graphNList.append(n)
    # Build the induced subgraph, then delete from the parent graph every
    # node/edge that is not part of it.
    g = graph.inducedSubGraph(graphNList)
    nList = [n for n in g.getNodes()]
    eList = [e for e in g.getEdges()]
    for n in graph.getNodes():
        if n not in nList:
            graph.delNode(n)
    for e in graph.getEdges():
        if e not in eList:
            graph.delEdge(e)
    # Second pruning pass -- NOTE(review): appears redundant with the loops
    # above unless deletions cascade (delNode removing incident edges);
    # confirm before simplifying.
    gNodeList = [n for n in g.getNodes()]
    gEdgeList = [e for e in g.getEdges()]
    if graph.numberOfNodes() != len(gNodeList):
        for n in graph.getNodes():
            if n not in gNodeList:
                graph.delNode(n)
    if graph.numberOfEdges() != len(gEdgeList):
        for e in graph.getEdges():
            if e not in gEdgeList:
                graph.delEdge(e)
    return graph
def updateLayout(self, jsonGraph, target, multiplex_property="descriptors"):
    # Copy the x/y coordinates from the JSON selection back into the
    # target graph's viewLayout property, matching elements by baseID.
    print "updating the layout:"
    print "target: ", target
    print "jsonGraph: ", jsonGraph
    # baseID -> [x, y] from the client-side layout.
    nodeList = {}
    for n in jsonGraph[u'nodes']:
        nodeList[n[u'baseID']] = [n[u'x'], n[u'y']]
    graph = self.substrate
    if target == 'catalyst':
        graph = self.catalyst[multiplex_property]
    baseIDP = graph.getDoubleProperty("baseID")
    vLayoutP = graph.getLayoutProperty("viewLayout")
    for n in graph.getNodes():
        if baseIDP[n] in nodeList:
            # z is fixed to 0 (2D layout).
            c = tlp.Coord(nodeList[baseIDP[n]][0], nodeList[baseIDP[n]][1], 0)
            vLayoutP[n] = c
'''
Returns a graph with a randomized layout
graph, the graph to apply the random layout algorithm to (default is substrate)
'''
def randomizeGraph(self, graph=0):
    """Apply a random layout to the graph (default: substrate), scale all
    coordinates by 10, and return the graph."""
    if not graph:
        graph = self.substrate
    ##### update for tulip-4.10
    layoutDataSet = tlp.getDefaultPluginParameters("Random", graph)
    graph.applyLayoutAlgorithm("Random", layoutDataSet)
    # Bug fix: viewL was referenced below but its definition had been
    # commented out during the tulip-4.10 migration, causing a NameError.
    viewL = graph.getLayoutProperty("viewLayout")
    for n in graph.getNodes():
        # Spread nodes out: random layout coordinates are scaled up.
        viewL[n] *= 10
    return graph
'''
Adds a new graph (copied to substrate) and returns it
It iterates over the properties that are passed in the JSON file and accordingly sets the tulip
property interface and values that correspond.
json, the JSON of the graph to add
Returns the new graph
'''
def addGraph(self, json):
    # Build a new tulip graph from a d3-style JSON object (nodes/links),
    # creating a typed tulip property for every key found on each element,
    # store it as the substrate, and return it.
    g = tlp.newGraph()
    # for d3.force.layout import
    idToNode = {}
    idIndex = 0
    for n in json[u'nodes']:
        u = g.addNode()
        idToNode[n[u'baseID']] = u
        # here we should add protection for properties automatic load
        # (warning: will crash when different types share the same name)
        for k in n.keys():
            prop = 0
            kType = type(n[k])
            if kType == int or kType == float:
                prop = g.getDoubleProperty(k.encode("utf-8"))
            if kType == str:
                prop = g.getStringProperty(k.encode("utf-8"))
            if kType == bool:
                prop = g.getBooleanProperty(k.encode("utf-8"))
            if kType == unicode:
                # Python 2: unicode values are stored utf-8 encoded,
                # bypassing the generic `prop` assignment below.
                propU = g.getStringProperty(k.encode("utf-8"))
                propU[u] = n[k].encode("utf-8")
            if prop:
                prop[u] = n[k]
        # Fall back to the tulip node id when the JSON has no 'id'.
        if u'id' not in n.keys():
            prop = g.getDoubleProperty("id")
            prop[u] = u.id
    for e in json[u'links']:
        if u'source' in e.keys() and u'target' in e.keys():
            v = g.addEdge(idToNode[e[u'source']], idToNode[e[u'target']])
            for k in e.keys():
                if k not in [u'source', u'target']:
                    prop = 0
                    kType = type(e[k])
                    if kType == int or kType == float:
                        prop = g.getDoubleProperty(k.encode("utf-8"))
                    if kType == str:
                        prop = g.getStringProperty(k.encode("utf-8"))
                    if kType == unicode:
                        propU = g.getStringProperty(k.encode("utf-8"))
                        propU[v] = e[k].encode("utf-8")
                    if kType == bool:
                        prop = g.getBooleanProperty(k.encode("utf-8"))
                    if prop:
                        prop[v] = e[k]
            if 'id' not in e.keys():
                prop = g.getDoubleProperty("id")
                prop[v] = v.id
    # NOTE(review): replaces any previously loaded substrate; sessions /
    # multiple graph ids are not managed yet (see class docstring).
    self.substrate = g
    return g
'''
Analyse a graph (or a selection of a graph) copies it to 'catalyst' and send it
back together with the corresponding entanglement values.
jsonGraph, a JSON graph object of a selection of nodes to analyse
return an array containing [the catalyst graph, entanglement intensity, entanglement homogeneity]
'''
def analyseGraph(self, jsonGraph=0, multiplex_property="descriptors", weightProperty=""):
    # Run the entanglement analysis on the substrate (or on the selection
    # given in jsonGraph) and return
    # [catalyst graph, entanglement intensity, entanglement homogeneity].
    # Degenerate selections (a single node, or only disconnected edges)
    # are handled by building the catalyst node list directly from the
    # multiplex descriptors.
    graph = self.substrate
    entanglementIntensity = 0
    entanglementHomogeneity = 0
    onlyOneNode = False
    onlySingleEdges = False
    print "in graphManager, the weight property: ", weightProperty
    if jsonGraph:
        # Restrict the analysis to the selected nodes (matched by baseID).
        nodeList = []
        graphNList = []
        for n in jsonGraph[u'nodes']:
            nodeList.append(n[u'baseID'])
        baseIDP = self.substrate.getDoubleProperty("baseID")
        for n in self.substrate.getNodes():
            if baseIDP[n] in nodeList:
                graphNList.append(n)
        if len(graphNList) > 1:
            graph = self.substrate.inducedSubGraph(graphNList)
        elif len(graphNList) == 1:
            onlyOneNode = True
            print "there is only one node in the selection"
    # this has to be set because of the clusterAnalysis script
    '''
    if True:#not graph.existProperty("descripteurs"):
        descP = graph.getStringProperty("descripteurs")
        o_descP = graph.getStringProperty(multiplex_property)
        for n in graph.getNodes():
            descP[n] = o_descP[n]
        for e in graph.getEdges():
            descP[e] = o_descP[e]
    '''
    labelList = []
    if not onlyOneNode:
        c = entanglementAnalysisLgt.EntanglementAnalysis(graph, multiplex_property, _weightProperty=weightProperty)
        if c.catalystGraph.numberOfNodes() > 0:
            # Use the largest connected component's entanglement figures.
            resLen = [len(k) for k in c.catalystToEntIndex]
            mainComponent = resLen.index(max(resLen))
            entanglementIntensity = float(c.entanglementIntensity[mainComponent])
            entanglementHomogeneity = float(c.entanglementHomogeneity[mainComponent])
            ##### update for tulip-4.10
            layoutDataSet = tlp.getDefaultPluginParameters("GEM (Frick)", c.catalystGraph)
            c.catalystGraph.applyLayoutAlgorithm("GEM (Frick)", layoutDataSet)
            tName = c.catalystGraph.getStringProperty("catalystName")
            label = c.catalystGraph.getStringProperty("label")
            baseID = c.catalystGraph.getDoubleProperty("baseID")
            labelToCatalystGraphNode = {}
            labelToCatalystGraphEdge = {}
            # sets the baseID for persistence
            # this should be changed at some point because it is
            # computationally heavy
            for n in c.catalystGraph.getNodes():
                label[n] = tName[n]
                baseID[n] = n.id
                labelToCatalystGraphNode[tName[n]] = n
            for e in c.catalystGraph.getEdges():
                baseID[e] = e.id
                # Edge label = endpoint labels joined by ';', ordered
                # lexicographically so the label is direction-independent.
                sL = label[c.catalystGraph.source(e)]
                tL = label[c.catalystGraph.target(e)]
                edgeLabel = sL+';'+tL
                if sL > tL:
                    edgeLabel = tL+';'+sL
                label[e] = edgeLabel
                labelToCatalystGraphEdge[edgeLabel] = e
            labelList = [label[n] for n in c.catalystGraph.getNodes()]
            labelEList = [label[e] for e in c.catalystGraph.getEdges()]
            # Re-associate baseIDs from a previously stored catalyst graph
            # so element identities persist across analyses.
            if multiplex_property in self.catalyst:
                labelCatalystP = self.catalyst[multiplex_property].getStringProperty("label")
                baseIDCatalystP = self.catalyst[multiplex_property].getDoubleProperty("baseID")
                nbAssign = 0
                for n in self.catalyst[multiplex_property].getNodes():
                    if labelCatalystP[n] in labelList:
                        baseID[labelToCatalystGraphNode[labelCatalystP[n]]] = baseIDCatalystP[n]
                        nbAssign += 1
                        if nbAssign == len(labelList):
                            break
                nbAssign = 0
                for e in self.catalyst[multiplex_property].getEdges():
                    edgeLabel = labelCatalystP[e]
                    if edgeLabel in labelEList:
                        baseID[labelToCatalystGraphEdge[edgeLabel]] = baseIDCatalystP[e]
                        nbAssign += 1
                        if nbAssign == len(labelEList):
                            break
            # For a selection, return the fresh catalyst graph without
            # replacing the stored one.
            if jsonGraph:
                return [c.catalystGraph, entanglementIntensity, entanglementHomogeneity]
            if not multiplex_property in self.catalyst:
                self.catalyst[multiplex_property] = tlp.newGraph()
            else:
                self.catalyst[multiplex_property].clear()
            tlp.copyToGraph(self.catalyst[multiplex_property], c.catalystGraph)
            return [self.catalyst[multiplex_property], entanglementIntensity, entanglementHomogeneity]
        else:
            onlySingleEdges = True
    if onlyOneNode or onlySingleEdges:
        # Degenerate case: no catalyst graph could be computed; build the
        # catalyst node set straight from the descriptor strings.
        descP = graph.getStringProperty(multiplex_property)
        returnGraph = tlp.newGraph()
        labelList = []
        if onlyOneNode:
            labelList = descP[graphNList[0]].split(';')
        if onlySingleEdges:
            descP = graph.getStringProperty(multiplex_property)
            for e in graph.getEdges():
                labelList.extend(descP[e].split(';'))
            labelList = list(set(labelList))
        if multiplex_property in self.catalyst:
            labelCatalystP = self.catalyst[multiplex_property].getStringProperty("label")
            baseIDCatalystP = self.catalyst[multiplex_property].getDoubleProperty("baseID")
            label = returnGraph.getStringProperty("label")
            baseID = returnGraph.getDoubleProperty("baseID")
            nbAssign = 0
            for n in self.catalyst[multiplex_property].getNodes():
                if labelCatalystP[n] in labelList:
                    nn = returnGraph.addNode()
                    label[nn] = labelCatalystP[n]
                    baseID[nn] = baseIDCatalystP[n]
                    nbAssign += 1
                    if nbAssign == len(labelList):
                        break
        if jsonGraph:
            return [returnGraph, entanglementIntensity, entanglementHomogeneity]
        if multiplex_property not in self.catalyst:
            self.catalyst[multiplex_property] = tlp.newGraph()
        else:
            self.catalyst[multiplex_property].clear()
        return [self.catalyst[multiplex_property], entanglementIntensity, entanglementHomogeneity]
'''
Returns a selection of corresponding substrate nodes from a selection of catalyst nodes.
jsonGraph, a JSON graph object of a selection of nodes to analyse
In the future we should include the entanglement calculation and send it back too.
'''
def synchronizeFromCatalyst(self, jsonGraph, operator, weightProperty=None, multiplex_property="descriptors"):
    # From a selection of catalyst nodes (matched by baseID), compute the
    # corresponding substrate selection and return it as JSON, together
    # with the entanglement figures of the selection.
    nodeList = []
    graphNList = []
    cataList = []
    for n in jsonGraph[u'nodes']:
        nodeList.append(n[u'baseID'])
    baseIDP = self.catalyst[multiplex_property].getDoubleProperty("baseID")
    label = self.catalyst[multiplex_property].getStringProperty("label")
    for n in self.catalyst[multiplex_property].getNodes():
        if baseIDP[n] in nodeList:
            graphNList.append(n)
            cataList.append(label[n])
    nList = []
    eList = []
    #### adapting for multiple multiplex
    descP = self.substrate.getStringProperty(multiplex_property)
    sync = entanglementSynchronization.EntanglementSynchronization(self.substrate, multiplex_property, _weightProperty=weightProperty)
    syncRes = sync.synchronizeFromCatalyst(cataList, _operator=operator)
    toPrint = [n for n in syncRes['substrate'].getNodes()]
    # Report the entanglement of the largest connected component.
    resLen = [len(k) for k in syncRes['catalystAnalysis'].catalystToEntIndex]
    mainComponent = resLen.index(max(resLen))
    entanglementIntensity = float(syncRes['catalystAnalysis'].entanglementIntensity[mainComponent])
    entanglementHomogeneity = float(syncRes['catalystAnalysis'].entanglementHomogeneity[mainComponent])
    return self.graphToJSON(syncRes['substrate'], {'nodes':[{'type':'string', 'name':'label'}], 'data':{'entanglement intensity':entanglementIntensity, 'entanglement homogeneity':entanglementHomogeneity}})
def synchronizeLayouts(self, multiplex_property="descriptors"):
    # Compute a harmonized layout of substrate and catalyst together and
    # return the substrate as JSON, with the combined-layout data attached
    # under 'data'.
    bLResult = harmonizedLayout.draw(self.substrate, self.catalyst[multiplex_property], descPName=multiplex_property)
    resGraph = bLResult['graph']
    # source of the resulting graph edges are always of type 'substrate'
    jsresgraph = self.graphToJSON(resGraph, {'nodes':[{'type':'string', 'name':'label'}, {'type':'string', 'name':'_type'}]})
    bLResult['graph'] = jsresgraph
    return self.graphToJSON(self.substrate, {'nodes':[{'type':'float', 'name':'weight'}, {'type':'string', 'name':'label'}, {'type':'float', 'name':'entanglementIndex'}], 'data':bLResult})
'''
Applies a layout algorithm on a graph and returns it.
layoutName, the name of the layout algorithm to call
graphTarget, the string value of the graph onto apply the algorithm (substrate or catalyst)
'''
def callLayoutAlgorithm(self, layoutName, graphTarget, multiplex_property="descriptors"):
    # Apply the named tulip layout plugin (with default parameters) to the
    # substrate or catalyst graph and return that graph.
    g = self.substrate
    if graphTarget == 'catalyst':
        g = self.catalyst[multiplex_property]
    ##### update for tulip-4.10: applyLayoutAlgorithm replaces the old
    ##### computeLayoutProperty API
    layoutDataSet = tlp.getDefaultPluginParameters(layoutName, g)
    g.applyLayoutAlgorithm(layoutName, layoutDataSet)
    return g
'''
Applies a double (metric) algorithm on a graph and returns it.
doubleName, the name of the double algorithm to call
graphTarget, the string value of the graph onto apply the algorithm (substrate or catalyst)
'''
def callDoubleAlgorithm(self, doubleName, graphTarget, multiplex_property="descriptors"):
    # Apply the named tulip metric (double) plugin to the substrate or
    # catalyst graph; the result lands in "viewMetric". Returns the graph.
    g = self.substrate
    if graphTarget == 'catalyst':
        g = self.catalyst[multiplex_property]
    print 'computing double algorithm: ', doubleName, ' on ', graphTarget, ' with ', g.numberOfNodes(), ' / ', g.numberOfEdges()
    vM = g.getDoubleProperty("viewMetric")
    viewLabel = g.getStringProperty("catalyst")
    ##### update for tulip-4.10: applyDoubleAlgorithm replaces the old
    ##### computeDoubleProperty API
    doubleDataSet = tlp.getDefaultPluginParameters(doubleName, g)
    g.applyDoubleAlgorithm(doubleName, doubleDataSet)
    # Debug dump of the per-node metric keyed by catalyst label.
    print {viewLabel[n]: vM[n] for n in g.getNodes()}
    return g
| 27,529 | 7,359 |
# coding: utf-8
"""
Functions for working with pitch data
This file depends on the praat script get_pitch_and_intensity.praat
(which depends on praat) to extract pitch and intensity values from
audio data. Once the data is extracted, there are functions for
data normalization and calculating various measures from the time
stamped output of the praat script (ie **generatePIMeasures()**)
For brevity, 'pitch_and_intensity' is referred to as 'PI'
see **examples/get_pitch_and_formants.py**
"""
import os
from os.path import join
import io
import math
from typing import List, Tuple, Optional, cast
from praatio import data_points
from praatio import praatio_scripts
from praatio import textgrid
from praatio.utilities import errors
from praatio.utilities import my_math
from praatio.utilities import utils
from praatio.utilities.constants import Point
HERTZ = "Hertz"
UNSPECIFIED = "unspecified"
_PITCH_ERROR_TIER_NAME = "pitch errors"
def _extractPIPiecewise(
    inputFN: str,
    outputFN: str,
    praatEXE: str,
    minPitch: float,
    maxPitch: float,
    tgFN: str,
    tierName: str,
    tmpOutputPath: str,
    sampleStep: float = 0.01,
    silenceThreshold: float = 0.03,
    pitchUnit: str = HERTZ,
    forceRegenerate: bool = True,
    undefinedValue: Optional[float] = None,  # was `float = None` (implicit Optional)
    medianFilterWindowSize: int = 0,
    pitchQuadInterp: bool = False,
) -> List[Tuple[float, ...]]:
    """
    Extracts pitch and int from each labeled interval in a textgrid

    This has the benefit of being faster than using _extractPIFile if only
    labeled regions need to have their pitch values sampled, particularly
    for longer files.

    Returns the result as a list. Will load the serialized result
    if this has already been called on the appropriate files before
    """
    outputPath = os.path.split(outputFN)[0]
    utils.makeDir(outputPath)

    windowSize = medianFilterWindowSize

    if not os.path.exists(inputFN):
        # Bug fix: the message previously rendered as
        # "Required folder does not exist: f<path>" -- a stray 'f' inside
        # the f-string -- and inputFN is the input audio file, not a folder.
        raise errors.ArgumentError(f"Required file does not exist: {inputFN}")

    firstTime = not os.path.exists(outputFN)
    if firstTime or forceRegenerate is True:
        utils.makeDir(tmpOutputPath)
        # Split the audio on the labeled tier and run the pitch/intensity
        # extraction on each piece separately.
        splitAudioList = praatio_scripts.splitAudioOnTier(
            inputFN, tgFN, tierName, tmpOutputPath, False
        )
        allPIList: List[Tuple[str, str, str]] = []
        for start, _, fn in splitAudioList:
            tmpTrackName = os.path.splitext(fn)[0] + ".txt"
            piList = _extractPIFile(
                join(tmpOutputPath, fn),
                join(tmpOutputPath, tmpTrackName),
                praatEXE,
                minPitch,
                maxPitch,
                sampleStep,
                silenceThreshold,
                pitchUnit,
                forceRegenerate=True,
                medianFilterWindowSize=windowSize,
                pitchQuadInterp=pitchQuadInterp,
            )
            # Shift each piece's timestamps back to file-global time.
            convertedPiList = [
                ("%0.3f" % (float(time) + start), str(pV), str(iV))
                for time, pV, iV in piList
            ]
            allPIList.extend(convertedPiList)

        outputData = [",".join(row) for row in allPIList]
        with open(outputFN, "w") as fd:
            fd.write("\n".join(outputData) + "\n")

    return loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
def _extractPIFile(
    inputFN: str,
    outputFN: str,
    praatEXE: str,
    minPitch: float,
    maxPitch: float,
    sampleStep: float = 0.01,
    silenceThreshold: float = 0.03,
    pitchUnit: str = HERTZ,
    forceRegenerate: bool = True,
    undefinedValue: Optional[float] = None,
    medianFilterWindowSize: int = 0,
    pitchQuadInterp: bool = False,
) -> List[Tuple[float, ...]]:
    """
    Extracts pitch and intensity values from an audio file

    Returns the result as a list. Will load the serialized result
    if this has already been called on the appropriate files before
    """
    outputPath = os.path.split(outputFN)[0]
    utils.makeDir(outputPath)

    if not os.path.exists(inputFN):
        # BUG FIX: message previously read "f{inputFN}" (duplicated f-prefix)
        # and called the audio file a folder
        raise errors.ArgumentError(f"Required file does not exist: {inputFN}")

    firstTime = not os.path.exists(outputFN)
    if firstTime or forceRegenerate is True:
        # The praat script uses append mode, so we need to clear any prior
        # result
        if os.path.exists(outputFN):
            os.remove(outputFN)

        # The praat script expects an integer flag rather than a boolean
        doInterpolation = 1 if pitchQuadInterp is True else 0

        argList = [
            inputFN,
            outputFN,
            sampleStep,
            minPitch,
            maxPitch,
            silenceThreshold,
            pitchUnit,
            -1,
            -1,
            medianFilterWindowSize,
            doInterpolation,
        ]
        scriptName = "get_pitch_and_intensity.praat"
        scriptFN = join(utils.scriptsPath, scriptName)
        utils.runPraatScript(praatEXE, scriptFN, argList)

    return loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
def extractIntensity(
    inputFN: str,
    outputFN: str,
    praatEXE: str,
    minPitch: float,
    sampleStep: float = 0.01,
    forceRegenerate: bool = True,
    undefinedValue: Optional[float] = None,
) -> List[Tuple[float, ...]]:
    """
    Extract the intensity for an audio file

    Calculates intensity using the following praat command:
    https://www.fon.hum.uva.nl/praat/manual/Sound__To_Intensity___.html
    """
    outputPath = os.path.split(outputFN)[0]
    utils.makeDir(outputPath)

    if not os.path.exists(inputFN):
        # BUG FIX: message previously read "f{inputFN}" (duplicated f-prefix)
        # and called the audio file a folder
        raise errors.ArgumentError(f"Required file does not exist: {inputFN}")

    firstTime = not os.path.exists(outputFN)
    if firstTime or forceRegenerate is True:
        # The praat script uses append mode, so we need to clear any prior
        # result
        if os.path.exists(outputFN):
            os.remove(outputFN)

        argList = [inputFN, outputFN, sampleStep, minPitch, -1, -1]
        scriptName = "get_intensity.praat"
        scriptFN = join(utils.scriptsPath, scriptName)
        utils.runPraatScript(praatEXE, scriptFN, argList)

    return loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
def extractPitchTier(
    wavFN: str,
    outputFN: str,
    praatEXE: str,
    minPitch: float,
    maxPitch: float,
    sampleStep: float = 0.01,
    silenceThreshold: float = 0.03,
    forceRegenerate: bool = True,
    medianFilterWindowSize: int = 0,
    pitchQuadInterp: bool = False,
) -> data_points.PointObject2D:
    """
    Extract pitch at regular intervals from the input wav file

    Data is output to a text file and then returned in a list in the form
    [(timeV1, pitchV1), (timeV2, pitchV2), ...]

    sampleStep - the frequency to sample pitch at
    silenceThreshold - segments with lower intensity won't be analyzed
                       for pitch
    forceRegenerate - if running this function for the same file, if False
                      just read in the existing pitch file
    pitchQuadInterp - if True, quadratically interpolate pitch

    Calculates pitch using the following praat command:
    https://www.fon.hum.uva.nl/praat/manual/Sound__To_Pitch___.html
    """
    outputPath = os.path.split(outputFN)[0]
    utils.makeDir(outputPath)

    # The praat script expects an integer flag rather than a boolean
    doInterpolation = 1 if pitchQuadInterp is True else 0

    if not os.path.exists(wavFN):
        # BUG FIX: message previously read "f{wavFN}" (duplicated f-prefix)
        raise errors.ArgumentError(f"Required file does not exist: {wavFN}")

    firstTime = not os.path.exists(outputFN)
    if firstTime or forceRegenerate is True:
        if os.path.exists(outputFN):
            os.remove(outputFN)

        argList = [
            wavFN,
            outputFN,
            sampleStep,
            minPitch,
            maxPitch,
            silenceThreshold,
            medianFilterWindowSize,
            doInterpolation,
        ]
        scriptName = "get_pitchtier.praat"
        scriptFN = join(utils.scriptsPath, scriptName)
        utils.runPraatScript(praatEXE, scriptFN, argList)

    return data_points.open2DPointObject(outputFN)
def extractPitch(
    wavFN: str,
    outputFN: str,
    praatEXE: str,
    minPitch: float,
    maxPitch: float,
    sampleStep: float = 0.01,
    silenceThreshold: float = 0.03,
    forceRegenerate: bool = True,
    undefinedValue: Optional[float] = None,
    medianFilterWindowSize: int = 0,
    pitchQuadInterp: bool = False,
) -> List[Tuple[float, ...]]:
    """
    Extract pitch at regular intervals from the input wav file

    Data is output to a text file and then returned in a list in the form
    [(timeV1, pitchV1), (timeV2, pitchV2), ...]

    sampleStep - the frequency to sample pitch at
    silenceThreshold - segments with lower intensity won't be analyzed
                       for pitch
    forceRegenerate - if running this function for the same file, if False
                      just read in the existing pitch file
    undefinedValue - if None remove from the dataset, otherwise set to
                     undefinedValue
    pitchQuadInterp - if True, quadratically interpolate pitch

    Calculates pitch using the following praat command:
    https://www.fon.hum.uva.nl/praat/manual/Sound__To_Pitch___.html
    """
    outputPath = os.path.split(outputFN)[0]
    utils.makeDir(outputPath)

    # The praat script expects an integer flag rather than a boolean
    doInterpolation = 1 if pitchQuadInterp is True else 0

    if not os.path.exists(wavFN):
        # BUG FIX: message previously read "f{wavFN}" (duplicated f-prefix)
        raise errors.ArgumentError(f"Required file does not exist: {wavFN}")

    firstTime = not os.path.exists(outputFN)
    if firstTime or forceRegenerate is True:
        if os.path.exists(outputFN):
            os.remove(outputFN)

        argList = [
            wavFN,
            outputFN,
            sampleStep,
            minPitch,
            maxPitch,
            silenceThreshold,
            -1,
            -1,
            medianFilterWindowSize,
            doInterpolation,
        ]
        scriptName = "get_pitch.praat"
        scriptFN = join(utils.scriptsPath, scriptName)
        utils.runPraatScript(praatEXE, scriptFN, argList)

    return loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
def extractPI(
    inputFN: str,
    outputFN: str,
    praatEXE: str,
    minPitch: float,
    maxPitch: float,
    sampleStep: float = 0.01,
    silenceThreshold: float = 0.03,
    pitchUnit: str = HERTZ,
    forceRegenerate: bool = True,
    tgFN: Optional[str] = None,
    tierName: Optional[str] = None,
    tmpOutputPath: Optional[str] = None,
    undefinedValue: Optional[float] = None,
    medianFilterWindowSize: int = 0,
    pitchQuadInterp: bool = False,
) -> List[Tuple[float, ...]]:
    """
    Extracts pitch and intensity from a file wholesale or piecewise

    If the parameters for a tg are passed in, this will only extract labeled
    segments in a tier of the tg.  Otherwise, pitch will be extracted from
    the entire file.

    male: minPitch=50; maxPitch=350
    female: minPitch=75; maxPitch=450
    pitchUnit: "Hertz", "semitones re 100 Hz", etc

    Calculates pitch and intensity using the following praat command:
    https://www.fon.hum.uva.nl/praat/manual/Sound__To_Pitch___.html
    https://www.fon.hum.uva.nl/praat/manual/Sound__To_Intensity___.html
    """
    outputPath = os.path.split(outputFN)[0]

    windowSize = medianFilterWindowSize

    # Whole-file extraction unless both a textgrid and a tier were given
    if tgFN is None or tierName is None:
        piList = _extractPIFile(
            inputFN,
            outputFN,
            praatEXE,
            minPitch,
            maxPitch,
            sampleStep,
            silenceThreshold,
            pitchUnit,
            forceRegenerate,
            undefinedValue=undefinedValue,
            medianFilterWindowSize=windowSize,
            pitchQuadInterp=pitchQuadInterp,
        )
    else:
        # Piecewise extraction over the tier's labeled intervals; intermediate
        # audio pieces are written next to the output file by default
        if tmpOutputPath is None:
            tmpOutputPath = join(outputPath, "piecewise_output")
        piList = _extractPIPiecewise(
            inputFN,
            outputFN,
            praatEXE,
            minPitch,
            maxPitch,
            tgFN,
            tierName,
            tmpOutputPath,
            sampleStep,
            silenceThreshold,
            pitchUnit,
            forceRegenerate,
            undefinedValue=undefinedValue,
            medianFilterWindowSize=windowSize,
            pitchQuadInterp=pitchQuadInterp,
        )

    return piList
def loadTimeSeriesData(
    fn: str, undefinedValue: Optional[float] = None
) -> List[Tuple[float, ...]]:
    """
    For reading the output of get_pitch_and_intensity or get_intensity

    Data should be of the form
    [(time1, value1a, value1b, ...),
     (time2, value2a, value2b, ...), ]

    Rows containing an undefined marker ("--") are dropped unless
    undefinedValue is given, in which case the marker is replaced by it.
    """
    name = os.path.splitext(os.path.split(fn)[1])[0]
    try:
        with io.open(fn, "r", encoding="utf-8") as fd:
            data = fd.read()
    except IOError:
        print(f"No pitch track for: {name}")
        raise

    dataList = [row.split(",") for row in data.splitlines() if row != ""]

    # The new praat script includes a header
    # BUG FIX: guard against an empty file, which previously raised IndexError
    if dataList and dataList[0][0] == "time":
        dataList = dataList[1:]

    newDataList = []
    for row in dataList:
        timestamp = float(row.pop(0))
        entry = [
            timestamp,
        ]
        doSkip = False
        for value in row:
            if "--" in value:
                # Praat marks unanalyzable samples with "--undefined--"
                if undefinedValue is not None:
                    appendValue = undefinedValue
                else:
                    doSkip = True
                    break
            else:
                appendValue = float(value)

            entry.append(appendValue)

        if doSkip is True:
            continue

        newDataList.append(tuple(entry))

    return newDataList
def generatePIMeasures(
    dataList: List[Tuple[float, float, float]],
    tgFN: str,
    tierName: str,
    doPitch: bool,
    medianFilterWindowSize: Optional[int] = None,
    globalZNormalization: bool = False,
    localZNormalizationWindowSize: int = 0,
) -> List[Tuple[float, ...]]:
    """
    Generates processed values for the labeled intervals in a textgrid

    nullLabelList - labels to ignore in the textgrid.  Defaults to ["",]

    if 'doPitch'=true get pitch measures; if =false get rms intensity
    medianFilterWindowSize: if none, no filtering is done
    globalZNormalization: if True, values are normalized with the mean
                          and stdDev of the data in dataList
    localZNormalization: if greater than 1, values are normalized with the mean
                         and stdDev of the local context (for a window of 5, it
                         would consider the current value, 2 values before and 2
                         values after)
    """
    # Warn user that normalizing a second time nullifies the first normalization
    if globalZNormalization is True and localZNormalizationWindowSize > 0:
        raise errors.NormalizationException()

    castDataList = cast(List[Tuple[float, ...]], dataList)
    if globalZNormalization is True:
        # Column 1 holds pitch values, column 2 holds intensity values
        if doPitch:
            castDataList = my_math.znormalizeSpeakerData(castDataList, 1, True)
        else:
            castDataList = my_math.znormalizeSpeakerData(castDataList, 2, True)

    # Raw values should have 0 filtered; normalized values are centered around 0, so don't filter
    filterZeroFlag = not globalZNormalization

    tg = textgrid.openTextgrid(tgFN, False)
    if not isinstance(tg.tierDict[tierName], textgrid.IntervalTier):
        raise errors.IncompatibleTierError(tg.tierDict[tierName])

    tier = cast(textgrid.IntervalTier, tg.tierDict[tierName])
    piData = tier.getValuesInIntervals(castDataList)
    outputList: List[List[float]] = []
    for interval, entryList in piData:
        label = interval[0]
        if doPitch:
            # Each entry is (time, pitch, intensity); keep the pitch column
            tmpValList = [f0Val for _, f0Val, _ in entryList]
            f0Measures = getPitchMeasures(
                tmpValList, tgFN, label, medianFilterWindowSize, filterZeroFlag
            )
            outputList.append(list(f0Measures))
        else:
            # Keep the intensity column and reduce it to a single rms value
            tmpValList = [intensityVal for _, _, intensityVal in entryList]

            if filterZeroFlag:
                tmpValList = [
                    intensityVal for intensityVal in tmpValList if intensityVal != 0.0
                ]

            rmsIntensity = 0.0
            if len(tmpValList) != 0:
                rmsIntensity = my_math.rms(tmpValList)
            outputList.append(
                [
                    rmsIntensity,
                ]
            )

    # Locally normalize the output, one feature column at a time
    if localZNormalizationWindowSize > 0 and len(outputList) > 0:
        for colI in range(len(outputList[0])):
            featValList = [row[colI] for row in outputList]

            featValList = my_math.znormWindowFilter(
                featValList, localZNormalizationWindowSize, True, True
            )
            if len(featValList) != len(outputList):  # This should hopefully not happen
                raise errors.UnexpectedError(
                    "Lists must be of the same length but are not: "
                    f"({len(featValList)}), ({len(outputList)})"
                )

            for i, val in enumerate(featValList):
                outputList[i][colI] = val

    return [tuple(row) for row in outputList]
def getPitchMeasures(
    f0Values: List[float],
    name: Optional[str] = None,
    label: Optional[str] = None,
    medianFilterWindowSize: Optional[int] = None,
    filterZeroFlag: bool = False,
) -> Tuple[float, float, float, float, float, float]:
    """
    Get various measures (min, max, etc) for the passed in list of pitch values

    Returns (mean, max, min, range, variance, std); all zeros when no pitch
    data survives filtering.

    name is the name of the file.  Label is the label of the current interval.
    Both of these labels are only used for debugging and can be ignored if
    desired.
    medianFilterWindowSize: None -> no median filtering
    filterZeroFlag: True -> zero values are removed
    """
    if name is None:
        name = UNSPECIFIED
    if label is None:
        label = UNSPECIFIED

    if medianFilterWindowSize is not None:
        f0Values = my_math.medianFilter(
            f0Values, medianFilterWindowSize, useEdgePadding=True
        )

    if filterZeroFlag:
        # int() truncation also drops values in (-1, 1), not just exact zeros
        f0Values = [f0Val for f0Val in f0Values if int(f0Val) != 0]

    if len(f0Values) == 0:
        myStr = f"No pitch data for file: {name}, label: {label}"
        print(myStr.encode("ascii", "replace"))
        counts = 0.0
        meanF0 = 0.0
        maxF0 = 0.0
        minF0 = 0.0
        rangeF0 = 0.0
        variance = 0.0
        std = 0.0
    else:
        counts = float(len(f0Values))
        meanF0 = sum(f0Values) / counts
        maxF0 = max(f0Values)
        minF0 = min(f0Values)
        rangeF0 = maxF0 - minF0

        # Population variance (divide by N, not N - 1)
        variance = sum((val - meanF0) ** 2 for val in f0Values) / counts
        std = math.sqrt(variance)

    return (meanF0, maxF0, minF0, rangeF0, variance, std)
def detectPitchErrors(
    pitchList: List[Tuple[float, float]],
    maxJumpThreshold: float = 0.70,
    tgToMark: Optional[textgrid.Textgrid] = None,
) -> Tuple[List[Point], Optional[textgrid.Textgrid]]:
    """
    Detect pitch halving and doubling errors.

    pitchList holds (time, pitch) pairs.  A point is flagged when the ratio
    between consecutive pitch values falls outside
    [maxJumpThreshold, 1 / maxJumpThreshold].

    If a textgrid is passed in, it adds the markings to the textgrid
    """
    if maxJumpThreshold < 0 or maxJumpThreshold > 1:
        raise errors.ArgumentError(
            f"'maxJumpThreshold' must be between 0 and 1. Was given ({maxJumpThreshold})"
        )
    # NOTE(review): maxJumpThreshold == 0 passes this guard but divides by
    # zero below; a pitch value of 0 in pitchList likewise divides by zero
    # when computing the ratio -- confirm inputs exclude these cases

    tierName = _PITCH_ERROR_TIER_NAME
    if tgToMark is not None and tierName in tgToMark.tierNameList:
        raise errors.ArgumentError(
            f"Tier name '{tierName}' is already in provided textgrid"
        )

    errorList = []
    for i in range(1, len(pitchList)):
        lastPitch = pitchList[i - 1][1]
        currentPitch = pitchList[i][1]

        ceilingCutoff = currentPitch / maxJumpThreshold
        floorCutoff = currentPitch * maxJumpThreshold
        if (lastPitch <= floorCutoff) or (lastPitch >= ceilingCutoff):
            currentTime = pitchList[i][0]
            # Store the jump ratio as the point's label for later inspection
            errorList.append(Point(currentTime, str(currentPitch / lastPitch)))

    if tgToMark is not None:
        pointTier = textgrid.PointTier(
            tierName, errorList, tgToMark.minTimestamp, tgToMark.maxTimestamp
        )
        tgToMark.addTier(pointTier)

    return errorList, tgToMark
| 20,151 | 6,247 |
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
# return the best three results
def top_n(matrix_prob, label_map):
    """For each row of class probabilities, return the labels of the
    three highest-scoring classes, best first."""
    results = []
    for probs in matrix_prob:
        ordered = sorted(enumerate(probs), key=lambda pair: pair[1], reverse=True)
        ranked_labels = [label_map[idx] for idx, _ in ordered]
        results.append(ranked_labels[:3])
    return results
# basic neural network model
def basic_model():
    """Build a 100 -> 500 -> 42 feed-forward softmax classifier.

    NOTE(review): `output_dim`/`input_dim` keyword names are the Keras 1.x
    Dense API (Keras 2 renamed `output_dim` to `units`) -- confirm the
    installed Keras version before modernizing these calls.
    """
    model = Sequential()
    model.add(Dense(output_dim=500, input_dim=100, activation='relu'))
    model.add(Dropout(0.2))  # regularization between the two dense layers
    model.add(Dense(output_dim=42, input_dim=500, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
if __name__ == '__main__':
    # Load 100-d mean-embedding features and integer labels for training/test
    X = pd.read_csv('./data/triple_train_x_mean.txt', header=None, encoding='utf-8')
    Y = pd.read_csv('./data/triple_train_y.txt', header=None, encoding='utf-8')
    X_test = pd.read_csv('./data/triple_test_x_mean.txt', header=None, encoding='utf-8')
    # One-hot encoding of the 42 classes.
    # NOTE(review): matrix_y is never used below -- presumably KerasClassifier
    # encodes Y internally; confirm before removing.
    matrix_y = np_utils.to_categorical(Y,42)
    # KerasClassifier analysis
    classifier = KerasClassifier(build_fn=basic_model, nb_epoch=10, batch_size=500)
    classifier.fit(X, Y)
    pred_prob = classifier.predict_proba(X_test)
    # Map predicted class indices back to label strings (one label per line)
    with open('./model/task2_label_space.txt', encoding='utf-8') as flabel:
        label_map = flabel.read().split()
    pd.DataFrame(top_n(pred_prob, label_map)).to_csv('./data/task2_ans_int_index.txt', index=None, header=None, encoding='utf-8')
| 1,488 | 568 |
import numpy as np
import cv2
import os
import time
import requests
import shutil
def get_route_tile(x, y, out_file):
    """Download the zoom-13 Google satellite tile at (x, y) and save it to out_file.

    Example tile URL: http://mt1.google.com/vt/lyrs=y&x=5975&y=2598&z=13
    """
    url = 'http://mt1.google.com/vt/lyrs=y&x={}&y={}&z=13'.format(x, y)
    # BUG FIX: the streamed response was never closed (only `del`eted), leaking
    # the connection; the context manager guarantees it is released.
    with requests.get(url, stream=True) as response:
        # Fail loudly on HTTP errors instead of silently saving an error page
        response.raise_for_status()
        with open(out_file, 'wb') as file:
            shutil.copyfileobj(response.raw, file)
def union(all_x, all_y, path):
    """Stitch the downloaded tiles into one image and save it as map.png.

    Tiles are concatenated vertically per x-column, then the columns are
    concatenated horizontally.  Returns the stitched image array.
    """
    x_layers = []
    for x_index in range(all_x):
        # BUG FIX: main() saves every tile with a '.png' suffix, but the tiles
        # were read back without it, so imread always failed; append the suffix.
        file_path = os.path.join(path, "_".join(map(str, [x_index, 0])) + '.png')
        print(file_path)
        img = cv2.imread(file_path)
        for y_index in range(1, all_y):
            file_path = os.path.join(path, "_".join(map(str, [x_index, y_index])) + '.png')
            print(file_path)
            if os.path.exists(file_path) and os.path.isfile(file_path):
                print(img.shape)
                # Stack the next tile below the current column image
                img = np.concatenate((img, cv2.imread(file_path)), axis=0)
            else:
                print("fail")
                break
        x_layers.append(img)

    # Join the per-column strips left to right
    final_image = x_layers[0]
    for layer in range(1, all_x):
        final_image = np.concatenate((final_image, x_layers[layer]), axis=1)
    cv2.imwrite(os.path.join(path, 'map.png'), final_image)
    return final_image
def main():
    """
    https://api.openstreetmap.org/api/0.6/map?bbox=82.54715,54.839455,83.182984,55.103517
    https://sat02.maps.yandex.net/tiles?l=sat&v=3.465.0&x=2989&y=1297&z=12&lang=ru_RU

    Download every tile covering the city bounding box, then stitch them.
    """
    city_min_x = 5975
    city_max_x = 5989
    city_min_y = 2582
    city_max_y = 2597
    all_x = city_max_x - city_min_x + 1
    all_y = city_max_y - city_min_y + 1
    path = './google_tiles_' + str(13) + '/'
    # BUG FIX: the output directory was assumed to exist; create it up front
    os.makedirs(path, exist_ok=True)
    # Iterate the bounds directly instead of duplicating them as literals,
    # which previously risked drifting out of sync with all_x/all_y
    for x_index in range(city_min_x, city_max_x + 1):
        for y_index in range(city_min_y, city_max_y + 1):
            file_name = os.path.join(path, "_".join(map(str, [x_index, y_index])) + '.png')
            get_route_tile(x_index, y_index, file_name)
            time.sleep(0.1)  # throttle requests to the tile server
    final_image = union(all_x, all_y, path)
if __name__ == '__main__':
main() | 2,012 | 870 |
"""Example __init__.py to wrap the wod_latency_submission module imports."""
from . import model

# Re-export the submission entry points at package level so the latency
# harness can access them directly as attributes of this package.
initialize_model = model.initialize_model
run_model = model.run_model
DATA_FIELDS = model.DATA_FIELDS
| 200 | 64 |
# -*- coding: utf-8 -*-
"""
Clustering Methods
"""
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import normalize
class ClusteringMethods:
    """ Base class of clustering methods """

    @staticmethod
    def _normalize_values(arr, norm=None, axis=None):
        """
        Normalize values in array by column

        Parameters
        ----------
        arr : ndarray
            ndarray of values extracted from meta
            shape (n samples, with m features)
        norm : str
            Normalization method to use (see sklearn.preprocessing.normalize)
            if None range normalize
        axis : int
            Axis to normalize along

        Returns
        ---------
        arr : ndarray
            array with values normalized by column
            shape (n samples, with m features)
        """
        if norm:
            arr = normalize(arr, norm=norm, axis=axis)
        else:
            if np.issubdtype(arr.dtype, np.integer):
                arr = arr.astype(float)
            min_all = arr.min(axis=axis)
            max_all = arr.max(axis=axis)
            range_all = max_all - min_all
            # Avoid division by zero for constant features
            if axis is not None:
                range_all = np.where(range_all == 0, 1, range_all)
            elif range_all == 0:
                # BUG FIX: the scalar (axis=None) case previously divided by 0
                range_all = 1
            # BUG FIX: previously `arr -= min_all; arr /= range_all` mutated
            # the caller's array in place for float inputs; compute
            # out-of-place so the input is left untouched.
            arr = (arr - min_all) / range_all
        return arr

    @staticmethod
    def kmeans(data, **kwargs):
        """ Cluster based on kmeans methodology """
        kmeans = KMeans(random_state=0, **kwargs)
        results = kmeans.fit(data)
        labels = results.labels_

        # Create deterministic cluster labels based on size: the smallest
        # cluster is relabeled 0, the next smallest 1, and so on.
        label_n, l_size = np.unique(labels, return_counts=True)
        idx = np.argsort(l_size)
        l_mapping = dict(zip(label_n[idx], label_n))
        sorted_labels = labels.copy()
        for k, v in l_mapping.items():
            sorted_labels[labels == k] = v
        return sorted_labels
| 1,933 | 555 |
from .controller_sorteosLnac import sorteosLnac | 47 | 17 |
"""
This folder contains training loops and accompanying loggers and listeners
"""
| 87 | 22 |
import os
def boot():
    """Print the ASCII-art boot banner and wait for a key press."""
    # NOTE(review): a bare `print` (no parentheses) is a no-op expression in
    # Python 3 -- presumably meant as `print()` for a blank line; confirm.
    print
    print ('          _____                    _____                    _____                    _____                    _____                _______                   _____          ')
    print ('         /\    \                  /\    \                  /\    \                  /\    \                  /\    \              /::\    \                 /\    \         ')
    print ('        /::\    \                /::\____\                /::\    \                /::\    \                /::\    \            /::::\    \               /::\____\        ')
    print ('       /::::\    \              /:::/    /               /::::\    \              /::::\    \               \:::\    \          /::::::\    \             /::::|   |        ')
    print ('      /::::::\    \            /:::/    /               /::::::\    \            /::::::\    \               \:::\    \        /::::::::\    \           /:::::|   |        ')
    print ('     /:::/\:::\    \          /:::/    /               /:::/\:::\    \          /:::/\:::\    \               \:::\    \      /:::/~~\:::\    \         /::::::|   |        ')
    print ('    /:::/__\:::\    \        /:::/____/               /:::/__\:::\    \        /:::/__\:::\    \               \:::\    \    /:::/    \:::\    \       /:::/|::|   |        ')
    print ('   /::::\   \:::\    \      /::::\    \              /::::\   \:::\    \      /::::\   \:::\    \              /::::\    \  /:::/    / \:::\    \     /:::/ |::|   |        ')
    print ('  /::::::\   \:::\    \    /::::::\    \   _____    /::::::\   \:::\    \    /::::::\   \:::\    \    ____    /::::::\    \/:::/____/   \:::\____\   /:::/  |::|   | _____  ')
    print (' /:::/\:::\   \:::\    \  /:::/\:::\    \ /\    \  /:::/\:::\   \:::\    \  /:::/\:::\   \:::\    \  /\   \  /:::/\:::\    |:::|    |   |:::|    | /:::/   |::|   |/\    \ ')
    print ('/:::/  \:::\   \:::\____\/:::/  \:::\    /::\____\/:::/  \:::\   \:::\____\/:::/  \:::\   \:::\____\/::\   \/:::/  \:::\____|:::|____|   |:::|    |/:: /    |::|   /::\___ \ ')
    print ('\::/    \:::\  /:::/    /\::/    \:::\  /:::/    /\::/    \:::\  /:::/    /\::/    \:::\  /:::/    /\:::\  /:::/    \::/    \:::\    \  /:::/    / \::/    /|::|  /:::/   / ')
    print (' \/____/ \:::\/:::/    /  \/____/ \:::\/:::/    /  \/____/ \:::\/:::/    /  \/____/ \:::\/:::/    /  \:::\/:::/    / \/____/ \:::\    \/:::/    /   \/____/ |::| /:::/   /  ')
    print ('          \::::::/    /            |::|    |           /:::/    /            \::::::/    /            \::::::/    /            \:::\    /:::/    /            |::|/:::/   /   ')
    print ('           \::::/    /             |::|    |          /:::/    /              \::::/    /              \::::/____/              \:::\__/:::/    /             |::::::/   /    ')
    print ('           /:::/    /              |::|    |         \::/    /               /:::/    /                \:::\    \                \::::::::/    /              |:::::/   /     ')
    print ('          /:::/    /               |::|    |          \/____/               /:::/    /                  \:::\    \                \::::::/    /               |::::/   /      ')
    print ('         /:::/    /                |::|    |                               /:::/    /                    \:::\    \                \::::/    /                /:::/   /       ')
    print ('        /:::/    /                 \::|    |                              /:::/    /                      \:::\____\                \::/____/                /:::/   /        ')
    print ('        \::/    /                   \:|    |                              \::/    /                        \::/    /                 ~~                      \::/   /         ')
    print ('         \/____/                     \|___|                                \/____/                          \/____/                                           \/____/          ')
    # Previous attempt at a portable key-wait, kept for reference:
    #try:
    #    input ('Press enter to continue:')
    #except NameError:
    #    pass
    # NOTE(review): `read` is a shell builtin -- this works only where
    # os.system runs a POSIX shell (not on Windows); confirm target platform.
    os.system('read -s -n 1 -p "Press any key to continue..."')
    # NOTE(review): bare `print` again -- a no-op in Python 3.
    print
# Python's built-in itertools module provides very useful functions for
# operating on iterable objects.
import itertools
# Count natural numbers starting from 10
naturals = itertools.count(10)
# BUG FIX: the Iterator ABC alias was removed from `collections` in
# Python 3.10; it must be imported from collections.abc instead.
from collections.abc import Iterator
# Check the type of naturals
print(isinstance(naturals, Iterator))
for x in naturals:
    if x > 70:
        break
    print(x)
# cycle() repeats the supplied sequence endlessly:
cycles = itertools.cycle("szhualeilei")
print(isinstance(cycles, Iterator))
n = 0
for x in cycles:
    # print(x)
    n += 1
    if n > 100:
        break
# repeat: produce the same value a fixed number of times
repeats = itertools.repeat("szhua", 10)
for x in repeats:
    print(x)
# Squares of numbers divisible by both 2 and 3 (fixed missing space in `==0and`)
inter = (x ** 2 for x in range(100) if x % 2 == 0 and x % 3 == 0)
# Use takewhile to truncate the iterator:
ns = itertools.takewhile(lambda x: x < 1000, inter)
print(list(ns))
# chain()
# chain() strings several iterables together into one larger iterator:
print(list(itertools.chain("fjksjdfk", "abcdefghijklmn")))
# groupby()
# groupby() collects adjacent repeated elements into groups:
for key, value in itertools.groupby("aaajjjfdsfkkkfffff"):
    print(str(key).upper(), list(value))
| 918 | 478 |
# 该模块提供了一个数据库的通用CURD接口
# 通过该接口能够快速进行数据库的增删查改功能
# 该模块还提供了获取数据库所有表表名,各表表头的接口
import traceback
import pymysql
from userManage import commmitChangeToUserlist, privilegeOfUser, ifManage
global db
# TODO: improve the robustness
def checkValibleTableName(targetTable, user):
    """Return True when `user` may operate on `targetTable`.

    The sensitive 'user_list' table is restricted to superusers (when a user
    is supplied); any other non-None table name is accepted.
    """
    if targetTable == 'user_list' and user is not None:
        return user in getSuperUser()
    return targetTable is not None
def commitChangeToDatabase(oldInfo, newInfo, targetTable, user = None):
    """Apply a change via changeProcess and wrap the numeric status code
    together with its human-readable message."""
    returnStatu = changeProcess(oldInfo, newInfo, targetTable, user)
    # Translate every known status code into its user-facing message
    statusMessages = {
        0: '错误的数据格式!',
        -1: '该表不存在!',
        -2: '非法访问:未经过用户认证',
        -3: '非法访问:用户无该权限',
        -4: '错误的数据格式:管理员用户拥有增删查改所有权限',
        -5: '用户名重复',
        1: '运行成功!',
    }
    info = statusMessages.get(returnStatu, '未知错误!')
    return {'statu': returnStatu, 'info': info}
# this function call updataItem, insertItem, deleteItem
# according to the oldInfo and newInfo
# if oldInfo is None, call insert
# if newInfo is None, call delete
# else, call updata
#
# OK code: return 1
# error code:
# 0 : sql run time error
# -1 : invalid target table
# -2 : user is None
# -3 : user has not target privilege
# -4 : manager's privilege is not 'YYYY'
# -5 : user name chongfu
def changeProcess(oldInfo, newInfo, targetTable, user = None):
    """Dispatch an insert/update/delete on `targetTable` after checking the
    user's privileges; returns the status codes listed in the comment above."""
    # Reject unauthenticated access outright
    if user == None:
        return -2
    userPrivilege = privilegeOfUser(user).get('privilege')
    global db
    # (Re)connect on every call; the helper functions share this global handle
    db = pymysql.connect(host="localhost", port=3306, db="yukiyu", user="jhchen", password="123456",charset='utf8')
    if oldInfo == None and newInfo == None or not checkValibleTableName(targetTable, user):
        print('error ! invalid change!')
        print('oldInfo:', oldInfo)
        print('newInfo:', newInfo)
        print('targetTable:', targetTable)
        return -1
    returnStatus = 0
    # Changes to the user table go through the dedicated user-management path
    # and require manager rights
    if targetTable == 'user_list':
        if ifManage(user) == 'Y':
            return commmitChangeToUserlist(oldInfo, newInfo)
        else:
            return -3
    # userPrivilege is a flag string; index 1 gates insert/update and index 3
    # gates delete.  NOTE(review): presumably index 0 is select -- confirm
    # against userManage.privilegeOfUser.
    if oldInfo == None:
        # No previous row supplied -> this is an insert
        if userPrivilege[1] == 'Y':
            returnStatus = insertItem(newInfo, targetTable)
        else:
            returnStatus = -3
    elif newInfo == None:
        # No replacement row supplied -> this is a delete
        if userPrivilege[3] == 'Y':
            returnStatus = deleteItem(oldInfo, targetTable)
        else:
            returnStatus = -3
    else:
        # Both rows supplied -> this is an update
        if userPrivilege[1] == 'Y':
            returnStatus = updateItem(oldInfo, newInfo, targetTable)
        else:
            returnStatus = -3
    return returnStatus
# shuffle : ((a,),(b,),(c,)) --> (a, b, c)
def signColumnsShuffle(input):
    """Flatten a sequence of single-column rows: ((a,), (b,), (c,)) -> [a, b, c]."""
    return [row[0] for row in input]
# shuffle datetime.date to str: 2021-02-20
def datetimeShffle(input):
    """Convert every cell of a 2-D result set to str (e.g. datetime.date
    becomes '2021-02-20'), returning a list of lists."""
    return [[str(cell) for cell in row] for row in input]
def getTableHead(tableName):
    """Return the column names of `tableName`, queried from information_schema."""
    print('start to get table head from ' + tableName)
    cur = db.cursor()
    # NOTE(review): table names cannot be bound as SQL parameters, so this is
    # built by interpolation -- validate tableName upstream (injection risk).
    sql = "select column_name from information_schema.columns as col where col.table_name='%s'"%tableName
    print('start to execute:')
    print(sql)
    cur.execute(sql)
    columns = signColumnsShuffle(cur.fetchall())
    print('success ! \nget result: ')
    print(columns)
    cur.close()
    return columns
def getTableData(tableName):
    """Return every row of `tableName` with all cells stringified."""
    cur = db.cursor()
    print('start to get table data from ' + tableName)
    sql = "select * from %s"%tableName
    # print('start to execute:')
    # print(sql)
    cur.execute(sql)
    rows = datetimeShffle(cur.fetchall())
    print(rows)
    cur.close()
    return rows
def getSuperUser():
    """Return the names of all users flagged as managers (if_manager = 'Y')."""
    cur = db.cursor()
    sql = "select name from user_list where if_manager = 'Y'"
    print('start to execute:')
    print(sql)
    cur.execute(sql)
    names = signColumnsShuffle(cur.fetchall())
    print('execute success!')
    print('result:', names)
    cur.close()
    return names
def getTableNames(user):
    """Return the names of all tables visible to `user`, main table first."""
    cur = db.cursor()
    print('start to get table names from yukiyu')
    sql = "select table_name from information_schema.tables as tb where tb.table_schema = 'yukiyu'"
    cur.execute(sql)
    names = signColumnsShuffle(cur.fetchall())
    print('success ! \nget result: ')
    print(names)
    cur.close()
    # Only superusers are allowed to see the user table
    if user not in getSuperUser():
        names.remove('user_list')
    # Move the main table to the front of the list
    names.remove('bangumi_list')
    names.insert(0, 'bangumi_list')
    return names
# get all tables, including table names and data
def getDatabase(target, user):
    """Serve the table requests described by `target` (a mapping of query
    args) for `user`, honoring the user's select privilege."""
    global db
    # (Re)connect; the helper functions below share this global handle
    db = pymysql.connect(host="localhost", port=3306, db="yukiyu", user="jhchen", password="123456",charset='utf8')
    print('get url args:')
    print(target)
    res = {}
    # First flag of the privilege string gates SELECT access
    selectPriv = privilegeOfUser(user).get('privilege')[0]
    for key in target:
        if target[key] != 'tables':
            # Fetch the requested table's header (column names)
            res[target[key]+'Header'] = getTableHead(target[key])
            # Fetch the table's rows, but only if the user may select
            if selectPriv == 'Y':
                res[target[key]] = getTableData(target[key])
            else:
                res[target[key]] = None
        else:
            # Fetch the names of every table in the database
            res['tableList'] = getTableNames(user)
    return res
# return the string: key1=value1 seperate key2=valuue2...
def getKeyValueString(name, data, seperate=','):
    """Build "k1='v1' <sep> k2='v2' ..." from parallel key/value sequences."""
    joiner = ' ' + seperate + ' '
    pairs = [name[i] + "='" + str(data[i]) + "'" for i in range(len(name))]
    return joiner.join(pairs)
# return the string: value1 seperate value2...
# if strlization is True, when the data[i] is str, the value will be: 'value'
def getValueString(data, seperate=',', strlization = False):
    """Build "v1 <sep> v2 ..."; wrap each value in single quotes when
    strlization is True."""
    joiner = ' ' + seperate + ' '
    quote = "'" if strlization else ''
    return joiner.join(quote + str(value) + quote for value in data)
def updateItem(oldInfo, newInfo, targetTable):
    """UPDATE the row of `targetTable` matching `oldInfo` to `newInfo`.

    Returns 1 on success, 0 on failure (after rolling back).
    """
    tableHead = getTableHead(targetTable)
    setField = getKeyValueString(tableHead, newInfo, ',')
    whereField = getKeyValueString(tableHead, oldInfo, 'and')
    cursor = db.cursor()
    returnStatus = 0
    # NOTE(review): SQL assembled by string interpolation from user-supplied
    # values -- SQL injection risk; pymysql parameter binding should be used.
    sql = """
    update %s
    set %s
    where %s
    """%(targetTable, setField, whereField)
    try:
        print('start to execute:')
        print(sql)
        cursor.execute(sql)
        db.commit()
        print('success !')
        returnStatus = 1
    except Exception:
        print('updata error !')
        db.rollback()
        # BUG FIX: traceback.print_exception() requires arguments and would
        # itself raise TypeError here; print_exc() matches insert/delete.
        traceback.print_exc()
        returnStatus = 0
    db.close()
    return returnStatus
def insertItem(newInfo, targetTable):
    """INSERT `newInfo` (one value per column, in table-column order) into
    `targetTable`; returns 1 on success, 0 on failure (after rolling back)."""
    tableHeadStr = getValueString(getTableHead(targetTable))
    valueStr = getValueString(newInfo,strlization=True)
    cursor = db.cursor()
    # NOTE(review): SQL assembled by string interpolation from user-supplied
    # values -- SQL injection risk; pymysql parameter binding should be used.
    sql = """
    insert into %s
    (%s)
    values
    (%s)
    """%(targetTable, tableHeadStr, valueStr)
    returnStatus = 0
    try:
        print('start to execute:')
        print(sql)
        cursor.execute(sql)
        db.commit()
        print('success !')
        returnStatus = 1
    except:
        print('insert error !')
        db.rollback()
        traceback.print_exc()
        returnStatus = 0
    db.close()
    return returnStatus
def deleteItem(oldInfo, targetTable):
    """DELETE the row of `targetTable` matching `oldInfo`; returns 1 on
    success, 0 on failure (after rolling back)."""
    tableHead = getTableHead(targetTable)
    whereField = getKeyValueString(tableHead, oldInfo, 'and')
    cursor = db.cursor()
    # NOTE(review): SQL assembled by string interpolation from user-supplied
    # values -- SQL injection risk; pymysql parameter binding should be used.
    sql = """
    delete from %s
    where %s
    """%(targetTable, whereField)
    returnStatus = 0
    try:
        print('start to execute:')
        print(sql)
        cursor.execute(sql)
        db.commit()
        print('success !')
        returnStatus = 1
    except:
        print('delete error !')
        db.rollback()
        traceback.print_exc()
        returnStatus = 0
    db.close()
    return returnStatus
def getUserList():
    """Return (name, password, user_id) tuples for every registered user.

    NOTE(review): this exposes passwords in plaintext -- they appear to be
    stored unhashed; confirm and consider hashing.
    """
    db = pymysql.connect(host="localhost", port=3306, db="yukiyu", user="jhchen", password="123456",charset='utf8')
    # BUG FIX: the connection and cursor were never closed, leaking one
    # connection per call; close both even if the query raises.
    try:
        cursor = db.cursor()
        sql = 'select name, password, user_id from user_list'
        cursor.execute(sql)
        res = cursor.fetchall()
        cursor.close()
    finally:
        db.close()
    return res
from __main__ import *
| 23 | 8 |
# Read a count N, then N integers, and print them in ascending order.
N = int(input())
values = [int(input()) for _ in range(N)]
for value in sorted(values):
    print(value)
from typing import Optional
import specs
import validatorlib as vlib
class ASAPValidator(vlib.BRValidator):
    # Always $lash and prosper!
    validator_behaviour = "asap"

    def attest(self, known_items) -> Optional[specs.Attestation]:
        """Attest honestly, as soon as 4 seconds into the assigned slot,
        at most once per slot; otherwise return None."""
        data = self.data
        # Only attest in the slot we were assigned to
        if data.current_attest_slot != data.slot:
            return None
        # Wait until at least 4 seconds into the slot
        seconds_into_slot = (self.store.time - self.store.genesis_time) % specs.SECONDS_PER_SLOT
        if seconds_into_slot < 4:
            return None
        # Never attest twice for the same slot
        if data.last_slot_attested == data.slot:
            return None
        return vlib.honest_attest(self, known_items)

    def propose(self, known_items) -> Optional[specs.SignedBeaconBlock]:
        """Propose honestly when scheduled for the current slot, at most
        once per slot; otherwise return None."""
        data = self.data
        # Only propose when this validator holds the duty for the current slot
        if not data.current_proposer_duties[data.slot % specs.SLOTS_PER_EPOCH]:
            return None
        # Never propose twice for the same slot
        if data.last_slot_proposed == data.slot:
            return None
        return vlib.honest_propose(self, known_items)
# -*- coding: utf-8 -*-
# show.py
# Copyright (c) 2016-?, Matěj Týč
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import argparse as ap
from imreg_dft import cli
from imreg_dft import reporting
# Human-readable names of the debug figures that can be displayed; the
# parenthesized character in each entry is its single-letter abbreviation.
TOSHOW = (
    "filtered input (I)mages",
    "filtered input images (S)pectra",
    "spectra (L)ogpolar transform",
    "(1) angle-scale phase correlation",
    "angle-scale transform (A)pplied",
    "(2) translation phase correlation",
    "(T)ile info",
)
# The abbreviations of the entries above, in the same order (consumed by
# the --display option and validated by _show_valid).
TOSHOW_ABBR = "isl1a2t"
def create_parser():
    """Build the argument parser: imreg's shared options plus report output
    options (prefix, file type, dpi, verbosity, TeX, size, display set)."""
    parser = ap.ArgumentParser()
    # Reuse the common image-registration options defined by the cli module
    cli.update_parser_imreg(parser)

    parser.add_argument("--prefix", default="reports")
    parser.add_argument("--ftype", choices=("png", "pdf"), default="png")
    parser.add_argument("--dpi", default=150, type=float)
    parser.add_argument("--terse", default=False, action="store_true",
                        help="Don't show every smallest thing.")
    parser.add_argument("--tex", default=False, action="store_true",
                        help="Use TeX to typeset labels (if applicable).")
    parser.add_argument("--size", default=5, type=float,
                        help="Base image element size [in]")
    # --display takes a subset of TOSHOW_ABBR selecting which figures to emit
    parser.add_argument(
        "--display", type=_show_valid, default=TOSHOW_ABBR,
        help="String composing of '{}', meaning respectively: {}."
        .format(TOSHOW_ABBR, ", ".join(TOSHOW)))
    return parser
def _show_valid(stri):
    """argparse ``type=`` validator for the --display option.

    Every character of *stri* must be one of the TOSHOW_ABBR abbreviation
    characters; the validated string is returned unchanged.

    BUG FIX: the original raised ``ap.ArgumentError`` with a single argument
    (its signature is ``ArgumentError(argument, message)``), which itself
    raised TypeError; argparse ``type=`` callables should raise
    ``ArgumentTypeError``.  It also used ``rstrip``, which only inspects
    trailing characters; check every character instead.
    """
    invalid = "".join(ch for ch in stri if ch not in TOSHOW_ABBR)
    if invalid:
        raise ap.ArgumentTypeError(
            "Argument contains invalid characters: {}".format(invalid))
    return stri
def main():
    """Parse the command line, configure the report globals, and run."""
    parser = create_parser()
    args = parser.parse_args()

    opts = cli.args2dict(args)
    reports = reporting.ReportsWrapper(args.display)

    # TeX typesetting only makes sense for vector (pdf) output.
    usetex = args.ftype == "pdf" and args.tex
    from matplotlib import rc
    if usetex:
        rc("text", usetex=True)
        rc("text.latex", unicode=True)
        reporting.TEXT_MODE = "tex"

    # Propagate rendering options to all reports.
    for key, value in (("dpi", args.dpi),
                       ("ftype", args.ftype),
                       ("size", args.size),
                       ("usetex", usetex),
                       ("terse", args.terse)):
        reports.set_global(key, value)

    opts["show"] = False
    opts["reports"] = reports
    opts["prefix"] = args.prefix

    cli.run(args.template, args.subject, opts)
    reporting.report_tile(reports, args.prefix)


if __name__ == "__main__":
    # Script entry point.
    main()
| 3,864 | 1,306 |
"""
Distributions (Re)generation Script
This script generates likelihood and cost distributions based on threat
intelligence data stored in a connected Neo4j graph database. It attempts to
do so for every possible permutation of (size, industry) values.
These are then consumed by `montecarlo.py`, which runs a Monte Carlo
simulation based on these figures.
Acknowledgements: Dr Dan Prince & Dr Chris Sherlock
"""
import os
import sys
import argparse
import warnings
import logging as log
from typing import Tuple
import itertools
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
from matplotlib import pyplot as plt
from scipy.stats import lognorm
from graph import GraphInterface as gi
# Used for logging, equivalent to `logging.WARNING` + 1, so that success
# messages are emitted even at the default (warning) log level.
SUCCESS = 31
# The arbitrary maximum number of incidents that an organisation can experience
# in a year.
MAX_ANNUAL_INCIDENTS = 8000
# Quantifies the quantitative boundaries for human-readable incident frequencies,
# which many sources (e.g., the CSBS 2020) use to present their results.
#
# 'None'            = 0
# 'Annually'        = 1
# 'Less than monthly' = 2–7
# 'Monthly'         = 8–17
# 'Weekly'          = 18–79
# 'Daily'           = 80–399
# 'More than daily' = 400–8000
#
# Each value is the *lower* bound of its band; "MAX" is a sentinel upper
# bound, not a real category.
BOUNDARIES = {
    "None": 0,
    "Once per year": 1,
    "Less than once a month": 2,
    "Once a month": 8,
    "Once a week": 18,
    "Once a day": 80,
    "Several times a day": 400,
    "MAX": MAX_ANNUAL_INCIDENTS,
}
# Output directory for generated plots; assigned from the CLI in main().
OUTPUT_DIR = None
# Whether to save a plot at each step; assigned from the CLI in main().
IMAGES = None
# pylint: disable=invalid-name
def _generate_new_incident_frequency_distribution(pairing: Tuple = (None, None)) -> int:
    r"""
    Generates a new incident frequency distribution.

    Returns 1 if a distribution was generated and stored, 0 otherwise.

    Notes
    -----
    (Re)generates the incident frequency distribution for a
    :math:`\left(\text{size}, \text{industry}\right)` pairing from the data in
    a Neo4j graph database.

    Currently this only produces log-normal distributions. Additional types of
    distribution can be implemented by overloading this method (by importing the
    `multipledispatch` package) and returning the values required for defining
    that distribution (e.g., :math:`\mu` and :math:`\sigma` instead of :math:`a`
    and :math:`b`).
    """
    log.info("Generating new incident frequency distribution for '%s'...", str(pairing))
    # Attempts to get the incident probabilities for the pairing from the graph
    # database; the trailing "MAX" boundary is a sentinel, hence [:-1].
    incident_frequency_probabilities = gi.get_incident_frequency_probabilities(
        list(BOUNDARIES.values())[:-1], pairing
    )
    if incident_frequency_probabilities is None:
        log.info(
            "No incident frequency distribution generated for '%s'.",
            str(pairing),
        )
        return 0
    log.debug(
        "Returned values are: incident frequency probabilities = %s",
        str(incident_frequency_probabilities),
    )
    # If values are found, generate a distribution: linearise the survival
    # function by regressing ln(1 - F(x)) on ln(x).
    Fs = np.cumsum(incident_frequency_probabilities)
    xs = np.log(list(BOUNDARIES.values())[1:])
    ys = np.log(1 - Fs)
    # BUG FIX: the original `pd.DataFrame(xs, ys)` built a frame with
    # *data*=xs and *index*=ys (there were no "xs"/"ys" columns); the formula
    # below then silently resolved `xs`/`ys` from the enclosing scope
    # instead of the frame.  Build the columns explicitly.
    data = pd.DataFrame({"xs": xs, "ys": ys})
    # pylint: disable=line-too-long
    # See <https://www.statsmodels.org/stable/_modules/statsmodels/stats/stattools.html#omni_normtest> for explanation
    # pylint: enable=line-too-long
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        fit = smf.ols(formula="ys ~ xs", data=data).fit()
    log.debug(fit.summary())
    # Get the parameters for the generated distribution and store them in the
    # graph database.  Label-based access avoids the deprecated positional
    # indexing of a pandas Series (`fit.params[0]`).
    alogb = fit.params["Intercept"]
    a = -fit.params["xs"]
    b = np.exp(alogb / a)
    gi.create_incident_frequency_distribution_node(pairing, a, b)
    log.log(
        SUCCESS,
        "New incident frequency distribution successfully generated for '%s'.",
        str(pairing),
    )
    return 1
# pylint: enable=invalid-name
def _generate_new_incident_costs_distribution(pairing: Tuple = (None, None)) -> int:
    r"""
    (Re)generates the incident cost distribution for a
    :math:`\left(\text{size}, \text{industry}\right)` pairing from the data in
    a Neo4j graph database.

    Returns 1 if a distribution was generated and stored, 0 otherwise.

    Currently this only produces log-normal distributions. Additional types of
    distribution can be implemented by overloading this method (by importing the
    `multipledispatch` package) and returning the values required for defining
    that distribution (e.g., :math:`\mu` and :math:`\sigma` instead of :math:`a`
    and :math:`b`).
    """
    # Plots the distribution for the average cost of incident(s) over 12 months
    log.info("Generating new incident cost distribution for '%s'...", str(pairing))
    incident_mean_cost, incident_median_cost = gi.get_incident_cost_averages(pairing)
    if incident_mean_cost is None or incident_median_cost is None:
        log.info(
            "No incident costs distribution generated for '%s'.",
            str(pairing),
        )
        return 0
    log.debug(
        "Returned values are: mean = %s, median = %s",
        str(incident_mean_cost),
        str(incident_median_cost),
    )
    # For a log-normal distribution mean = exp(mu + sigma^2/2) and
    # median = exp(mu), hence sigma = sqrt(2 * (ln(mean) - ln(median))).
    # BUG FIX: the original conditional expression bound too loosely
    # (`log(mean) - 0 if median == 0 else log(median)`), so whenever the
    # median was non-zero this computed sqrt(2 * log(median)) instead.
    log_median = 0 if incident_median_cost == 0 else np.log(incident_median_cost)
    log_stddev = np.sqrt(2 * (np.log(incident_mean_cost) - log_median))
    stddev = np.exp(log_stddev)
    _label_plot(
        "Average annual incident-with-outcome cost distribution", "Cost (£)", "Density"
    )
    # NOTE(review): the lognorm.pdf arguments pass log(mean) as the shape and
    # log(median) as `loc`, which does not match the parameters stored below.
    # Diagnostic plot only -- verify before relying on it.
    plt.plot(
        [
            lognorm.pdf(
                np.log(i),
                np.log(incident_mean_cost),
                np.log(incident_median_cost) if incident_median_cost > 0 else 0,
            )
            for i in range(1, 2500)
        ]
    )
    _save_plot("3 - cost dist")
    gi.create_incident_costs_distribution_node(pairing, incident_mean_cost, stddev)
    log.log(
        SUCCESS,
        "New incident costs distribution successfully generated for '%s'.",
        str(pairing),
    )
    return 1
def _generate_new_distributions(pairing: Tuple = (None, None)) -> Tuple:
    """(Re)generates the cost and likelihood distributions."""
    gi.__init__()
    log.info("Existing distributions deleted: %s", bool(gi.delete_distributions()))

    # If either size or industry is unspecified, gets all possible values.
    sizes = [pairing[0]] if pairing[0] is not None else gi.get_sizes()
    industries = [pairing[1]] if pairing[1] is not None else gi.get_industries()

    # Attempts to generate new distributions for every combination of size
    # and industry values, counting how many of each kind succeeded.
    generated_frequency = 0
    generated_costs = 0
    for pair in itertools.product(sizes, industries):
        generated_frequency += _generate_new_incident_frequency_distribution(pair)
        generated_costs += _generate_new_incident_costs_distribution(pair)
    return generated_frequency, generated_costs
def main():
    """Called when the script is run from the command-line.

    Parses the CLI options, configures logging and the output directory,
    then regenerates the distributions for the requested (size, industry)
    pairing.  Exits with status 0 on success.
    """
    # pylint: disable=global-statement
    global OUTPUT_DIR, IMAGES
    # pylint: enable=global-statement
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--size",
        help="Specify the org. size (default: None)",
        choices=["micro", "small", "medium", "large"],
        type=str,
        default=None,
    )
    parser.add_argument(
        "-i",
        "--industry",
        help="Specify the org. industry SIC code (top-level only, e.g. ‘C’ for "
        "Manufacturing’) (default: None)",
        # Top-level SIC sections are the letters A-U.
        choices=list(map(chr, range(65, 86))),
        # BUG FIX: `type=chr` raised TypeError on every use (argparse passes
        # the argument as a str, chr() needs an int); use str so the
        # choices check can actually run.
        type=str,
        default=None,
    )
    parser.add_argument(
        "-o",
        "--output",
        help="Specify the output directory (default: ./output/)",
        type=str,
        default=os.path.join(os.path.dirname(__file__), "output/"),
        metavar="DIRECTORY",
    )
    parser.add_argument(
        "-p",
        "--images",
        help="Output images at each step of the script (default: false, just "
        "output the final LEC image)",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "-v",
        "--verbose",
        help="Verbose console output (default: false)",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "-d",
        "--debug",
        help="Show debug console output (default: false)",
        action="store_true",
        default=False,
    )
    args = parser.parse_args()
    OUTPUT_DIR = args.output
    IMAGES = args.images
    size = args.size
    industry = args.industry
    # Debug output implies verbose output; otherwise warnings only.
    if args.debug:
        log.basicConfig(format="%(levelname)s: %(message)s", level=log.DEBUG)
        log.info("Debug output.")
    elif args.verbose:
        log.basicConfig(format="%(levelname)s: %(message)s", level=log.INFO)
        log.info("Verbose output.")
    else:
        log.basicConfig(format="%(levelname)s: %(message)s")
    if not os.path.isdir(OUTPUT_DIR):
        os.makedirs(OUTPUT_DIR)
    incidents_dists, costs_dists = _generate_new_distributions((size, industry))
    log.log(
        SUCCESS,
        "Successfully generated %s incident frequency distributions and %s "
        "incident costs distributions!",
        str(incidents_dists),
        str(costs_dists),
    )
    sys.exit(0)
def _label_plot(title="Untitled Plot", xlabel="x axis", ylabel="y axis") -> None:
    """Apply a title and axis labels to the current matplotlib figure."""
    for setter, text in ((plt.title, title), (plt.xlabel, xlabel), (plt.ylabel, ylabel)):
        setter(text)
def _save_plot(filename="untitled") -> None:
    """Save the current figure as a PNG (if image output is enabled) and
    clear it.

    BUG FIX: use os.path.join instead of string concatenation so that an
    OUTPUT_DIR supplied without a trailing separator still yields a valid
    path.
    """
    if IMAGES:
        plt.savefig(os.path.join(OUTPUT_DIR, filename + ".png"))
    plt.clf()
if __name__ == "__main__":
    # Script entry point.
    main()
| 9,897 | 3,129 |
# Greeting emitted when the script runs.
greeting = "hello from santa"
print(greeting)
| 26 | 9 |
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ddt
from poppy.common import util
from poppy.transport.pecan.models.response import health
from tests.unit import base
class TestDNSModel(base.TestCase):
    """Unit tests for the health.DNSModel response model."""

    def setUp(self):
        super(TestDNSModel, self).setUp()

    def test_dns_is_alive(self):
        """An alive DNS reports the string 'true'."""
        model = health.DNSModel(True)
        self.assertEqual('true', model['online'])

    def test_dns_is_not_alive(self):
        """A dead DNS reports the string 'false'."""
        model = health.DNSModel(False)
        self.assertEqual('false', model['online'])
class TestStorageModel(base.TestCase):
    """Unit tests for the health.StorageModel response model."""

    def setUp(self):
        super(TestStorageModel, self).setUp()

    def test_storage_is_alive(self):
        """Alive storage reports the string 'true'."""
        model = health.StorageModel(True)
        self.assertEqual('true', model['online'])

    def test_storage_is_not_alive(self):
        """Dead storage reports the string 'false'."""
        model = health.StorageModel(False)
        self.assertEqual('false', model['online'])
class TestProviderModel(base.TestCase):
    """Unit tests for the health.ProviderModel response model."""

    def setUp(self):
        super(TestProviderModel, self).setUp()

    def test_provider_is_alive(self):
        """An alive provider reports the string 'true'."""
        model = health.ProviderModel(True)
        self.assertEqual('true', model['online'])

    def test_provider_is_not_alive(self):
        """A dead provider reports the string 'false'."""
        model = health.ProviderModel(False)
        self.assertEqual('false', model['online'])
@ddt.ddt
class TestHealthModel(base.TestCase):
    """Unit tests for the aggregate health.HealthModel response."""

    def setUp(self):
        super(TestHealthModel, self).setUp()
        self.mock_controller = util.dict2obj(
            {'base_url': 'https://www.poppycdn.io/'})

    @ddt.file_data('health_map.json')
    def test_health(self, health_map):
        """All subsystems in the fixture are online."""
        model = health.HealthModel(self.mock_controller, health_map)
        storage_name = health_map['storage']['storage_name']
        dns_name = health_map['dns']['dns_name']
        self.assertEqual('true', model['storage'][storage_name]['online'])
        self.assertEqual('true', model['dns'][dns_name]['online'])

    @ddt.file_data('health_map_dns_not_available.json')
    def test_health_dns_not_available(self, health_map):
        """DNS down in the fixture maps to 'false'."""
        model = health.HealthModel(self.mock_controller, health_map)
        dns_name = health_map['dns']['dns_name']
        self.assertEqual('false', model['dns'][dns_name]['online'])

    @ddt.file_data('health_map_storage_not_available.json')
    def test_health_storage_not_available(self, health_map):
        """Storage down in the fixture maps to 'false'."""
        model = health.HealthModel(self.mock_controller, health_map)
        storage_name = health_map['storage']['storage_name']
        self.assertEqual('false', model['storage'][storage_name]['online'])

    @ddt.file_data('health_map_provider_not_available.json')
    def test_health_provider_not_available(self, health_map):
        """Each provider's reported state matches its fixture is_alive flag."""
        model = health.HealthModel(self.mock_controller, health_map)
        for provider in health_map['providers']:
            provider_name = provider['provider_name']
            expected = 'true' if provider['is_alive'] else 'false'
            self.assertEqual(expected,
                             model['providers'][provider_name]['online'])
| 3,939 | 1,207 |
# vim: set ts=2 expandtab:
# -*- coding: utf-8 -*-
"""
Module: info.py
Desc: print current stream info
Author: on_three
Email: on.three.email@gmail.com
DATE: Sat, Oct 4th 2014
This could become very elaborate, showing stream status (up/down)
and number of viewers, etc, but at present i'm just going to
display stream URL in it for reference.
"""
import string
import re
#from pytz import timezone
#from datetime import datetime
#import locale
#import time
from twisted.python import log
import credentials
# Matches ".i", ".info" or ".streaminfo", optionally followed by a single
# whitespace-free argument (captured as "data").
# NOTE(review): the `ur''` literal is Python 2-only syntax (SyntaxError on
# Python 3); this module appears to target Python 2.
COMMAND_REGEX_STR = ur'^(?P<command>\.i|\.info|\.streaminfo)( (?P<data>\S+)$)?'
COMMAND_REGEX = re.compile(COMMAND_REGEX_STR, re.UNICODE)
class Info(object):
    '''
    Display some stream data
    '''
    def __init__(self, parent):
        '''
        Store the owning bot so replies can be sent through it.
        '''
        self._parent = parent

    def is_msg_of_interest(self, user, channel, msg):
        '''
        PLUGIN API REQUIRED

        Return True when the message matches one of the
        .i/.info/.streaminfo commands, False otherwise.
        '''
        return re.match(COMMAND_REGEX, msg) is not None

    def handle_msg(self, user, channel, msg):
        '''
        PLUGIN API REQUIRED
        Handle message and return nothing
        '''
        self.display_stream_info(channel, user, msg)

    def display_stream_info(self, channel, user, msg):
        '''
        Announce every configured stream entry on the channel.
        '''
        for stream in credentials.STREAM_INFO:
            self._parent.say(channel, str(stream))
| 1,476 | 508 |
# 1: read the number whose square root we want
number = int(input("Enter a number to find the square root : "))
# 2: negative inputs have no real square root, so reject them
if number >= 0:
    # 3: exponent 0.5 computes the square root
    sq_root = number ** 0.5
    # 4: report the result
    print("Square root of {} is {} ".format(number, sq_root))
else:
    print("Please enter a valid number.")
| 229 | 85 |
# Copyright (C) 2019 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base64
import io
import time
import calendar
from resultsdbpy.controller.configuration import Configuration
from resultsdbpy.model.configuration_context_unittest import ConfigurationContextTest
from resultsdbpy.model.mock_repository import MockStashRepository, MockSVNRepository
from resultsdbpy.model.model import Model
class MockModelFactory(object):
    """Builds fully-populated mock `Model` instances for unit tests.

    Supplies canned commit histories (via the mock Stash/SVN repositories),
    canned layout-test results, and a small zip archive (base64-encoded
    below) that can be registered as a test-result archive.
    """
    # Base64-encoded zip of an `archive/` directory containing file.txt and
    # index.html (plus macOS `__MACOSX` metadata entries).
    ARCHIVE_ZIP = """UEsDBAoAAAAAAAtSBU8AAAAAAAAAAAAAAAAIABAAYXJjaGl2ZS9VWAwAZ2RIXWZkSF31ARQAUEsDBBQACAAIAA9SBU8AAAAAAAAAAAAAAAAQABAAYXJjaGl2ZS9maWxlLnR4dFVYDABovU1d
bmRIXfUBFABLSSxJBABQSwcIY/PzrQYAAAAEAAAAUEsDBAoAAAAAABRdCU8AAAAAAAAAAAAAAAAJABAAX19NQUNPU1gvVVgMACi+TV0ovk1d9QEUAFBLAwQKAAAAAAAUXQlPAAAAAAAA
AAAAAAAAEQAQAF9fTUFDT1NYL2FyY2hpdmUvVVgMACi+TV0ovk1d9QEUAFBLAwQUAAgACAAPUgVPAAAAAAAAAAAAAAAAGwAQAF9fTUFDT1NYL2FyY2hpdmUvLl9maWxlLnR4dFVYDABo
vU1dbmRIXfUBFABjYBVjZ2BiYPBNTFbwD1aIUIACkBgDJxAbMTAwegFpIJ+xhoEo4BgSEgRhgXXcAeIFaEqYoeICDAxSyfm5eokFBTmpejmJxSWlxakpKYklqcoBwVC1b4DYg4GBH6Eu
NzE5B2K+CUROFCFXWJpYlJhXkpmXypCd4hELUsUaKK4AVs0w95H9l352x+37375yVmg4n0+cf9BBob6BgYWxtWmKSUpSipGxtWNRckZmWWpMhZFBaElmTmZJpbWBs6GzkbOzpa6FpamF
romRm6Wuk7mFi66FqZuxiamLhauriSsDAFBLBwjEE3dr4AAAAHwBAABQSwMEFAAIAAgAzFwJTwAAAAAAAAAAAAAAABIAEABhcmNoaXZlL2luZGV4Lmh0bWxVWAwAor1NXaC9TV31ARQA
tVNdb+IwEHz3r9j2qZUCvfbt7hCSSQxYCnHOdsrxmBK3tRRilJj2+u9vbajKfejeDgliMruzM7PJ5GI0IpC6/Vtvn549XKXXcPfp9jPQ/b41wLvtGGjbQkQH6M1g+hfTjAkBaRo7+N4+
HLx1HdRdA4fBgO1gcId+a+KdB9vV/Rs8un43JPBq/TO4Pl7dwRPYucY+2m0dGBKoewN70++s96aBfe9ebIMH/1x7/DHI0rbu1XZPsHVdY0PTQGLXzvgvBG7Hv4kawD2+q9m6BusOg0cT
vkaVgbF+cC8BOtkngJ/Oebs1CeJ2gBbZAsnHwGjrVzU4ctvWdmf6MYG7P0XgsLMc3kWgv+aAwv6HDjj6izyN2x52pvP1+5pucAMO0R52tTe9rdvhI+y4okB7biGsWy+5AiXmek0lAzyX
UtzzjGUw2wAtyxxvFukYLqlC9BJokeF3Q4B9LyVTCoQEvipzjh1IIWmhOVNJaMqrjBeLBGaVhkJoyPmKayzTIsGxjPylD8QcVkymS/xLZzznehMnzrkuwrA5TqNQUql5WuVUEigrWQrF
IKjPuEpzylcsGwMKwKHA7lmhQS1pnp+7EdiZikJLjuKEVDBjKI/OEI8jig2SSZbqYOTjlGIwKCxPQJUs5XgIOTC0QeUmCVEgqWLfKqxCFDK6ogt0dfXvNEgIPa0kWwWxGIGqZkpzXWkG
CyGyGLJi8p6nTH2FXKgYVKVYgiM0TaIf5MCYEMfiWaV4DIwXmklZlZqL4hqWYo2BoEqKvVlMVhTRLe5DSNwq0oYcYvIJrJcMARmyjGnREIPC1FJ9XoYDMURNznxCwRY5X7AiZQEWgWbN
FbvGRXEVCvhx8JpuQFTRNdYET+R4Pnssk7hG4HOg2T0Pyk/VuHnFT49JjC1dnjIfk9FoSsjk2e/aKV5M3Zh+OvHWt2Zqu8b8GAdocnO8M7k5VZDJg2vepvENWxp8A+HV9W1zQSY3RwAr
A+VPUEsHCPbdMMviAgAAYQUAAFBLAwQUAAgACADMXAlPAAAAAAAAAAAAAAAAHQAQAF9fTUFDT1NYL2FyY2hpdmUvLl9pbmRleC5odG1sVVgMAKK9TV2gvU1d9QEUAGNgFWNnYGJg8E1M
VvAPVohQgAKQGAMnEBsxMDB6AWkgn7GGgSjgGBISBGGBddwB4gVoSpih4gIMDFLJ+bl6iQUFOal6OYnFJaXFqSkpiSWpygHBULVvgNiDgYEfoS43MTkHYr4JRE4UIVdYmliUmFeSmZfK
UL/XNxak6qLfEiGwaoa5j+y/9LM7bt//9pWzQsP5fOL8gw4K9Q0MLIytTVNMUpJSjIytHYuSMzLLUmMqjAxCSzJzMksqrQ2cDZ2NnJ0tdS0sTS10TYzcLHWdzC1cdC1M3YxNTF0sXF1N
XBkAUEsHCLRBGwrgAAAAfAEAAFBLAwQUAAgACAALUgVPAAAAAAAAAAAAAAAAEgAQAF9fTUFDT1NYLy5fYXJjaGl2ZVVYDABnZEhdZmRIXfUBFABjYBVjZ2BiYPBNTFbwD1aIUIACkBgD
JxAbMTAwCgFpIJ/RhYEo4BgSEgRhgXVsAeIJaEqYoOIeDAz8yfm5eokFBTmpermJyTkQ+T8QOVGEXGFpYlFiXklmXioDI0Ntye3fifMcHKZ8fXTEZauLLSPD3Ef2X/rZHbfvf/vKWaHh
fD4x7izUNzCwMLY2gAJrx6LkjMyy1JgKI4PQksyczJJKawNnQ2cjZ2dLXQtLUwtdEyM3S10ncwsXXQtTN2MTUxcLV1cTVwYAUEsHCAAolTbHAAAARAEAAFBLAQIVAwoAAAAAAAtSBU8A
AAAAAAAAAAAAAAAIAAwAAAAAAAAAAEDtQQAAAABhcmNoaXZlL1VYCABnZEhdZmRIXVBLAQIVAxQACAAIAA9SBU9j8/OtBgAAAAQAAAAQAAwAAAAAAAAAAECkgTYAAABhcmNoaXZlL2Zp
bGUudHh0VVgIAGi9TV1uZEhdUEsBAhUDCgAAAAAAFF0JTwAAAAAAAAAAAAAAAAkADAAAAAAAAAAAQP1BigAAAF9fTUFDT1NYL1VYCAAovk1dKL5NXVBLAQIVAwoAAAAAABRdCU8AAAAA
AAAAAAAAAAARAAwAAAAAAAAAAED9QcEAAABfX01BQ09TWC9hcmNoaXZlL1VYCAAovk1dKL5NXVBLAQIVAxQACAAIAA9SBU/EE3dr4AAAAHwBAAAbAAwAAAAAAAAAAECkgQABAABfX01B
Q09TWC9hcmNoaXZlLy5fZmlsZS50eHRVWAgAaL1NXW5kSF1QSwECFQMUAAgACADMXAlP9t0wy+ICAABhBQAAEgAMAAAAAAAAAABApIE5AgAAYXJjaGl2ZS9pbmRleC5odG1sVVgIAKK9
TV2gvU1dUEsBAhUDFAAIAAgAzFwJT7RBGwrgAAAAfAEAAB0ADAAAAAAAAAAAQKSBawUAAF9fTUFDT1NYL2FyY2hpdmUvLl9pbmRleC5odG1sVVgIAKK9TV2gvU1dUEsBAhUDFAAIAAgA
C1IFTwAolTbHAAAARAEAABIADAAAAAAAAAAAQKSBpgYAAF9fTUFDT1NYLy5fYXJjaGl2ZVVYCABnZEhdZmRIXVBLBQYAAAAACAAIAF4CAAC9BwAAAAA="""
    # Three weeks in seconds; results stamped this far back count as "old".
    THREE_WEEKS = 60 * 60 * 24 * 21

    @classmethod
    def create(cls, redis, cassandra, async_processing=False):
        """Construct a Model backed by the mock Stash/SVN repositories.

        TTLs are extended by the age of the oldest mock commit so that every
        canned commit stays within the model's retention window.
        """
        oldest_commit = time.time()
        for repo in [MockStashRepository.safari(), MockSVNRepository.webkit()]:
            for commits in repo.commits.values():
                for commit in commits:
                    # Track the oldest commit timestamp across all mock repos.
                    oldest_commit = min(oldest_commit, calendar.timegm(commit.timestamp.timetuple()))
        model = Model(
            redis=redis,
            cassandra=cassandra,
            repositories=[
                MockStashRepository.safari(redis=redis),
                MockSVNRepository.webkit(redis=redis),
            ],
            default_ttl_seconds=time.time() - oldest_commit + Model.TTL_WEEK,
            archive_ttl_seconds=time.time() - oldest_commit + Model.TTL_WEEK,
            async_processing=async_processing,
        )
        # Register every mock commit with the model's commit context.
        with model.commit_context, model.commit_context.cassandra.batch_query_context():
            for repository in model.commit_context.repositories.values():
                for branch_commits in repository.commits.values():
                    for commit in branch_commits:
                        model.commit_context.register_commit(commit)
        return model

    @classmethod
    def layout_test_results(cls):
        """Return a canned all-passing layout-test results dictionary."""
        default_result = {'expected': 'PASS', 'modifiers': '', 'actual': 'PASS', 'time': 1.2}
        return dict(
            details=dict(link='dummy-link'),
            run_stats=dict(tests_skipped=0),
            results={
                'fast': {
                    'encoding': {
                        'css-cached-bom.html': default_result,
                        'css-charset-default.xhtml': default_result,
                        'css-charset.html': default_result,
                        'css-link-charset.html': default_result,
                    }
                }
            },
        )

    @classmethod
    def iterate_all_commits(cls, model, callback):
        """Walk both mock repositories' commits in order, per branch.

        At every step *callback* is invoked with one commit from each
        repository; the repository whose next commit is oldest is advanced
        until both are exhausted.
        """
        repos = ('webkit', 'safari')
        branches = (None, 'safari-606-branch')
        for branch in branches:
            commit_index = {repo: 0 for repo in repos}
            commits_for_repo = {repo: sorted(model.commit_context.find_commits_in_range(repo, branch)) for repo in repos}
            # Align each repo's cursor so no repo starts behind the newest
            # "first" commit among the repositories.
            for repo in repos:
                while max([commits_for_repo[r][commit_index[r]] for r in repos]) > commits_for_repo[repo][commit_index[repo]]:
                    if commit_index[repo] + 1 >= len(commits_for_repo[repo]):
                        break
                    commit_index[repo] += 1
            while True:
                commits = []
                for repo in repos:
                    commits.append(commits_for_repo[repo][commit_index[repo]])
                callback(commits)
                # Pick the repository whose *next* commit is the oldest.
                youngest_next_repo = None
                for repo in repos:
                    if commit_index[repo] + 1 >= len(commits_for_repo[repo]):
                        continue
                    if not youngest_next_repo:
                        youngest_next_repo = repo
                        continue
                    if commits_for_repo[youngest_next_repo][commit_index[youngest_next_repo] + 1] > commits_for_repo[repo][commit_index[repo] + 1]:
                        youngest_next_repo = repo
                if not youngest_next_repo:
                    # Every repository is exhausted.
                    break
                commit_index[youngest_next_repo] += 1

    @classmethod
    def add_mock_results(cls, model, configuration=Configuration(), suite='layout-tests', test_results=None):
        """Upload *test_results* for each matching configuration and commit.

        Older platforms (Mac <= 10.13, iOS <= 11) are stamped three weeks in
        the past so they fall outside the "recent" results window.
        """
        if test_results is None:
            test_results = cls.layout_test_results()
        configurations = [configuration] if configuration.is_complete() else ConfigurationContextTest.CONFIGURATIONS
        with model.upload_context:
            current = time.time()
            old = current - cls.THREE_WEEKS
            for complete_configuration in configurations:
                if complete_configuration != configuration:
                    continue
                timestamp_to_use = current
                if (complete_configuration.platform == 'Mac' and complete_configuration.version <= Configuration.version_to_integer('10.13')) \
                        or (complete_configuration.platform == 'iOS' and complete_configuration.version <= Configuration.version_to_integer('11')):
                    timestamp_to_use = old
                cls.iterate_all_commits(model, lambda commits: model.upload_context.upload_test_results(complete_configuration, commits, suite=suite, test_results=test_results, timestamp=timestamp_to_use))

    @classmethod
    def process_results(cls, model, configuration=Configuration(), suite='layout-tests'):
        """Run result processing for every uploaded result set on both
        branches for each matching configuration."""
        configurations = [configuration] if configuration.is_complete() else ConfigurationContextTest.CONFIGURATIONS
        with model.upload_context:
            for complete_configuration in configurations:
                if complete_configuration != configuration:
                    continue
                for branch in (None, 'safari-606-branch'):
                    results_dict = model.upload_context.find_test_results(
                        configurations=[complete_configuration], suite=suite,
                        branch=branch, recent=False,
                    )
                    for config, results in results_dict.items():
                        for result in results:
                            model.upload_context.process_test_results(
                                configuration=config, commits=result['commits'], suite=suite,
                                test_results=result['test_results'], timestamp=result['timestamp'],
                            )

    @classmethod
    def add_mock_archives(cls, model, configuration=Configuration(), suite='layout-tests', archive=None):
        """Register *archive* (default: the canned ARCHIVE_ZIP) for each
        matching configuration and commit; old platforms get old stamps."""
        archive = archive or io.BytesIO(base64.b64decode(cls.ARCHIVE_ZIP))
        configurations = [configuration] if configuration.is_complete() else ConfigurationContextTest.CONFIGURATIONS
        with model.upload_context:
            current = time.time()
            old = current - cls.THREE_WEEKS
            for complete_configuration in configurations:
                if complete_configuration != configuration:
                    continue
                timestamp_to_use = current
                if (complete_configuration.platform == 'Mac' and complete_configuration.version <= Configuration.version_to_integer('10.13')) \
                        or (complete_configuration.platform == 'iOS' and complete_configuration.version <= Configuration.version_to_integer('11')):
                    timestamp_to_use = old
                cls.iterate_all_commits(model, lambda commits: model.archive_context.register(archive, complete_configuration, commits, suite=suite, timestamp=timestamp_to_use))
| 12,141 | 4,975 |
import logging
import time
from typing import List
import pytest
import pymq
from pymq import NoSuchRemoteError
from pymq.exceptions import RemoteInvocationError
from pymq.typing import deep_from_dict, deep_to_dict
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class EchoCommand:
    """Request object carrying a single string parameter for the echo RPCs."""

    param: str

    def __init__(self, param: str) -> None:
        super().__init__()
        self.param = param

    def __str__(self) -> str:
        description = "EchoCommand(%s)" % self.__dict__
        return description
class EchoResponse:
    """Response object carrying the string result of an echo RPC."""

    result: str

    def __init__(self, result: str) -> None:
        super().__init__()
        self.result = result

    def __str__(self) -> str:
        description = "EchoResponse(%s)" % self.__dict__
        return description
def void_function() -> None:
    """Remote endpoint that does nothing and returns None."""
    return None
def delaying_function() -> None:
    """Remote endpoint that blocks for 1.5 s (used by the timeout test)."""
    delay_seconds = 1.5
    time.sleep(delay_seconds)
def simple_remote_function(param) -> str:
    """Return a greeting built from *param*."""
    return "Hello {}!".format(param)
def simple_multiple_param_function(p1: int, p2: int) -> int:
    """Return the product of the two parameters."""
    product = p1 * p2
    return product
def simple_multiple_param_default_function(p1: int, p2: int = 3) -> int:
    """Return p1 * p2, where p2 defaults to 3."""
    result = p1 * p2
    return result
def simple_list_param_function(ls: List[int]) -> int:
    """Return the sum of the list elements (0 for an empty list)."""
    total = 0
    for value in ls:
        total += value
    return total
def echo_command_function(cmd: EchoCommand) -> str:
    """Return a greeting built from the command's param."""
    greeting = "Hello %s!" % cmd.param
    return greeting
def echo_command_response_function(cmd: EchoCommand) -> EchoResponse:
    """Wrap the greeting for the command's param in an EchoResponse."""
    greeting = "Hello %s!" % cmd.param
    return EchoResponse(greeting)
def error_function():
    """Always raise ValueError (used to test remote error propagation)."""
    raise ValueError("oh noes")
class RpcHolder:
    """Stateful RPC target whose greeting prefix is configurable."""

    def __init__(self, prefix="Hello") -> None:
        super().__init__()
        self.prefix = prefix

    def echo(self, cmd: EchoCommand) -> EchoResponse:
        """Return an EchoResponse greeting built from the stored prefix."""
        greeting = "%s %s!" % (self.prefix, cmd.param)
        return EchoResponse(greeting)
# noinspection PyUnresolvedReferences
class TestRpc:
    """Exercises pymq's RPC layer against the `bus` fixture.

    Every test carries a pytest timeout so a broken broker cannot hang the
    suite.
    """

    @pytest.mark.timeout(60)
    def test_assert_bus(self, bus):
        # gives localstack a chance to start
        assert bus

    @pytest.mark.timeout(2)
    def test_marshall_rpc_request(self, bus):
        """RpcRequest survives a to-dict / from-dict round trip intact."""
        request = pymq.RpcRequest("some_function", "callback_queue", ("simple_arg",))
        request_tuple = deep_to_dict(request)
        assert ("some_function", "callback_queue", ("simple_arg",), None) == request_tuple
        request_unmarshalled: pymq.RpcRequest = deep_from_dict(request_tuple, pymq.RpcRequest)
        assert "some_function" == request_unmarshalled.fn
        assert "callback_queue" == request_unmarshalled.response_channel
        assert ("simple_arg",) == request_unmarshalled.args

    @pytest.mark.timeout(2)
    def test_rpc_on_non_exposed_remote_raises_exception(self, bus):
        """`.rpc()` on a stub with no remote raises NoSuchRemoteError."""
        stub = bus.stub("simple_remote_function")
        with pytest.raises(NoSuchRemoteError):
            stub.rpc("test")

    @pytest.mark.timeout(2)
    def test_call_on_non_exposed_remote_returns_none(self, bus):
        """Calling a stub with no remote yields None rather than raising."""
        stub = bus.stub("simple_remote_function")
        assert stub("test") is None

    @pytest.mark.timeout(2)
    def test_void_function(self, bus):
        """A remote with no return value yields None."""
        bus.expose(void_function, channel="void_function")
        stub = bus.stub("void_function")
        result = stub()
        assert result is None

    @pytest.mark.timeout(2)
    def test_void_function_error(self, bus):
        """Calling a remote with a mismatched signature raises remotely."""
        bus.expose(void_function, channel="void_function")
        stub = bus.stub("void_function")
        with pytest.raises(RemoteInvocationError):
            stub(1, 2, 3)

    @pytest.mark.timeout(2)
    def test_simple_function(self, bus):
        bus.expose(simple_remote_function, channel="simple_remote_function")
        stub = bus.stub("simple_remote_function")
        result = stub("unittest")
        assert "Hello unittest!" == result

    @pytest.mark.timeout(2)
    def test_simple_multiple_param_function(self, bus):
        bus.expose(simple_multiple_param_function, channel="simple_multiple_param_function")
        stub = bus.stub("simple_multiple_param_function")
        result = stub(2, 3)
        assert 6 == result

    @pytest.mark.timeout(2)
    def test_simple_multiple_param_default_function(self, bus):
        """Default parameter values are honoured across the RPC boundary."""
        bus.expose(
            simple_multiple_param_default_function, channel="simple_multiple_param_default_function"
        )
        stub = bus.stub("simple_multiple_param_default_function")
        result = stub(2)
        assert 6 == result

    @pytest.mark.timeout(2)
    def test_simple_list_param_function(self, bus):
        """List arguments are marshalled correctly."""
        bus.expose(simple_list_param_function, channel="simple_list_param_function")
        stub = bus.stub("simple_list_param_function")
        result = stub([2, 3, 4])
        assert 9 == result

    @pytest.mark.timeout(2)
    def test_echo_command_function(self, bus):
        """Custom request objects are marshalled correctly."""
        bus.expose(echo_command_function, channel="echo_command_function")
        stub = bus.stub("echo_command_function")
        assert "Hello unittest!" == stub(EchoCommand("unittest"))

    @pytest.mark.timeout(2)
    def test_echo_command_response_function(self, bus):
        """Custom response objects are unmarshalled to the declared type."""
        bus.expose(echo_command_response_function, channel="echo_command_response_function")
        stub = bus.stub("echo_command_response_function")
        result = stub(EchoCommand("unittest"))
        assert isinstance(result, EchoResponse)
        assert result.result == "Hello unittest!"

    @pytest.mark.timeout(5)
    def test_timeout(self, bus):
        """A remote slower than the stub timeout raises RemoteInvocationError."""
        bus.expose(delaying_function, channel="delaying_function")
        stub = bus.stub("delaying_function", timeout=1)
        with pytest.raises(RemoteInvocationError):
            stub()

    @pytest.mark.timeout(2)
    def test_stateful_rpc(self, bus):
        """Bound methods can be exposed; instance state is preserved."""
        obj = RpcHolder()
        bus.expose(obj.echo)
        stub = bus.stub(RpcHolder.echo)
        result = stub(EchoCommand("unittest"))
        assert isinstance(result, EchoResponse)
        assert "Hello unittest!" == result.result

    @pytest.mark.timeout(2)
    def test_remote_decorator(self, bus):
        """@pymq.remote exposes the decorated function automatically."""
        @pymq.remote
        def remote_test_fn(param: str) -> str:
            return "hello %s" % param

        stub = bus.stub(remote_test_fn)
        assert "hello unittest" == stub("unittest")

    @pytest.mark.timeout(2)
    def test_error_function(self, bus):
        """Remote exceptions surface as RemoteInvocationError with the
        original exception type in the message."""
        bus.expose(error_function, channel="error_function")
        stub = bus.stub("error_function")
        with pytest.raises(RemoteInvocationError) as e:
            stub()
        e.match("ValueError")

    @pytest.mark.timeout(2)
    def test_expose_multiple_times_raises_error(self, bus):
        bus.expose(void_function)
        with pytest.raises(ValueError):
            bus.expose(void_function)

    @pytest.mark.timeout(2)
    def test_expose_multiple_by_channel_times_raises_error(self, bus):
        bus.expose(void_function, channel="void_function")
        with pytest.raises(ValueError):
            bus.expose(void_function, channel="void_function")

    @pytest.mark.timeout(2)
    def test_expose_after_unexpose(self, bus):
        # Re-exposing after unexpose must not raise.
        bus.expose(void_function)
        bus.unexpose(void_function)
        bus.expose(void_function)

    @pytest.mark.timeout(2)
    def test_expose_after_unexpose_by_channel(self, bus):
        bus.expose(void_function, channel="void_function")
        bus.unexpose("void_function")
        bus.expose(void_function, channel="void_function")

    @pytest.mark.timeout(2)
    def test_rpc_after_unexpose_raises_exception(self, bus):
        bus.expose(simple_remote_function, "simple_remote_function")
        stub = bus.stub("simple_remote_function")
        assert "Hello test!" == stub("test")
        bus.unexpose("simple_remote_function")
        with pytest.raises(NoSuchRemoteError):
            stub.rpc("test")

    @pytest.mark.timeout(5)
    def test_expose_after_unexpose_by_channel_calls_correct_method(self, bus):
        """Re-binding a channel to a new function routes to the new one."""
        def fn1():
            return 1

        def fn2():
            return 2

        bus.expose(fn1, channel="myfn")
        stub = bus.stub("myfn")
        assert 1 == stub()

        logger.debug("unexposing myfn")
        bus.unexpose("myfn")
        time.sleep(1)  # FIXME i have no idea why this is necessary
        logger.debug("exposing myfn")
        bus.expose(fn2, channel="myfn")

        logger.debug("creating second stub for myfn")
        stub = bus.stub("myfn")
        logger.debug("calling stub for myfn")
        assert 2 == stub()

    @pytest.mark.timeout(60)
    def test_expose_before_init(self, pymq_init):
        """Functions exposed before init become callable only after init."""
        def remote_fn():
            return "hello"

        pymq.expose(remote_fn, "remote_fn")

        with pytest.raises(ValueError):
            stub = pymq.stub("remote_fn")
            stub()

        pymq_init()

        stub = pymq.stub("remote_fn")
        assert "hello" == stub()
| 8,530 | 2,787 |
# Intro — tavern scene: the four suitors agree to work together.
# NOTE(review): each speaker variable (Elrick, Harek, Alida, Sullx) is
# re-assigned for every spoken line, so after this module executes only the
# LAST string per name remains bound — presumably a dialogue engine consumes
# this file line by line rather than importing it; confirm.
Intro_Tavern_Elrick = u"¿Qué os parece? ¿Trabajaremos juntos?"
Intro_Tavern_Alida = u"¿Pero si todos superamos las pruebas, quien se casara con la princesa?"
Harek = u"Supongo que la princesa se casará con quién más le guste"
Elrick = u"Sería lo más inteligente. Utilizar las pruebas para conocernos y ver nuestras virtudes, especialmente las mias, para elegir depués quien más le guste."
Alida = u"Te veo muy seguro de tus posibilidades"
Elrick = u"Seguro que la princesa busca un pretendiente inteligente que sepa tomar buenas decisiones. Y si además es chico guapo, mejor"
Harek = u"Puede que yo no sea muy guapo, pero si la princesa Melisenda me escogiera a mí me deviviría por hacerla sentir como una reina"
Alida = u"Eso es realmente hermoso, Harek. Pero cuando Melisenda regente Sapiensa necesitará a su lado alguien que se preocupe por el reino y sepa hacerlo prosperar"
Sullx = u"Elrick será un buen soberano, ya dirigimos hace años un ejercito de zombis"
Alida = u"¡Sapiensa no es un reino bélico! Hace tiempo que estamos en paz y no creo que nadie quiera que eso cambie."
Elrick = u"Sullx habla del pasado, yo tampoco quiero que se acabe la paz en Sapiensa"
Harek = u"Nada de guerra en Sapiensa"
Alida = u"Bien, parece que empezamos a entendernos. ¿Que hacemos para superar las pruebas?"
Elrick = u"..."
Harek = u"..."
Sullx = u"Yo conozco a una tarotixta, lo mismo ella nos da alguna pista"
Harek = u"¡Eso es una buena idea! Además seguro que tiene un gato negro. Me encantan los gatos."
Alida = u"Los gatos son más típicos de la brujas."
Elrick = u"Pero creo que ya no sigue adivinando, que ahora se dedica a la respostería"
Harek = u"¡Oh! ¿Es la mujer que vende galletitas de la fortuna?"
Elrick = u"¡Esa! ¿Habeís probado las bambas? Yo cuando llevo bien la línea me permito desayunar alguna. ¡Están de muerte!"
# Transition into the puzzle dialogue (narrator introduction).
# NOTE(review): `Puzles` is assigned twice in a row; only the second
# string survives after import — confirm the engine reads both.
Puzles = u"Seguramente os preguntareís como alguien tan prometedor como yo a acabado ayudando a un bárbaro amante de los gatos, a un nigromante con delirios de grandeza y a una cazadora disfrazada de hombrea conseguir el corazón de una princesa."
Puzles = u"Mi nombre es HADOKEN Puzles y esta es mi historia."
from keys.keys import pwd
import pymongo
from flask import Flask, request, abort
from flask_restful import Resource, Api, reqparse, marshal_with, fields
"""
DATABASE CONFIGURATION
"""
databaseName = "students"
connection_url = f'mongodb+srv://crispen:{pwd}@cluster0.3zay8.mongodb.net/{databaseName}?retryWrites=true&w=majority'
client = pymongo.MongoClient(connection_url)
cursor = client.list_database_names()
db = client.blob
"""
Student post args
"""
student_post_args = reqparse.RequestParser()
student_post_args.add_argument("name", type=str, help="name required", required=True)
student_post_args.add_argument("surname", type=str, help="surname required", required=True)
student_post_args.add_argument("student_number", type=int, help="student number required", required=True)
student_post_args.add_argument("course", type=str, help="name required", required=True)
student_post_args.add_argument("mark", type=int, help="surname required", required=True)
"""
Student patch args
* We want to be able only to update student course and mark
"""
"""
Resource Fields
"""
resource_fields = {
'_id': fields.String,
'name': fields.String,
'surname': fields.String,
'course': fields.String,
'mark': fields.Integer,
"student_number":fields.Integer,
}
app = Flask(__name__)
app.config["ENV"] = "development"
api = Api(app)
class GetPatchDeleteStudent(Resource):
    """Read, update and delete a single student, addressed by student number."""

    # PATCH-specific parser.  Per the module note ("We want to be able only
    # to update student course and mark"), only these two optional fields
    # are accepted.  FIX: the previous implementation reused
    # `student_post_args`, which wrongly required name/surname/
    # student_number/course/mark on every PATCH request.
    patch_args = reqparse.RequestParser()
    patch_args.add_argument("course", type=str, required=False)
    patch_args.add_argument("mark", type=int, required=False)

    @marshal_with(resource_fields)
    def get(self, id):
        """Return the student document, or 404 if absent."""
        cursor = db.students.find_one({"student_number": id})
        if cursor is None:
            abort(404, f"Student with student number {id} not found.")
        return cursor, 200

    def delete(self, id):
        """Delete the student; 204 on success, 404 if absent."""
        cursor = db.students.find_one({"student_number": id})
        if cursor is None:
            abort(404, f"Student with student number {id} not found.")
        db.students.delete_one({"student_number": id})
        return "", 204

    @marshal_with(resource_fields)
    def patch(self, id):
        """Update the student's course and/or mark; other fields are immutable."""
        args = self.patch_args.parse_args()
        cursor = db.students.find_one({"student_number": id})
        if cursor is None:
            abort(404, f"Student with student number {id} not found.")
        updates = {}
        # FIX: compare against None, not truthiness — a mark of 0 is a
        # legitimate update that the old `if args["mark"]:` silently dropped.
        if args["mark"] is not None:
            updates["mark"] = args["mark"]
        if args["course"] is not None:
            updates["course"] = args["course"]
        if updates:
            db.students.update_one({"student_number": id}, {"$set": updates})
        # FIX: 200 instead of 204 — a 204 response must not carry a body,
        # but @marshal_with serialises the updated document into one.
        return db.students.find_one({"student_number": id}), 200
class PostStudent(Resource):
    # Creates a new student; the student number acts as the unique key.
    @marshal_with(resource_fields)
    def post(self):
        """Create a student; 409 if the student number is already taken,
        201 with the stored document on success."""
        args = student_post_args.parse_args()
        cursor = db.students.find_one({"student_number": args["student_number"]})
        if cursor is None:
            """
            Insert the students to the database.
            """
            res = db.students.insert_one({
                "name": args["name"],
                "surname": args["surname"],
                "student_number": args["student_number"],
                "course": args["course"],
                "mark": args["mark"]
            })
            # NOTE(review): debug print — consider replacing with logging.
            print(res, type(res))
        else:
            abort(409, "Student number taken by another student")
        # Re-read so the marshalled response includes the generated _id.
        return db.students.find_one({"student_number": args["student_number"]}), 201
# Route registration: POST /student creates; /student/<id> reads,
# patches and deletes by student number.
api.add_resource(PostStudent, '/student')
api.add_resource(GetPatchDeleteStudent, '/student/<int:id>')

if __name__ == "__main__":
    # Development server only — not suitable for production use.
    app.run(debug=True)
import csv
import math
import datetime
def build_target_possession(player_file, till):
    """Sum a player's ball-possession time from a CSV export.

    The file is ';'-delimited; column index 2 holds "%H:%M:%S.%f"
    timestamps that alternate possession-start / possession-end events.
    Timestamps are converted to picoseconds and only events at or before
    `till` (picoseconds) are counted.

    Returns the total possession time in seconds (float).
    """
    possessions = []
    to_skip = 1  # first line is a header
    with open(player_file) as csv_file:
        reader = csv.reader(csv_file, delimiter=';')
        for row in reader:
            if to_skip:
                to_skip -= 1
                continue
            if not row:
                continue
            # A trailing statistics section terminates the event list.
            if row[0] == 'Statistic:' or row[0] == '':
                break
            t = datetime.datetime.strptime(row[2], "%H:%M:%S.%f")
            # Convert to picoseconds: whole seconds * 1e12, microseconds * 1e6.
            t = float(t.minute * 60 + t.hour * 60 * 60 + t.second) * math.pow(10, 12) + t.microsecond * math.pow(10, 6)
            if t <= till:
                possessions.append(t)
    possession_time = 0
    # Events must pair up begin/end; with an odd count, drop the trailing
    # unmatched "begin".  BUG FIX: the original did `possessions[-1:]`,
    # which kept ONLY the last element and discarded every matched pair,
    # yielding 0 whenever the event count was odd.
    if len(possessions) % 2 != 0:
        possessions = possessions[:-1]
    for i in range(0, len(possessions) - 1, 2):
        possession_time += possessions[i + 1] - possessions[i]
    return possession_time * 10 ** -12
def build_target_possessions_first_half():
    """Ground-truth possession time (seconds) per player, first half."""
    players = (
        "Nick Gertje",
        "Dennis Dotterweich",
        "Willi Sommer",
        "Philipp Harlass",
        "Roman Hartleb",
        "Erik Engelhardt",
        "Sandro Schneider",
        "Leon Krapf",
        "Kevin Baer",
        "Luca Ziegler",
        "Ben Mueller",
        "Vale Reitstetter",
        "Christopher Lee",
        "Leon Heinze",
        "Leo Langhans",
    )
    # Cut-off timestamp (picoseconds) marking the end of the first half:
    # [(12397999951273772 - 10753295594424116L) * 10 ** -12 + 3.092 + 0.9885] * 10**12
    cutoff = 1648784856849656
    return {
        player: build_target_possession(
            f"oracle/Ball Possession/1st Half/{player}.csv", cutoff
        )
        for player in players
    }
def build_target_possessions_second_half():
    """Ground-truth possession time (seconds) per player, second half."""
    players = (
        "Nick Gertje",
        "Dennis Dotterweich",
        "Niklas Welzlein",
        "Willi Sommer",
        "Philipp Harlass",
        "Roman Hartleb",
        "Erik Engelhardt",
        "Sandro Schneider",
        "Leon Krapf",
        "Kevin Baer",
        "Luca Ziegler",
        "Ben Mueller",
        "Vale Reitstetter",
        "Christopher Lee",
        "Leon Heinze",
        "Leo Langhans",
    )
    # Cut-off timestamp (picoseconds) marking the end of the second half:
    # [(14879639049922641 - 13086639146403495) * 10 ** -12 + 0.455 + 0.84795] * 10**12
    cutoff = 1794302853519146
    return {
        player: build_target_possession(
            f"oracle/Ball Possession/2nd Half/{player}.csv", cutoff
        )
        for player in players
    }
def compute_errors_first_half():
    """Absolute error (seconds) between oracle and predicted possession,
    per player, for the first half."""
    target_posssessions = build_target_possessions_first_half()
    predicted_possessions = {}
    with open('../results/to_validate/first_half/ball_possession.txt') as f:
        possessions = []
        for row in f:
            possessions.append(row)
    # Process newest-first so the first value seen per player is the
    # final (cumulative) possession total emitted for them.
    possessions = possessions[::-1]
    already_checked = set()
    for event in possessions:
        event_split = event.split(",")
        player = event_split[1]
        time = int(event_split[2])
        if player not in already_checked:
            predicted_possessions[player] = time * 10**-12  # picoseconds -> seconds
            already_checked.add(player)
    errors = {}
    for player, possession in target_posssessions.items():
        # I'm too lazy to rename where needed
        # (the prediction files spell the name without the double 'l')
        if player == 'Willi Sommer':
            player = 'Wili Sommer'
        if player not in predicted_possessions:
            continue
        errors[player] = abs(possession - predicted_possessions[player])
    return errors
def compute_errors_second_half():
    """Absolute error (seconds) between oracle and predicted possession,
    per player, for the second half."""
    target_posssessions = build_target_possessions_second_half()
    predicted_possessions = {}
    with open('../results/to_validate/second_half/ball_possession.txt') as f:
        possessions = []
        for row in f:
            possessions.append(row)
    # Process newest-first so the first value seen per player is the
    # final (cumulative) possession total emitted for them.
    possessions = possessions[::-1]
    already_checked = set()
    for event in possessions:
        event_split = event.split(",")
        player = event_split[1]
        time = int(event_split[2])
        if player not in already_checked:
            predicted_possessions[player] = time * 10**-12  # picoseconds -> seconds
            already_checked.add(player)
    errors = {}
    for player, possession in target_posssessions.items():
        # I'm too lazy to rename where needed
        # (the prediction files spell the name without the double 'l')
        if player == 'Willi Sommer':
            player = 'Wili Sommer'
        if player not in predicted_possessions:
            continue
        errors[player] = abs(possession - predicted_possessions[player])
    return errors
| 4,703 | 1,549 |
from directory_constants import urls
from django.conf import settings
from django.utils import translation
from directory_components import helpers
def ga360(request):
    """Template context for GA360: site language, user id and login state."""
    is_logged_in = helpers.get_is_authenticated(request)
    user = helpers.get_user(request)

    ga360_data = {
        'site_language': translation.get_language(),
        'user_id': None,
        'login_status': is_logged_in,
    }
    # Only authenticated users with a hashed uuid get a user id.
    if is_logged_in and hasattr(user, 'hashed_uuid'):
        ga360_data['user_id'] = user.hashed_uuid
    # The business unit is optional per-service configuration.
    if hasattr(settings, 'GA360_BUSINESS_UNIT'):
        ga360_data['business_unit'] = settings.GA360_BUSINESS_UNIT
    return {'ga360': ga360_data}
def sso_processor(request):
    """Expose SSO user state and proxied auth URLs, each carrying a
    `next` parameter pointing back to the current page."""
    current_url = request.build_absolute_uri()

    def with_next(base_url):
        # Append the current page as the post-auth redirect target.
        return helpers.add_next(base_url, current_url)

    return {
        'sso_user': helpers.get_user(request),
        'sso_is_logged_in': helpers.get_is_authenticated(request),
        'sso_login_url': with_next(settings.SSO_PROXY_LOGIN_URL),
        'sso_register_url': with_next(settings.SSO_PROXY_SIGNUP_URL),
        'sso_logout_url': with_next(settings.SSO_PROXY_LOGOUT_URL),
        'sso_profile_url': settings.SSO_PROFILE_URL,
    }
def analytics(request):
    """Google Tag Manager / UTM settings for templates."""
    setting_names = (
        'GOOGLE_TAG_MANAGER_ID',
        'GOOGLE_TAG_MANAGER_ENV',
        'UTM_COOKIE_DOMAIN',
    )
    return {
        'directory_components_analytics': {
            name: getattr(settings, name) for name in setting_names
        }
    }
def cookie_notice(request):
    """Domain used by the cookie-notice banner to scope its privacy cookie."""
    return {
        'directory_components_cookie_notice': {
            'PRIVACY_COOKIE_DOMAIN': settings.PRIVACY_COOKIE_DOMAIN
        }
    }
def header_footer_processor(request):
    """Assemble the full URL map used by the shared header/footer templates.

    Merges three groups — magna, advice and general header/footer URLs —
    into one flat dict; later groups override earlier keys on collision.
    Also exposes the MAGNA_HEADER feature switch.
    """
    magna_header = settings.MAGNA_HEADER or False
    magna_urls = {
        'magna_home': urls.magna.HOME,
        'magna_where_to_export': urls.magna.WHERE_TO_EXPORT,
        'magna_learn_to_export': urls.magna.LEARN_TO_EXPORT,
        'magna_exportplan_dashboard': urls.magna.EXPORT_PLAN_DASHBOARD,
        'magna_search': urls.magna.SEARCH,
        'magna_privacy_and_cookies': urls.magna.PRIVACY_AND_COOKIES,
        'magna_terms_and_conditions': urls.magna.TERMS_AND_CONDITIONS,
        'magna_accessibility': urls.magna.ACCESSIBILITY,
        'magna_cookie_preference_settings': urls.magna.COOKIE_PREFERENCE_SETTINGS,
        'magna_contact_us': urls.magna.CONTACT_US,
        'magna_performance': urls.magna.PERFORMANCE_DASHBOARD,
        'magna_account': urls.magna.ACCOUNT,
        'magna_advice': urls.magna.ADVICE,
        'magna_markets': urls.magna.MARKETS,
        'magna_services': urls.magna.SERVICES,
        'magna_international': urls.magna.INTERNATIONAL,
    }
    advice_urls = {
        'create_an_export_plan': urls.domestic.ADVICE_CREATE_AN_EXPORT_PLAN,
        'find_an_export_market': urls.domestic.ADVICE_FIND_AN_EXPORT_MARKET,
        'define_route_to_market': urls.domestic.ADVICE_DEFINE_ROUTE_TO_MARKET,
        'get_export_finance_and_funding': urls.domestic.ADVICE_GET_EXPORT_FINANCE_AND_FUNDING,
        'manage_payment_for_export_orders': urls.domestic.ADVICE_MANAGE_PAYMENT_FOR_EXPORT_ORDERS,
        'prepare_to_do_business_in_a_foreign_country': urls.domestic.ADVICE_PREPARE_TO_DO_BUSINESS_IN_A_FOREIGN_COUNTRY,
        'manage_legal_and_ethical_compliance': urls.domestic.ADVICE_MANAGE_LEGAL_AND_ETHICAL_COMPLIANCE,
        'prepare_for_export_procedures_and_logistics': urls.domestic.ADVICE_PREPARE_FOR_EXPORT_PROCEDURES_AND_LOGISTICS,
    }
    header_footer_urls = {
        'about': urls.domestic.ABOUT,
        'dit': urls.domestic.DIT,
        'get_finance': urls.domestic.GET_FINANCE,
        'ukef': urls.domestic.GET_FINANCE,
        'performance': urls.domestic.PERFORMANCE_DASHBOARD,
        'privacy_and_cookies': urls.domestic.PRIVACY_AND_COOKIES,
        'terms_and_conditions': urls.domestic.TERMS_AND_CONDITIONS,
        'accessibility': urls.domestic.ACCESSIBILITY,
        'cookie_preference_settings': urls.domestic.COOKIE_PREFERENCE_SETTINGS,
        'fas': urls.international.TRADE_FAS,
        'advice': urls.domestic.ADVICE,
        'markets': urls.domestic.MARKETS,
        'search': urls.domestic.SEARCH,
        'services': urls.domestic.SERVICES,
        'domestic_news': urls.domestic.GREAT_DOMESTIC_NEWS,
        'international_news': urls.international.NEWS,
        'how_to_do_business_with_the_uk': urls.international.EXPAND_HOW_TO_DO_BUSINESS,
        'industries': urls.international.ABOUT_UK_INDUSTRIES,
        'market_access': urls.domestic.HOME / 'report-trade-barrier'
    }
    # Merge order: general first, then advice, then magna (magna wins collisions).
    header_footer_urls = {**header_footer_urls, **advice_urls, **magna_urls}
    return {'magna_header': magna_header, 'header_footer_urls': header_footer_urls}
def invest_header_footer_processor(request):
    """Header/footer links specific to the Invest variant of the site."""
    invest_header_footer_urls = {
        'industries': urls.international.ABOUT_UK_INDUSTRIES,
        'uk_setup_guide': urls.international.EXPAND_HOW_TO_SETUP,
    }
    return {'invest_header_footer_urls': invest_header_footer_urls}
def urls_processor(request):
    """Cross-service URL map (`services_urls`) shared by all templates."""
    return {
        'services_urls': {
            'contact_us': urls.domestic.CONTACT_US,
            'contact_us_international': urls.international.CONTACT_US,
            'events': urls.domestic.EVENTS,
            'exopps': urls.domestic.EXPORT_OPPORTUNITIES,
            'exred': urls.domestic.HOME,
            'great_domestic': urls.domestic.HOME,
            'great_international': urls.international.HOME,
            'fab': urls.domestic.FIND_A_BUYER,
            'fas': urls.international.TRADE_FAS,
            'feedback': urls.domestic.FEEDBACK,
            'office_finder': urls.domestic.OFFICE_FINDER,
            'invest': urls.international.EXPAND_HOME,
            'soo': urls.domestic.SELLING_OVERSEAS,
            'sso': urls.domestic.SINGLE_SIGN_ON,
            'uk_setup_guide': urls.international.EXPAND_HOW_TO_SETUP,
            'isd': urls.international.EXPAND_ISD_HOME,
        }
    }
def feature_flags(request):
    """Expose the FEATURE_FLAGS settings dict to templates as `features`."""
    return {'features': settings.FEATURE_FLAGS}
| 6,065 | 2,242 |
# encoding='utf-8'
'''
/**
* This is the solution of No.316 problem in the LeetCode,
* the website of the problem is as follow:
* https://leetcode-cn.com/problems/smallest-subsequence-of-distinct-characters
* <p>
* The description of problem is as follow:
* ==========================================================================================================
* 返回字符串 text 中按字典序排列最小的子序列,该子序列包含 text 中所有不同字符一次。
* <p>
* 示例 1:
* <p>
* 输入:"cdadabcc"
* 输出:"adbc"
* 示例 2:
* <p>
* 输入:"abcd"
* 输出:"abcd"
* <p>
* 来源:力扣(LeetCode)
* 链接:https://leetcode-cn.com/problems/smallest-subsequence-of-distinct-characters
* 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
* ==========================================================================================================
*
* @author zhangyu (zhangyuyu417@gmail.com)
*/
'''
class Solution:
    def remove_duplicate_letters(self, s: str) -> str:
        """Return the lexicographically smallest subsequence of `s` that
        contains every distinct character exactly once (LeetCode 316/1081).

        Monotonic-stack approach: greedily pop a larger stack top whenever
        the popped character still occurs later in the string.
        """
        # Occurrences of each character not yet consumed.
        remaining = self.get_num_map(s)
        in_stack_map = {}
        stack = []
        for ch in s:
            remaining[ch] -= 1
            if in_stack_map.get(ch):
                continue
            # BUG FIX: pop only while the CURRENT TOP still occurs later
            # (remaining[stack[-1]] > 0).  The original tested
            # remaining[ch] > 0, which wrongly blocked valid pops —
            # e.g. 'bcac' produced 'bca' instead of the correct 'bac'.
            while stack and ch < stack[-1] and remaining[stack[-1]] > 0:
                in_stack_map[stack.pop()] = False
            stack.append(ch)
            in_stack_map[ch] = True
        return ''.join(stack)

    def get_num_map(self, s: str):
        """Count occurrences of each character in `s` (char -> count)."""
        num_map = {}
        for ch in s:
            num_map[ch] = num_map.get(ch, 0) + 1
        return num_map
if __name__ == '__main__':
    # Smoke test: example 1 from the problem statement.
    s = 'cdadabcc'
    solution = Solution()
    result = solution.remove_duplicate_letters(s)
    assert result == 'adbc'
| 1,962 | 744 |
# -*- coding: utf-8 -*-
import os
from setuptools import setup
# Read package metadata (version, long description) from files that live
# next to this setup.py, then declare the distribution.
current_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(current_directory, "VERSION"), "r", encoding="utf-8") as f:
    version = f.read()
with open(os.path.join(current_directory, "README.rst"), "r", encoding="utf-8") as f:
    long_description = f.read()
setup(
    name="interactive-shell",
    version=version,
    description="Interactive shell classes to easily integrate a terminal in application.",
    long_description=long_description,
    license="MIT License",
    author="Julien Vaslet",
    author_email="julien.vaslet@gmail.com",
    url="https://github.com/julienvaslet/interactive-shell",
    packages=["interactive_shell"],
    install_requires=[],  # no runtime dependencies
    scripts=[],
    classifiers=[
        "Development Status :: 1 - Planning",
        "Environment :: Console",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.7",
        "Topic :: Software Development",
        "Topic :: Terminals"
    ]
)
| 1,055 | 330 |
#!/usr/bin/env python
"""
generate the planet market
"""
from spacetrading.create_svg.svg_commands import Svg
from spacetrading.create_svg import generate_svg_symbols
def draw_planet(svg, planet, name, fill_colour):
    """Draw one planet's market widget onto `svg` (mutated in place).

    Renders a rounded bounding box, the price digits 1-7, the
    Demand/Supply header ellipses, and two rows of six resource slots.
    `name` namespaces every generated SVG element id.
    """
    # Layout constants (pixel geometry of the widget).
    x_shift = 30
    y_shift = [0, 30, 80]
    x_offset = [1.5*x_shift, 1.5*x_shift, x_shift/2]
    y_offset = 30
    scale_factor = 3/2
    font_size_price = 12
    font_size_header = 11
    left = x_offset[2]/2
    right = 1.5*x_offset[2] + 7*x_shift
    top = y_offset - 10
    bottom = y_offset + y_shift[1] + y_shift[2] + 10
    vertical_middle = y_offset + y_shift[1] + (y_shift[1]-y_shift[0]) + \
        (y_shift[2] - (2*y_shift[1]-y_shift[0]))/2
    horizontal_middle = 120
    # Bounding box: two vertical edges joined by cubic Bezier caps.
    svg.create_path(
        (
            "M {left},{top} V {bottom} " +
            "C {left_2},{bottom_2} {right_2},{bottom_2} {right},{bottom} " +
            "V {top} C {right_2},{top_2} {left_2},{top_2} {left},{top}").format(
            left=left, right=right, top=top, bottom=bottom,
            left_2=left+20, right_2=right-20, bottom_2=bottom+20, top_2=top-20
        ),
        stroke_colour="black",
        fill_colour=fill_colour,
        id_name="box_{}".format(name)
    )
    # Price scale 1..7 across the vertical middle of the box.
    for i in range(1, 8):
        svg.create_text(
            "{}_pricetext_{}".format(name, i),
            [x_offset[2] + (i-0.5)*x_shift, vertical_middle + font_size_price/2],
            str(i),
            font_size=font_size_price,
            text_align="center",
            text_anchor="middle",
            font_weight="bold"
        )
    # "Demand" header ellipse on the top border.
    size_ellipse = [80, 10]
    offset_border_ellipse = 9
    svg.create_ellipse(
        size_ellipse,
        [horizontal_middle, top - offset_border_ellipse],
        "black",
        "ellipse_top_{}".format(name),
        fill="white",
        stroke_width="1",
        stroke_opacity="1",
        opacity="1"
    )
    svg.create_text(
        "demand_text_{}".format(name),
        [horizontal_middle, top - offset_border_ellipse + font_size_header/2],
        "Demand",
        font_size=font_size_header,
        text_align="center",
        text_anchor="middle",
        font_weight="bold"
    )
    # "Supply" header ellipse on the bottom border.
    svg.create_ellipse(
        size_ellipse,
        [horizontal_middle, bottom + offset_border_ellipse],
        "black",
        "ellipse_bottom_{}".format(name),
        fill="white",
        stroke_width="1",
        stroke_opacity="1",
        opacity="1"
    )
    svg.create_text(
        "supply_text_{}".format(name),
        [horizontal_middle, bottom + offset_border_ellipse + font_size_header/2],
        "Supply",
        font_size=font_size_header,
        text_align="center",
        text_anchor="middle",
        font_weight="bold"
    )
    resources = [planet.planet_demand_resource, planet.planet_supply_resource]
    prices = [
        planet.planet_demand_resource_price,
        planet.planet_supply_resource_price
    ]
    # Two rows (0 = demand, 1 = supply) of six slots each; the slot whose
    # price matches the planet's current price shows the resource symbol,
    # all other slots show the empty placeholder ('0').
    for row in range(2):
        for column in range(6):
            if row == 1:
                price = column + 1  # supply prices run 1..6
            else:
                price = column + 2  # demand prices run 2..7
            # BUG FIX: compare with `==` instead of `is` — identity
            # comparison of ints only happens to work for CPython's
            # small-integer cache and is not a correctness guarantee.
            if price == prices[row]:
                symbolname = generate_svg_symbols.get_symbol_name(resources[row])
            else:
                symbolname = generate_svg_symbols.get_symbol_name('0')
            svg.use_symbol(
                symbolname,
                "{}_name_{}_row_{}_column".format(name, row, column),
                position=[(x_offset[row + 1] + column*x_shift)/scale_factor,
                          (y_offset + y_shift[row + 1])/scale_factor],
                additional_arguments={
                    "transform": f"scale({scale_factor})"
                }
            )
)
def draw_planet_market(planets):
    """Create one planet-market SVG per planet; return the list of Svg objects."""
    # Symbol registrars that every market SVG needs before drawing.
    symbol_loaders = (
        generate_svg_symbols.add_posibility_for_disc_3d,
        generate_svg_symbols.add_posibility_for_empty_res,
        generate_svg_symbols.add_posibility_for_red_cross,
        generate_svg_symbols.add_posibility_for_radioactive,
        generate_svg_symbols.add_posibility_for_food,
        generate_svg_symbols.add_posibility_for_water,
        generate_svg_symbols.add_posibility_for_building_res,
    )
    svgs = []
    for planet in planets:
        svg = Svg(width=240, height=170,
                  id_name="svg_planet_market_{}".format(planet.name))
        for register_symbol in symbol_loaders:
            register_symbol(svg)
        draw_planet(svg, planet, 'planet_market_{}'.format(planet.name), planet.colour)
        svgs.append(svg)
    return svgs
if __name__ == '__main__':
    # Module is import-only; no standalone CLI behaviour.
    pass
| 4,530 | 1,557 |
import subprocess
import sys
import os
# Minimal interactive shell: read a line, split into argv and run it.
while True:
    line = input('> ')
    # FIX: renamed `exec` (shadowed the builtin) and use split() so runs
    # of whitespace do not yield empty argv entries.
    argv = line.strip().split()
    if not argv:
        # Empty input: subprocess.run([]) would raise, so just re-prompt.
        continue
    try:
        status = subprocess.run(argv)
    except FileNotFoundError:
        # Unknown command should not crash the shell.
        print(f"command not found: {argv[0]}")
| 146 | 48 |
from sly import Lexer, Parser
import vm
class MyLexer(Lexer):
    """Tokeniser for the toy language: identifiers, integer literals,
    the four arithmetic operators, '=', ';' and the 'var' keyword."""
    tokens = {IDENTIFIER, NUMBER, SEMICOLON, PLUS, MINUS,
              ASTERISK, FORWARD_SLASH, EQUALS, KEYWORD_VAR}
    # NOTE(review): KEYWORD_VAR is listed before IDENTIFIER — presumably so
    # 'var' lexes as the keyword rather than a plain identifier; confirm
    # against sly's token-ordering rules for string-defined tokens.
    KEYWORD_VAR = r'var'
    IDENTIFIER = r'[a-zA-Z_][a-zA-Z0-9_]*'
    NUMBER = r'[0-9]+'
    SEMICOLON = r';'
    PLUS = r'\+'
    MINUS = r'\-'
    ASTERISK = r'\*'
    FORWARD_SLASH = r'\/'
    EQUALS = r'='
    # Whitespace (including newlines) is insignificant.
    ignore = ' \t\n'
class MyParser(Parser):
    """Grammar for the toy language; each reduction emits stack-machine
    bytecode as a side effect (syntax-directed translation)."""
    tokens = MyLexer.tokens

    def __init__(self, bytecode: vm.Bytecode):
        self.bytecode = bytecode   # output sink for emitted instructions
        self.storageIndex = 0      # next free variable storage slot
        self.storageTable = {}     # identifier -> storage slot

    @_("statements statement")
    def statements(self, p):
        pass

    @_("statement")
    def statements(self, p):
        pass

    @_("KEYWORD_VAR IDENTIFIER EQUALS expr SEMICOLON")
    def statement(self, p):
        # `var x = expr;` — the expression value is already on the stack;
        # store it into a freshly allocated slot and record the mapping.
        self.storageTable[p.IDENTIFIER] = self.storageIndex
        self.bytecode.emit(vm.Instruction.STORE)
        self.bytecode.emit(self.storageIndex)
        self.storageIndex += 1

    @_("expr PLUS expr1",
       "expr MINUS expr1")
    def expr(self, p):
        # Both operand values are on the stack when this reduction fires.
        if p[1] == '+':
            self.bytecode.emit(vm.Instruction.ADD)
        else:
            self.bytecode.emit(vm.Instruction.SUB)

    @_("expr1")
    def expr(self, p):
        pass

    @_("expr1 ASTERISK expr2",
       "expr1 FORWARD_SLASH expr2")
    def expr1(self, p):
        # Multiplication/division bind tighter than +/- by grammar level.
        if p[1] == '*':
            self.bytecode.emit(vm.Instruction.MUL)
        else:
            self.bytecode.emit(vm.Instruction.DIV)

    @_("expr2")
    def expr1(self, p):
        pass

    @_("NUMBER")
    def expr2(self, p):
        self.bytecode.emit(vm.Instruction.PUSH)
        self.bytecode.emit(int(p[0]))

    @_("IDENTIFIER")
    def expr2(self, p):
        # NOTE(review): an undeclared identifier raises KeyError here —
        # presumably acceptable for this toy; confirm desired error handling.
        self.bytecode.emit(vm.Instruction.LOAD)
        self.bytecode.emit(self.storageTable[p.IDENTIFIER])
| 1,858 | 729 |
from abc import abstractmethod
from jaxvi.abstract import ABCMeta, abstract_attribute
import jax.numpy as jnp
from jax.scipy.stats import norm, gamma
class Model(metaclass=ABCMeta):
    """Abstract interface for a probabilistic model used by the VI machinery."""

    @abstract_attribute
    def latent_dim(self):
        # Number of latent parameters (dimension of theta / zeta).
        pass

    @abstractmethod
    def inv_T(self, zeta: jnp.DeviceArray) -> jnp.DeviceArray:
        # Inverse transform mapping unconstrained zeta onto the model's support.
        pass

    @abstractmethod
    def log_joint(self, theta: jnp.DeviceArray) -> jnp.DeviceArray:
        # Log of the joint density p(theta, data).
        pass
pass
class LinearRegression(Model):
    """Bayesian linear regression: Normal(0, 10) priors on the
    coefficients, a Gamma(1, scale=2) prior on the noise scale, and a
    Normal likelihood."""

    def __init__(self, x, y):
        self.x = x
        self.y = y
        # One latent per column of x plus the noise scale sigma.
        self.latent_dim = x.shape[1] + 1

    def inv_T(self, zeta: jnp.DeviceArray) -> jnp.DeviceArray:
        # Map unconstrained zeta to the support: sigma (last entry) via exp.
        return jnp.append(zeta[:-1], jnp.exp(zeta[-1]))

    def log_joint(self, theta: jnp.DeviceArray) -> jnp.DeviceArray:
        # GENERALISATION FIX: the original sliced `theta[:2]`, hard-coding
        # exactly two regression coefficients even though latent_dim is
        # derived from x.shape[1].  Slice relative to the end so any number
        # of columns works; identical for the original two-column case.
        betas = theta[:-1]
        sigma = theta[-1]
        beta_prior = norm.logpdf(betas, 0, 10).sum()
        sigma_prior = gamma.logpdf(sigma, a=1, scale=2).sum()
        yhat = jnp.inner(self.x, betas)
        likelihood = norm.logpdf(self.y, yhat, sigma).sum()
        return beta_prior + sigma_prior + likelihood
| 1,098 | 399 |
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 21 22:59:41 2018
@author: user
"""
import chainer
import numpy as np
import chainer.functions as F
# Demo: max_pooling_2d with return_indices, then the two ways to go back up:
# upsampling_2d (uses the pooling indices) vs unpooling_2d (no indices).
x = np.arange(1, 37).reshape(1, 1, 6, 6).astype(np.float32)
x = chainer.Variable(x)
print(x)
pooled_x, indexes = F.max_pooling_2d(x, ksize=2, stride=2, return_indices=True)
print(pooled_x)
print(indexes)
# upsampling_2d restores each max to its original position (requires `indexes`).
upsampled_x = F.upsampling_2d(pooled_x, indexes, ksize=2, stride=2, outsize=x.shape[2:])
print(upsampled_x.shape)
print(upsampled_x.data)
# unpooling_2d simply repeats each pooled value over its window.
upsampled_x = F.unpooling_2d(pooled_x, ksize=2, stride=2, outsize=x.shape[2:])
print(upsampled_x.shape)
print(upsampled_x.data)
# Keras's UpSampling corresponds to Chainer's unpooling;
# Chainer's upsampling_2d does not work without `indexes`.
var = "James Bond"
print(var[2::-1]) | 36 | 17 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'resources/templates/wizard_depend_depend_version.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """Auto-generated by pyuic5 from wizard_depend_depend_version.ui.

    Do not edit by hand — regenerate from the .ui file instead; any manual
    change is lost on the next generation run (see the file header warning).
    """

    def setupUi(self, Form):
        # Single-row layout: static "Version:" label plus a value label.
        Form.setObjectName("Form")
        Form.resize(345, 30)
        self.horizontalLayout = QtWidgets.QHBoxLayout(Form)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.label = QtWidgets.QLabel(Form)
        self.label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.label.setObjectName("label")
        self.horizontalLayout.addWidget(self.label)
        self.label_version = QtWidgets.QLabel(Form)
        self.label_version.setText("")  # filled in at runtime by the caller
        self.label_version.setObjectName("label_version")
        self.horizontalLayout.addWidget(self.label_version)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        # Route all user-visible strings through Qt's translation machinery.
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.label.setText(_translate("Form", "Version:"))
| 1,322 | 422 |
from django.db import models
class BehaviourReport(models.Model):
    """A user's report about another user's behaviour, plus its review state."""

    # Review workflow states.
    NOT_REVIEWED = 'not_reviewed'
    UNDER_REVIEW = 'under_review'
    COMPLETED = 'completed'
    STATUS_CHOICES = (
        (NOT_REVIEWED, 'Not reviewed'),
        (UNDER_REVIEW, 'Under review'),
        (COMPLETED, 'Completed')
    )
    # Automatic timestamping fields.
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    # Report
    # reporter = the user filing the report; reportee = the user reported.
    reporter = models.ForeignKey(
        'auth.User',
        on_delete=models.CASCADE,
        related_name='reporter'
    )
    reportee = models.ForeignKey(
        'auth.User',
        on_delete=models.CASCADE,
        related_name='reportee'
    )
    report = models.TextField(max_length=2000)
    # Outcome/handling
    # public_outcome is shown to users; private_outcome is for moderators.
    public_outcome = models.CharField(max_length=255, blank=True)
    private_outcome = models.CharField(max_length=255, blank=True)
    status = models.CharField(
        max_length=50,
        choices=STATUS_CHOICES,
        default=NOT_REVIEWED
    )

    class Meta:
        # Most recently touched reports first.
        ordering = ['-modified']
| 1,093 | 385 |
from setuptools import setup
# Read the version from the last line of rpyc_mem/_version.py
# (e.g. `__version__ = "1.2.3"`): take the last whitespace-separated token
# and strip surrounding quotes.  FIX: use a context manager (the original
# leaked the file handle) and pin the encoding.
with open("rpyc_mem/_version.py", encoding="utf-8") as version_file:
    _version = version_file.readlines()[-1].split()[-1].strip("\"'")

setup(
    version=_version
)
# Generated by Django 3.2.2 on 2021-05-10 06:15
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the Post model.

    Generated by Django 3.2.2 — avoid manual edits; create a follow-up
    migration for schema changes instead.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(help_text='제목', max_length=50)),
                ('show', models.CharField(choices=[('public', 'PUBLIC'), ('private', 'PRIVATE'), ('my', 'MY')], default='public', help_text='post_type', max_length=10)),
                ('body', models.TextField(help_text='내용')),
                ('datetime', models.DateTimeField(default=django.utils.timezone.now, help_text='작성시간')),
                ('image', models.ImageField(blank=True, null=True, upload_to='images/')),
            ],
        ),
    ]
| 957 | 302 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from ..action import ContainerUtilAction
from ..input import ItemType
log = logging.getLogger(__name__)
class ExecMixin(object):
    """
    Utility mixin for executing configured commands inside containers.
    """
    # Maps (item type, action) pairs to the handler methods below.
    action_method_names = [
        (ItemType.CONTAINER, ContainerUtilAction.EXEC_COMMANDS, 'exec_commands'),
        (ItemType.CONTAINER, ContainerUtilAction.EXEC_ALL, 'exec_container_commands'),
    ]

    def exec_commands(self, action, c_name, run_cmds, **kwargs):
        """
        Runs a single command inside a container.

        :param action: Action configuration.
        :type action: dockermap.map.runner.ActionConfig
        :param c_name: Container name.
        :type c_name: unicode | str
        :param run_cmds: Commands to run.
        :type run_cmds: list[dockermap.map.input.ExecCommand]
        :return: List of exec command return values (e.g. containing the command id), if applicable, or ``None``
          if either no commands have been run or no values have been returned from the API.
        :rtype: list[dict] | NoneType
        """
        client = action.client
        exec_results = []
        for run_cmd in run_cmds:
            cmd = run_cmd.cmd
            cmd_user = run_cmd.user
            log.debug("Creating exec command in container %s with user %s: %s.", c_name, cmd_user, cmd)
            # Two-phase API: create the exec instance, then start it by id.
            ec_kwargs = self.get_exec_create_kwargs(action, c_name, cmd, cmd_user)
            create_result = client.exec_create(**ec_kwargs)
            if create_result:
                e_id = create_result['Id']
                log.debug("Starting exec command with id %s.", e_id)
                es_kwargs = self.get_exec_start_kwargs(action, c_name, e_id)
                client.exec_start(**es_kwargs)
                exec_results.append(create_result)
            else:
                # Some API versions return nothing from exec_create; treat as started.
                log.debug("Exec command was created, but did not return an id. Assuming that it has been started.")
        if exec_results:
            return exec_results
        return None

    def exec_container_commands(self, action, c_name, **kwargs):
        """
        Runs all configured commands of a container configuration inside the container instance.

        :param action: Action configuration.
        :type action: dockermap.map.runner.ActionConfig
        :param c_name: Container name.
        :type c_name: unicode | str
        :return: List of exec command return values (e.g. containing the command id), if applicable, or ``None``
          if either no commands have been run or no values have been returned from the API.
        :rtype: list[dict] | NoneType
        """
        config_cmds = action.config.exec_commands
        if not config_cmds:
            return None
        return self.exec_commands(action, c_name, run_cmds=config_cmds)
| 2,879 | 801 |
# -*- coding: utf-8 -*-
import codecs
from senapy.parsing.question_search_result_parser import parse_question_search_result
def test_parsing():
    """Parsing a saved Sénat search-result page yields the expected total
    count, page size and first-result fields."""
    url = 'http://www.senat.fr/basile/rechercheQuestion.do?off=30&rch=qa&de=20150403&au=20160403&dp=1+an&radio=dp&appr=text&aff=ar&tri=dd&afd=ppr&afd=ppl&afd=pjl&afd=cvn&_na=QG'
    # FIX: close the fixture file deterministically — the original left the
    # handle from codecs.open dangling.
    with codecs.open('tests/resources/question_search_result.html', encoding='iso-8859-1') as html:
        search_result = parse_question_search_result(url, html)
    assert 330 == search_result.total_count
    assert 10 == len(search_result.results)
    assert 'http://www.senat.fr/questions/base/2016/qSEQ16030791G.html' == search_result.results[0].url
    assert u'Partenariat entre La Poste et l\'État : maisons de services au public' == search_result.results[0].title
    assert '16' == search_result.results[0].legislature
| 846 | 342 |
import signal
import numpy as np
from PIL import ImageGrab
import cv2
import time
import sys
import os
# Recording configuration.
flips_time_mins = 30          # length of one output video segment, in minutes
interval = 5  # seconds between captured frames
num_frames = flips_time_mins*60/interval
num_frames = int(num_frames)  # frames captured per segment
year = -1                     # sentinel "unset" date parts; refreshed on day rollover
month = -1
day = -1
out_fps = 24                  # playback frame rate of the time-lapse output
cammode = 0                   # 0 = undetected, 1 = dual camera, 2 = single camera
shutdown_msg = False          # set by the SIGINT handler to request shutdown
def signal_handler(signal, frame):
    """SIGINT handler: announce shutdown and flag the capture loop to stop
    so the open video writers get flushed and released."""
    global shutdown_msg
    print('You Pressed Ctrl+C, The Program Will Be Shutdown')
    shutdown_msg = True
    print('Saving Videos')
def add_timestamp(img):
    """Draw the current wall-clock time onto `img` (in place) and return it.

    The stamp is white, or black when the sampled patch near the text
    position is bright (mean > 128), to keep it readable.
    NOTE(review): sample window and text position are hard-coded — assumes
    frames of at least ~950x780 px; confirm against the capture resolution.
    """
    time_str= time.strftime("%Y-%m-%d %H:%M:%S")
    color=(255,255,255)
    if np.mean( img[700:780,900:950])>128:
        color=(0,0,0)
    cv2.putText(img, time_str, (900, 700) ,cv2.FONT_HERSHEY_SIMPLEX ,0.8, color ,2)
    return img
# Detect the available cameras: index 0 (inner) and index 1 (outer).
capture = cv2.VideoCapture(0)
capture1 = cv2.VideoCapture(1)
cam, _ = capture.read()
cam1, _ = capture1.read()
if(cam and cam1):
    print('Dual Camera Mode')
    cammode = 1
elif(cam):
    print('Single Camera Mode')
    cammode = 2
else:
    print('No Camera Detect!')
    sys.exit(0)
signal.signal(signal.SIGINT,signal_handler)
# capture frames to video
while True:
    # Refresh the dated output directory when the day rolls over.
    if(day != time.strftime("%d")):
        year = time.strftime("%Y")
        month = time.strftime("%m")
        day = time.strftime("%d")
        hour = time.strftime("%H")
        save_dir = "{0}/{1}/{2}".format(year, month, day)
        if not os.path.isdir(save_dir):
            os.makedirs(save_dir)
    # innner camera init — a fresh, timestamped writer per segment.
    size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    codec = cv2.VideoWriter.fourcc('M', 'J', 'P', 'G')
    cam_filename = save_dir+"/cam_{:4}.avi".format(time.strftime("%H%M"))
    video = cv2.VideoWriter(cam_filename, codec, out_fps, size)
    # for low quality webcams, discard the starting unstable frames
    for i in range(20):
        capture.read()
    # desktop screen init
    desktopim = np.array(ImageGrab.grab().convert('RGB'))
    # desktopFrame =np.array(desktopim.getdata(),dtype='uint8')\
    # .reshape((desktopim.size[1],desktopim.size[0],3))
    sp = desktopim.shape
    sz1 = sp[0] # height(rows) of image
    sz2 = sp[1] # width(colums) of image
    desktopsize = (int(sz2),int(sz1))
    codec = cv2.VideoWriter.fourcc('M', 'J', 'P', 'G')
    desktop_filename = save_dir+"/desktop_{:4}.avi".format(time.strftime("%H%M"))
    desktopvideo = cv2.VideoWriter(desktop_filename, codec, out_fps, desktopsize)
    # outter camera init (dual mode only)
    if (cammode == 1):
        size1 = (int(capture1.get(cv2.CAP_PROP_FRAME_WIDTH)),
                 int(capture1.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        cam1_filename = save_dir+"/cam1_{:4}.avi".format(time.strftime("%H%M"))
        video1 = cv2.VideoWriter(cam1_filename, codec, out_fps, size1)
        # for low quality webcams, discard the starting unstable frames
        for i in range(20):
            capture1.read()
    # Record one segment: a frame from every source each `interval` seconds.
    for i in range(num_frames):
        if (shutdown_msg):
            break
        _, frame = capture.read()
        video.write(add_timestamp(frame))
        desktopim = np.array(ImageGrab.grab().convert('RGB'))
        # ImageGrab and OpenCV have different color space
        desktopFrame = cv2.cvtColor(desktopim, cv2.COLOR_BGR2RGB)
        desktopvideo.write(add_timestamp(desktopFrame))
        if (cammode == 1):
            _, frame1 = capture1.read()
            video1.write(add_timestamp(frame1))
        time.sleep(interval)
    # Flush/close this segment's writers before starting the next one.
    video.release()
    desktopvideo.release()
    if (cammode == 1):
        video1.release()
    if (shutdown_msg):
        break
capture.release()
if(cammode ==1):
    capture1.release()
print('Done!')
print('Exit The Program')
sys.exit(0)
| 3,678 | 1,386 |
import unittest
import spacy
from spacy.language import Language
try:
from src.MordinezNLP.tokenizers import spacy_tokenizer
except:
from MordinezNLP.tokenizers import spacy_tokenizer
class TestTokenizers(unittest.TestCase):
    """Tests for the MordinezNLP custom spaCy tokenizer.

    The tokenizer must keep special placeholder tokens such as ``<date>``
    and ``<number>`` intact (as single tokens) while otherwise tokenizing
    like the stock English pipeline.
    """

    # Pipeline is built once at class-definition time and shared by all tests.
    nlp: Language = spacy.load("en_core_web_sm")
    nlp.tokenizer = spacy_tokenizer(nlp)

    def test_spacy_tokenizer_case1(self):
        """Placeholders survive; contractions split ("don't" -> "do", "n't")."""
        tokenized_data = self.nlp("Hello today is <date>, tomorrow it will be <number> degrees of celcius. I don't like him.")
        self.assertEqual(
            [str(token) for token in tokenized_data],
            [
                "Hello",
                "today",
                "is",
                "<date>",
                ",",
                "tomorrow",
                "it",
                "will",
                "be",
                "<number>",
                "degrees",
                "of",
                "celcius",
                ".",
                "I",
                "do",
                "n't",
                "like",
                "him",
                "."
            ]
        )

    def test_spacy_tokenizer_case2(self):
        """Whitespace-separated German text tokenizes one-to-one.

        Bug fix: the expected list previously contained a stray
        ``"be", "<number>"`` pair copy-pasted from case1 that does not occur
        in the input sentence.
        """
        tokenized_data = self.nlp('Punkt wir haben extra um <number> : <number> Uhr noch ein Event')
        self.assertEqual(
            [str(token) for token in tokenized_data],
            [
                "Punkt",
                "wir",
                "haben",
                "extra",
                "um",
                "<number>",
                ":",
                "<number>",
                "Uhr",
                "noch",
                "ein",
                "Event"
            ]
        )
# Allow running this test module directly: `python test_tokenizers.py`.
if __name__ == '__main__':
unittest.main()
| 1,823 | 489 |
def foo(b=1):
    """No-op placeholder; accepts and ignores *b* (default 1)."""
    return None
import random
import requests
import tempfile
from io import BytesIO
from PIL import Image, ImageDraw, ImageFont
# Candidate TTF font URLs; one is downloaded at render time by image_maker.
# All but Neucha are currently disabled.
FONTS = [
'https://cdn.statically.io/gh/google/fonts/main/ofl/neucha/Neucha.ttf',
# 'https://cdn.statically.io/gh/google/fonts/main/ofl/catamaran/Catamaran%5Bwght%5D.ttf',
# font_base_url + 'lobstertwo.ttf',
# font_base_url + 'underdog.ttf',
# font_base_url + 'specialelite.ttf',
# font_base_url + 'abrilfatface.ttf',
# font_base_url + 'merienda.ttf',
# font_base_url + 'poiretone.ttf',
# font_base_url + 'shadowsintolight.ttf',
# font_base_url + 'caveatbrush.ttf',
# font_base_url + 'gochihand.ttf',
# font_base_url + 'itim.ttf',
# font_base_url + 'rancho.ttf'
]
# thanks to https://clrs.cc
# Background/foreground RGB pairs; only the white/grey scheme is active.
COLORS = [
{'bg': (255, 255, 255), 'fg': (100, 100, 100)}
# { 'bg': (0, 31, 63), 'fg': (128, 191, 255) },
# { 'bg': (0, 116, 217), 'fg': (179, 219, 255) },
# { 'bg': (127, 219, 255), 'fg': (0, 73, 102) },
# { 'bg': (57, 204, 204), 'fg': (0, 0, 0) },
# { 'bg': (61, 153, 112), 'fg': (22, 55, 40) },
# { 'bg': (46, 204, 64), 'fg': (14, 62, 20) },
# { 'bg': (1, 255, 112), 'fg': (0, 102, 44) },
# { 'bg': (255, 220, 0), 'fg': (102, 88, 0) },
# { 'bg': (255, 133, 27), 'fg': (102, 48, 0) },
# { 'bg': (255, 65, 54), 'fg': (128, 6, 0) },
# { 'bg': (133, 20, 75), 'fg': (235, 122, 177) },
# { 'bg': (240, 18, 190), 'fg': (101, 6, 79) },
# { 'bg': (177, 13, 201), 'fg': (239, 169, 249) },
# { 'bg': (17, 17, 17), 'fg': (221, 221, 221) },
# { 'bg': (170, 170, 170), 'fg': (0, 0, 0) },
# { 'bg': (221, 221, 221), 'fg': (0, 0, 0) }
]
def image_maker(quote_by: str, quote_body: str) -> BytesIO:
    """Render *quote_body*, attributed to *quote_by*, as a 612x612 JPEG.

    A random font is downloaded over HTTP on every call and a random
    color scheme is chosen; the quote is word-wrapped to roughly fit the
    image width and centered, with ``--<quote_by>`` appended below.

    :param quote_by: author name appended after the quote.
    :param quote_body: the quote text; must be non-empty.
    :return: in-memory JPEG, rewound to position 0.
    :raises ValueError: if *quote_body* is empty (previously crashed with
        an obscure ZeroDivisionError).
    """
    # image configuration
    img_width = 612
    img_height = 612

    if not quote_body:
        raise ValueError("quote_body must be a non-empty string")

    # font configuration (network fetch on every call — acceptable for
    # low-volume use, consider caching if this becomes hot)
    font_selected = random.choice(FONTS)
    fontfile = requests.get(font_selected, stream=True)
    font = ImageFont.truetype(BytesIO(fontfile.content), 35)

    # color configuration
    color = random.choice(COLORS)

    # draw image
    image = Image.new('RGB', (img_width, img_height), color=color['bg'])
    document = ImageDraw.Draw(image)

    # find the average pixel width of a letter in quote_body
    # NOTE(review): ImageDraw.textsize is deprecated and removed in
    # Pillow >= 10; migrate to textlength/textbbox when upgrading Pillow.
    total_width = 0
    for letter in quote_body:
        total_width += document.textsize(letter, font=font)[0]
    average_length_of_letter = total_width / len(quote_body)

    # number of letters that fit on each line (with side margins)
    number_of_letters_for_each_line = (
        img_width / 1.818) / average_length_of_letter

    # re-flow the text: break at the first space after the line is full
    incrementer = 0
    fresh_quote = ''
    for letter in quote_body:
        if (letter == '-'):
            # fresh_quote += '\n\n' + letter #add some line breaks
            fresh_quote += '' + letter
        elif (incrementer < number_of_letters_for_each_line):
            fresh_quote += letter
        else:
            if(letter == ' '):
                fresh_quote += '\n'
                incrementer = 0
            else:
                fresh_quote += letter
        incrementer += 1
    fresh_quote += '\n\n--' + quote_by

    # render the text in the center of the box
    dim = document.textsize(fresh_quote, font=font)
    x2 = dim[0]
    y2 = dim[1]
    qx = (img_width / 2 - x2 / 2)
    qy = (img_height / 2 - y2 / 2)
    document.text((qx, qy), fresh_quote, align="center",
                  font=font, fill=color['fg'])

    # save image to bytes
    image_io = BytesIO()
    image.save(image_io, 'JPEG', quality=100)
    image_io.seek(0)
    return image_io
def image_maker_make_file(quote_by: str, quote_body: str) -> str:
    """Render the quote image and persist it to a temporary ``.jpg`` file.

    Bug fix: the previous version used ``tempfile.mkstemp`` and never
    closed the OS-level file descriptor it returned (the path was opened
    a second time with ``open``), leaking one fd per call.
    ``NamedTemporaryFile(delete=False)`` gives a single, properly closed
    handle instead.

    :param quote_by: author name, forwarded to :func:`image_maker`.
    :param quote_body: quote text, forwarded to :func:`image_maker`.
    :return: filesystem path of the written JPEG; the caller owns the
        file and is responsible for deleting it.
    """
    image_io = image_maker(quote_by, quote_body)
    with tempfile.NamedTemporaryFile(suffix='.jpg', delete=False) as image_file:
        image_file.write(image_io.getbuffer())
    return image_file.name
| 3,905 | 1,710 |