Dataset schema (one row per source file):

| Column | Dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
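The rows below follow this schema, one file per row. As a minimal inspection sketch (assuming a hypothetical local Parquet export of the table named `sample.parquet`), the dump can be examined with pandas:

```python
# Minimal inspection sketch; "sample.parquet" is a hypothetical local export.
import pandas as pd

df = pd.read_parquet("sample.parquet")
print(df.dtypes)                            # column dtypes, as listed above
row = df.iloc[0]
print(row["max_stars_repo_name"], row["max_stars_count"])
print(row["content"][:200])                 # start of the stored source file
```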
**Row 1: fcitx.py**

- hexsha: 59d815953d3974c813c0ea240136e075037f3a4c
- size: 2,032 | ext: py | lang: Python
- max_stars: repo LimeIncOfficial/Black-Box, path .SpaceVim/bundle/fcitx.vim/plugin/fcitx.py, head 2361e5a9683aaa90bad26e58b80df55675de88a6, licenses ["MIT"], count 2, events 2022-03-26T09:14:20.000Z to 2022-03-26T17:04:43.000Z
- max_issues: same repo, path, and head; licenses ["MIT"]; count null; event datetimes null
- max_forks: same repo, path, and head; licenses ["MIT"]; count null; event datetimes null
- content:

```python
# vim:fileencoding=utf-8
import os
import vim
import socket
import struct
import contextlib
fcitxsocketfile = vim.eval('s:fcitxsocketfile')
class FcitxComm(object):
STATUS = struct.pack('i', 0)
ACTIVATE = struct.pack('i', 1 | (1 << 16))
DEACTIVATE = struct.pack('i', 1)
INT_SIZE = struct.calcsize('i')
def __init__(self, socketfile):
if socketfile[0] == '@': # abstract socket
socketfile = '\x00' + socketfile[1:]
self.socketfile = socketfile
self.sock = None
def status(self):
return self._with_socket(self._status) == 2
def activate(self):
self._with_socket(self._command, self.ACTIVATE)
def deactivate(self):
self._with_socket(self._command, self.DEACTIVATE)
def _error(self, e):
estr = str(e).replace('"', r'\"')
file = self.socketfile.replace('"', r'\"')
vim.command('echohl WarningMsg | echo "fcitx.vim: socket %s error: %s" | echohl NONE' % (file, estr))
def _connect(self):
self.sock = sock = socket.socket(socket.AF_UNIX)
sock.settimeout(0.5)
try:
sock.connect(self.socketfile)
return True
except (socket.error, socket.timeout) as e:
self._error(e)
return False
def _with_socket(self, func, *args, **kwargs):
# fcitx doesn't support connection reuse
if not self._connect():
return
with contextlib.closing(self.sock):
try:
return func(*args, **kwargs)
except (socket.error, socket.timeout, struct.error) as e:
self._error(e)
def _status(self):
self.sock.send(self.STATUS)
return struct.unpack('i', self.sock.recv(self.INT_SIZE))[0]
def _command(self, cmd):
self.sock.send(cmd)
Fcitx = FcitxComm(fcitxsocketfile)
def fcitx2en():
if Fcitx.status():
vim.command('let b:inputtoggle = 1')
Fcitx.deactivate()
def fcitx2zh():
if vim.eval('exists("b:inputtoggle")') == '1':
if vim.eval('b:inputtoggle') == '1':
Fcitx.activate()
vim.command('let b:inputtoggle = 0')
else:
vim.command('let b:inputtoggle = 0')
```

Quality signals (from the qsc_*_quality_signal columns):

- avg_line_length 25.721519; max_line_length 105; alphanum_fraction 0.645669
- num_words 269; num_chars 2,032; mean_word_length 4.784387; frac_words_unique 0.304833
- frac_chars_top_2grams 0.037296; top_3grams 0.043512; top_4grams 0.041958
- frac_chars_dupe_5grams 0.177933; 6grams 0.091686; 7grams 0.051282; 8grams 0; 9grams 0; 10grams 0
- frac_chars_replacement_symbols 0; frac_chars_digital 0.013572; frac_chars_whitespace 0.202264
- size_file_byte 2,032; num_lines 78; num_chars_line_max 106; num_chars_line_mean 26.051282
- frac_chars_alphabet 0.780382; frac_chars_comments 0.037894; cate_xml_start 0; frac_lines_dupe_lines 0.1; cate_autogen 0
- frac_lines_long_string 0; frac_chars_string_length 0.105074; frac_chars_long_word_length 0.011789; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- codepython: cate_ast 1; frac_lines_func_ratio 0.183333; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.083333; frac_lines_simplefunc 0.016667; score_lines_no_logic 0.45; frac_lines_print 0
- unsuffixed qsc_* counterparts all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective 1; hits 0
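For reference, a minimal standalone sketch of the wire protocol the plugin above speaks: each command is one packed C int, and the STATUS query answers with a packed int where 2 means the input method is active. The socket path below is an assumption; the plugin itself reads it from the Vim variable `s:fcitxsocketfile`.

```python
# Standalone sketch of the fcitx socket protocol used by fcitx.py above.
# The socket path is an assumption; fcitx exposes one per display.
import socket
import struct

STATUS = struct.pack('i', 0)  # same framing as FcitxComm.STATUS

def fcitx_active(socketfile='/tmp/fcitx-socket-:0'):
    sock = socket.socket(socket.AF_UNIX)
    sock.settimeout(0.5)
    sock.connect(socketfile)
    try:
        sock.send(STATUS)
        (state,) = struct.unpack('i', sock.recv(struct.calcsize('i')))
        return state == 2  # 2 = active, matching FcitxComm.status()
    finally:
        sock.close()
```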
**Row 2: check.py**

- hexsha: 59d8eb391e49f53fa6bd3a0f7066dcb0749c813d
- size: 4,373 | ext: py | lang: Python
- max_stars: repo MePyDo/pygqa, path app/check.py, head 61cde42ee815968fdd029cc5056ede3badea3d91, licenses ["MIT"], count 3, events 2021-02-25T13:19:52.000Z to 2021-03-03T03:46:46.000Z
- max_issues: repo MedPhyDO/pygqa, path app/check.py, head 580b2c6028d2299790a38262b795b8409cbfcc37, licenses ["MIT"], count null, event datetimes null
- max_forks: same as max_issues; count null; event datetimes null
- content:

```python
# -*- coding: utf-8 -*-
__author__ = "R. Bauer"
__copyright__ = "MedPhyDO - Machbarkeitsstudien des Instituts für Medizinische Strahlenphysik und Strahlenschutz am Klinikum Dortmund im Rahmen von Bachelor und Masterarbeiten an der TU-Dortmund / FH-Dortmund"
__credits__ = ["R.Bauer", "K.Loot"]
__license__ = "MIT"
__version__ = "0.1.0"
__status__ = "Prototype"
import matplotlib.pyplot as plt
from isp.plot import plotClass
import logging
logger = logging.getLogger( "MQTT" )
class ispCheckClass( plotClass ):
""" Hilfsfunktionen für alle check Module
Attributes
----------
image : instance of BaseImage
baseImage : instance of BaseImage
das zum normalisieren zu verwendende Bild
infos : dict
die infos aus self.image.infos
checkField : dict
die für die Tests zu verwendende Bildinformatioen
baseField : dict
die für das normalisieren zu verwendende Bildinformatioen
"""
def __init__( self, image=None, baseImage=None, normalize="none" ):
"""Check Klasse initialisieren
"""
#self.checkField = None
self.image = image
self.baseImage = baseImage
#self.baseField = None
self.infos = None
        # if the baseImage is present as well, normalize if applicable
        if self.baseImage is not None:
            self.normalize( normalize )
        # make the infos reachable through this class as well
        if self.image is not None:
self.infos = self.image.infos
#print("ispCheckClass.__init__",self.baseImage, normalize)
def show(self):
        '''
        Calls plt.show to output the generated plots
        Returns
        -------
        None.
        '''
plt.show()
def normalize( self, normalize: str="diff" ):
        '''Normalizes checkField with baseField
        afterwards self.image.array holds the normalized data
        Parameters
        ----------
        normalize : str, optional
            Type of normalization. The default is "diff".
            - none: do not normalize
            - diff: test / open
            - prozent: (test - open) / open
        Returns
        -------
        None.
        '''
        # keep image.array as image.arrayOriginal
self.image.arrayOriginal = self.image.array.copy()
#print("### ispCheckClass.normalize", self.image, self.baseImage, normalize)
"""
if basefilename:
if self.debug:
print("---------------------------")
print("OpenImage: %s, min: %1.3f, max: %1.3f, DPMM: %1.3f, DPI: %1.3f, CAX-x: %1.3f CAX-y:%1.3f"
% (self.openfilename, np.amin(openImg.array), np.amax(openImg.array),
openImg.dpmm, openImg.dpi, openImg.cax.x, openImg.cax.y ) )
self.printMetaInfo( openImg.metadata )
if self.debug:
print("---------------------------")
print("CheckImage: %s, min: %1.3f, max: %1.3f, DPMM: %1.3f, DPI: %1.3f, CAX-x: %1.3f CAX-y:%1.3f"
% (testfilename, np.amin(checkImage.array), np.amax(checkImage.array),
checkImage.dpmm, checkImage.dpi, checkImage.cax.x, checkImage.cax.y ) )
self.printMetaInfo( checkImage.metadata )
"""
base = self.baseImage.array.copy()
check = self.image.array.copy()
if normalize == "diff":
            # raise both arrays by 0.000001 and divide the closed field by the open one
self.image.array = (check + 0.000001) / (base + 0.000001)
elif normalize == "prozent":
self.image.array = ( (check + 0.000001) - (base + 0.000001) ) / (base + 0.000001)
def getMeanDose( self, field=None ):
"""Die mittlere Dosis eines Angegebenen Bereichs ermitteln
"""
if not field: # pragma: no cover
field = { "X1":-2, "X2": 2, "Y1": -2, "Y2":2 }
        # fetch the given region to determine the dose there
roi = self.image.getRoi( field ).copy()
#print( roi.mean() )
return roi.mean()
#print( self.metadata )
```

Quality signals (from the qsc_*_quality_signal columns):

- avg_line_length 31.460432; max_line_length 209; alphanum_fraction 0.552938
- num_words 459; num_chars 4,373; mean_word_length 5.198257; frac_words_unique 0.400871
- frac_chars_top_2grams 0.049036; top_3grams 0.029338; top_4grams 0.015088
- frac_chars_dupe_5grams 0.086756; 6grams 0.064543; 7grams 0.064543; 8grams 0.064543; 9grams 0.064543; 10grams 0.033529
- frac_chars_replacement_symbols 0; frac_chars_digital 0.026477; frac_chars_whitespace 0.326321
- size_file_byte 4,373; num_lines 139; num_chars_line_max 210; num_chars_line_mean 31.460432
- frac_chars_alphabet 0.783435; frac_chars_comments 0.333638; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0
- frac_lines_long_string 0.029412; frac_chars_string_length 0.149683; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- codepython: cate_ast 1; frac_lines_func_ratio 0.117647; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.088235; frac_lines_simplefunc 0; score_lines_no_logic 0.264706; frac_lines_print 0
- unsuffixed qsc_* counterparts all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective 1; hits 0
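A short sketch of the two normalization modes documented in `ispCheckClass.normalize` above, applied to plain NumPy arrays; the example arrays are made up, and the 0.000001 offset is the same division-by-zero guard the method uses:

```python
# The two normalization modes from ispCheckClass.normalize on bare arrays.
import numpy as np

base = np.array([1.0, 2.0, 4.0])    # the "open" (base) field
check = np.array([0.5, 2.0, 2.0])   # the "test" (check) field
eps = 0.000001                      # guard against division by zero

diff = (check + eps) / (base + eps)                       # normalize="diff"
prozent = ((check + eps) - (base + eps)) / (base + eps)   # normalize="prozent"
print(diff)     # approx. [0.5 1.  0.5]
print(prozent)  # approx. [-0.5  0.  -0.5]
```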
**Row 3: weixin.py**

- hexsha: 59de4c7689c2768a33575c05886260864bed92d5
- size: 4,354 | ext: py | lang: Python
- max_stars, max_issues, max_forks (identical): repo jizhouli/ladder-tournament, path src/services/weixin.py, head 0ecc072e79416f91f344a184d54ba4e5cf1a3167, licenses ["MIT"], counts null, event datetimes null
- content:

```python
# -*- coding: utf-8 -*-
import hashlib
import base64
from Crypto.Cipher import AES
import json
from src.services.utils import Request
class WXAPPError(Exception):
def __init__(self, code, description):
self.code = code
self.description = description
def __str__(self):
return '%s: %s' % (self.code, self.description)
class WXAPPAPI(object):
    # defaults to https
host = "api.weixin.qq.com"
def __init__(self, appid=None, app_secret=None):
self.appid = appid
self.app_secret = app_secret
def pre_params(self):
return dict(secret=self.app_secret,
appid=self.appid)
def jscode2session(self, js_code):
path = '/sns/jscode2session'
params = self.pre_params()
params.update(js_code=js_code,
grant_type='authorization_code')
response = Request.get(self.host, path, params)
content = json.loads(response.content.decode())
if content.get('errcode', 0):
raise WXAPPError(content.get('errcode', 0),
content.get("errmsg", ""))
return content
def client_credential_for_access_token(self):
path = '/cgi-bin/token'
params = self.pre_params()
params.update(grant_type='client_credential')
response = Request.get(self.host, path, params)
content = json.loads(response.content.decode())
if content.get('errcode', 0):
raise WXAPPError(content.get('errcode', 0),
content.get("errmsg", ""))
return content
def getwxacode(self, access_token, page_path):
        # Endpoint A: limited quota (A and C together: 100,000 codes)
path = '/wxa/getwxacode?access_token=%s' % access_token
params = {
'path': page_path,
}
response = Request.post(self.host, path, params)
try:
content = json.loads(response.content.decode())
if content.get('errcode', 0):
raise WXAPPError(content.get('errcode', 0),
content.get("errmsg", ""))
return content, None
        except ValueError:  # not JSON: the response body is the QR-code image bytes
return base64.standard_b64encode(response.content), len(response.content)
def getwxacodeunlimit(self, access_token, scene):
        # Endpoint B: unlimited quota; scene is a string(32)
path = '/wxa/getwxacodeunlimit?access_token=%s' % access_token
params = {
'scene': scene,
}
response = Request.post(self.host, path, params)
try:
content = json.loads(response.content.decode())
if content.get('errcode', 0):
raise WXAPPError(content.get('errcode', 0),
content.get("errmsg", ""))
return content, None
        except ValueError:  # not JSON: the response body is the QR-code image bytes
return base64.standard_b64encode(response.content), len(response.content)
def createwxaqrcode(self, access_token, page_path):
        # Endpoint C: limited quota (A and C together: 100,000 codes)
path = '/cgi-bin/wxaapp/createwxaqrcode?access_token=%s' % access_token
params = {
'path': page_path,
}
response = Request.post(self.host, path, params)
try:
content = json.loads(response.content.decode())
if content.get('errcode', 0):
raise WXAPPError(content.get('errcode', 0),
content.get("errmsg", ""))
return content, None
        except ValueError:  # not JSON: the response body is the QR-code image bytes
return base64.standard_b64encode(response.content), len(response.content)
class WXBizDataCrypt:
def __init__(self, appid, session_key):
self.app_id = appid
self.session_key = session_key
def decrypt(self, encryptedData, iv):
# base64 decode
sessionKey = base64.b64decode(self.session_key)
encryptedData = base64.b64decode(encryptedData)
iv = base64.b64decode(iv)
cipher = AES.new(sessionKey, AES.MODE_CBC, iv)
decrypted = json.loads(self._unpad(cipher.decrypt(encryptedData)))
if decrypted['watermark']['appid'] != self.app_id:
raise Exception('Invalid Buffer')
return decrypted
def check_raw_data(self, raw_data, session_key, signature):
return hashlib.sha1(raw_data + session_key).hexdigest() == signature
def _unpad(self, s):
return s[:-ord(s[len(s) - 1:])]
```

Quality signals (from the qsc_*_quality_signal columns):

- avg_line_length 34.015625; max_line_length 85; alphanum_fraction 0.590492
- num_words 475; num_chars 4,354; mean_word_length 5.275789; frac_words_unique 0.24
- frac_chars_top_2grams 0.059856; top_3grams 0.067837; top_4grams 0.071828
- frac_chars_dupe_5grams 0.505986; 6grams 0.474062; 7grams 0.437749; 8grams 0.437749; 9grams 0.437749; 10grams 0.437749
- frac_chars_replacement_symbols 0; frac_chars_digital 0.018549; frac_chars_whitespace 0.294212
- size_file_byte 4,354; num_lines 127; num_chars_line_max 86; num_chars_line_mean 34.283465
- frac_chars_alphabet 0.796941; frac_chars_comments 0.025494; cate_xml_start 0; frac_lines_dupe_lines 0.464646; cate_autogen 0
- frac_lines_long_string 0; frac_chars_string_length 0.082153; frac_chars_long_word_length 0.027384; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- codepython: cate_ast 1; frac_lines_func_ratio 0.131313; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.050505; frac_lines_simplefunc 0.040404; score_lines_no_logic 0.353535; frac_lines_print 0
- unsuffixed qsc_* counterparts all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective 1; hits 0
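A round-trip sketch of the AES-CBC plus PKCS#7 scheme that `WXBizDataCrypt.decrypt` undoes. The key and IV are illustrative placeholders (WeChat delivers them base64-encoded), and the unpadding expression is the same one used by `_unpad` above:

```python
# Round-trip of the encryption scheme WXBizDataCrypt decodes.
import json
from Crypto.Cipher import AES

key = b'0123456789abcdef'   # 16-byte session key (illustrative only)
iv = b'fedcba9876543210'    # 16-byte IV (illustrative only)

plaintext = json.dumps({"watermark": {"appid": "wx_demo"}}).encode()
pad_len = 16 - len(plaintext) % 16
padded = plaintext + bytes([pad_len]) * pad_len           # PKCS#7 padding

encrypted = AES.new(key, AES.MODE_CBC, iv).encrypt(padded)

decrypted = AES.new(key, AES.MODE_CBC, iv).decrypt(encrypted)
decrypted = decrypted[:-ord(decrypted[len(decrypted) - 1:])]  # same as _unpad
print(json.loads(decrypted))  # {'watermark': {'appid': 'wx_demo'}}
```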
**Row 4: app.py**

- hexsha: 59de4f9a0481ba9dea9ad73ed402abe9cfca87bf
- size: 15,265 | ext: py | lang: Python
- max_stars, max_issues, max_forks (identical): repo JuniorB5/App-desktop-caderno-de-contas, path Aplicacao/app.py, head 7f57656012e70a8641945d9b4caeb81adf6d25b2, licenses ["MIT"], counts null, event datetimes null
- content:

```python
from tkinter import *
from placeholder import EntPlaceHold
from gradiente import GradientFrame
from tkinter import ttk, messagebox
from funcoes_sqlite import FuncsSqlite
from funcoes_txt import ArquivosTexto
import os
class Front(FuncsSqlite, ArquivosTexto):
def __init__(self):
self.window = Tk()
self.CreateTable()
        self.path = os.getcwd()  # directory path, so the images can be referenced on any computer
def WindowConfigure(self):
"""
        Performs the initial window configuration and places the background image on the interface
"""
self.back_interface = PhotoImage(
file=os.path.join(self.path, 'img\\tecnology7.png'))
label_interface = Label(self.window, image=self.back_interface, bg='white')
self.window.geometry("1919x1056")
self.window.title('Contas')
label_interface.pack()
def CreateFrames(self):
"""
        This method creates all of the application's frames at once, without setting their positions, making them
        easier to manipulate.
"""
self.frame_cad = GradientFrame(self.window, bd=4, bg='#dfe3ee', highlightbackground='#759fe6',
highlightthickness=2,
color1='#00FF00', color2='#ADFF2F')
self.frame_del = GradientFrame(self.window, bd=4, bg='#dfe3ee', highlightbackground='#759fe6',
highlightthickness=2,
color1='#FFD700', color2='#FF0000')
self.frame_lista = GradientFrame(self.window, bd=4, highlightbackground='#759fe6',
highlightthickness=2,
color1='#00BFFF', color2='#1E90FF')
self.frame_creditos = GradientFrame(self.window, bd=4, highlightbackground='#759fe6',
highlightthickness=2,
color1='#00BFFF', color2='#1E90FF')
def WidgetsIniciais(self):
"""
        Creates the buttons of the initial interface
        """
        # Icons for the buttons of the initial interface
self.img_cadastro = PhotoImage(
file=os.path.join(self.path, 'img\\cliente_cadastro.png'))
self.img_excluir = PhotoImage(
file=os.path.join(self.path, 'img\\cliente_delete.png'))
self.img_conta = PhotoImage(
file=os.path.join(self.path, 'img\\atualizando.png'))
        # Registration label and button
self.button_cadastro = Button(self.window, image=self.img_cadastro, cursor='hand2',
command=self.IntCadastro)
self.button_cadastro.place(relx=0.02, rely=0.07)
self.label_cadastro = Label(self.window, text='Cadastrar Cliente', font=("Verdana Bold", 12), bg='white')
self.label_cadastro.place(relx=0.02, rely=0.05)
        # Client deletion label and button
self.button_excluir = Button(self.window, image=self.img_excluir, cursor='hand2', command=self.IntDelete)
self.button_excluir.place(relx=0.02, rely=0.3)
self.label_excluir = Label(self.window, text='Deletar Cliente', font=("Verdana Bold", 12), bg='white')
self.label_excluir.place(relx=0.02, rely=0.28)
        # Account label and button
self.button_conta = Button(self.window, image=self.img_conta, cursor='hand2', command=self.IntVerConta)
self.button_conta.place(relx=0.02, rely=0.53)
self.label_conta = Label(self.window, text='Ver Conta', font=("Verdana Bold", 12), bg='white')
self.label_conta.place(relx=0.02, rely=0.51)
    # This function clears the initial interface widgets before moving on to another section.
def LimparWidgetsIniciais(self):
self.widgtes_principais = [self.button_cadastro, self.button_excluir,
self.button_conta,
self.label_cadastro, self.label_excluir,
self.label_conta]
for widget in self.widgtes_principais:
widget.place_forget()
    # This function creates the widgets used by 'IntCadastro()', which builds the registration interface.
def WidgetsCadastro(self):
self.frame_cad.place(relx=0.03, rely=0.1, relwidth=0.3, relheight=0.15)
        # Label and Entry for the client name
self.label_nome = Label(self.frame_cad, text='Nome', fg='black', bg='#7CFC00')
self.label_nome.place(relx=0.22, rely=0.4)
self.entry_nome = EntPlaceHold(self.frame_cad, 'Digite o nome do cliente para cadastrá-lo')
self.entry_nome.place(relx=0.32, rely=0.4, relwidth=0.5)
self.btn_cadastro = Button(self.frame_cad, text='Cadastrar', bd=4, command=self.ComandoCadastro)
self.btn_cadastro.place(relx=0.46, rely=0.6)
    # This function creates the widgets used by 'IntDelete()', which builds the client deletion frame
def WidgetsDelete(self):
self.frame_del.place(relx=0.03, rely=0.1, relwidth=0.3, relheight=0.15)
self.label_nome_delete = Label(self.frame_del, text='Nome', fg='black', bg='#FFA500')
self.label_nome_delete.place(relx=0.22, rely=0.4)
self.entry_nome_del = EntPlaceHold(self.frame_del, 'Digite o nome do cliente para Deletá-lo')
self.entry_nome_del.place(relx=0.32, rely=0.4, relwidth=0.5)
self.btn_delete = Button(self.frame_del, text='Deletar', bd=4, command=self.ComandoDelete)
self.btn_delete.place(relx=0.46, rely=0.6)
    # This function creates the widgets related to the View Account button
def WidgetsListaVerConta(self):
"""
        Elements related to the View Account section
        """
        # images
self.icone_pesquisa = PhotoImage(
file=os.path.join(self.path, 'img\\icone_pesquisa.png'))
self.icone_atualizar_conta = PhotoImage(
file=os.path.join(self.path, 'img\\icone_att.png'))
self.icone_abrir = PhotoImage(
file=os.path.join(self.path, 'img\\icone_abrir.png'))
        # List in which the clients and their accounts appear.
self.lista_ver_conta = ttk.Treeview(self.frame_lista, height=3, column='nome')
self.frame_lista.place(relx=0.28, rely=0.05, relwidth=0.4, relheight=0.9)
self.entry_buscar_lista = EntPlaceHold(self.frame_lista, 'Digite o nome do cliente')
self.entry_buscar_lista.place(relx=0.25, rely=0.03, relwidth=0.5)
self.botao_buscar_lista = Button(self.frame_lista, image=self.icone_pesquisa, command=self.ComandoBuscarCliente)
self.botao_buscar_lista.place(relx=0.76, rely=0.027)
self.area_txt = Text(self.frame_lista)
self.area_txt.place(relx=0.2, rely=0.6, relwidth=0.6, relheight=0.37)
self.area_txt.configure(font=('Courier', 13), bg='#D3D3D3')
self.botao_abrir_conta = Button(self.frame_lista, image=self.icone_abrir, command=self.InsertArchEntry)
self.botao_abrir_conta.place(relx=0.812, rely=0.62)
self.botao_atualizar = Button(self.frame_lista, image=self.icone_atualizar_conta, command=self.CommandAttAcount)
self.botao_atualizar.place(relx=0.812, rely=0.68)
self.lista_ver_conta.heading('#0', text='')
self.lista_ver_conta.heading('#1', text='Clientes')
self.lista_ver_conta.column('#0', width=1)
self.lista_ver_conta.column('#1', width=100)
self.lista_ver_conta.place(relx=0.3, rely=0.07, relwidth=0.4, relheight=0.5)
self.Scroollista = Scrollbar(self.frame_lista, orient='vertical')
self.lista_ver_conta.configure(yscroll=self.Scroollista.set)
self.Scroollista.place(relx=0.67, rely=0.071, relwidth=0.03, relheight=0.499)
self.SelectLista(self.lista_ver_conta)
self.lista_ver_conta.bind('<Double-1>', self.SelectDoubleClick)
def WidgetsCreditos(self):
self.frame_creditos.place(relx=0.28, rely=0.05, relwidth=0.4, relheight=0.9)
self.imagem_back = PhotoImage(file=r'/AppDesktopMercadinho/img/back_interface_credito.png')
self.imagem_icone_cadastro = PhotoImage(file=r'/AppDesktopMercadinho/img/cliente_cadastro.png')
self.imagem_icone_atualizar = PhotoImage(file=r'/AppDesktopMercadinho/img/atualizando.png')
self.label_back = Label(self.frame_creditos, image=self.imagem_back)
self.label_cadastro_credito = Label(self.frame_creditos, image=self.imagem_icone_cadastro)
self.label_delete_credito = Label(self.frame_creditos, image=self.imagem_icone_atualizar)
self.label_back.place(relx=0.05, rely=0.1)
self.label_cadastro_credito.place(relx=0.05, rely=0.4)
self.label_delete_credito.place(relx=0.28, rely=0.4)
self.label_back_texto = Label(self.frame_creditos, text='Link da Imagem original: https://pt.vecteezy.com/vetor-gratis/computador')
self.label_back_texto.place(relx=0.46, rely=0.2)
self.label_icones_texto = Label(self.frame_creditos, text=f'Links do Site original dos ícones:{os.linesep} https://www.freepik.com{os.linesep}'
'https://www.flaticon.com/br/')
self.label_icones_texto.place(relx=0.61, rely=0.44)
    # The functions below are the commands executed when the initial interface buttons are pressed
def IntCadastro(self):
self.LimparWidgetsIniciais()
self.WidgetsCadastro()
def IntDelete(self):
self.LimparWidgetsIniciais()
self.WidgetsDelete()
def IntVerConta(self):
self.LimparWidgetsIniciais()
self.WidgetsListaVerConta()
def ComandoCadastro(self):
"""
        This function generates the command that registers the client in the database.
"""
nome_cadastro = self.entry_nome.get()
if nome_cadastro == '' or nome_cadastro == 'Digite o nome do cliente para cadastrá-lo':
messagebox.showerror('Erro', 'É preciso digitar o nome do cliente para cadastrá-lo')
else:
try:
self.CreateClient(nome_cadastro)
self.CreateArch(nome_cadastro)
messagebox.showinfo('Sucesso', 'Cliente Cadastrado com sucesso!')
except:
messagebox.showerror('Erro', 'Houve um erro ao cadastrar o cliente')
def ComandoDelete(self):
"""
        This function generates the command that deletes the client from the database when the button is pressed.
"""
nome_delete = self.entry_nome_del.get()
if nome_delete == '' or nome_delete == 'Digite o nome do cliente para Deletá-lo':
messagebox.showerror('Erro', 'Digite um nome válido')
else:
confirm_delete = messagebox.askyesno('Confirme', 'Você tem certeza que deseja deletar esse cliente?')
if confirm_delete:
try:
self.DeleteClient(nome_delete)
self.DeleteArch(nome_delete)
messagebox.showinfo('Sucesso', 'Cliente deletado com sucesso')
except FileNotFoundError as erro:
print(erro)
messagebox.showerror('Erro', 'Houve um erro ao deletar o cliente do banco. Verifique se o cliente '
'já foi excluído do banco de dados')
def InsertArchEntry(self):
""" Esta Função insere o conteúdo do arquivo txt na area de texto do tkinter"""
self.area_txt.delete('1.0', END)
try:
nome = self.entry_buscar_lista.get()
arquivo_read = self.ReadAccount(nome)
self.area_txt.insert(END, arquivo_read)
except FileNotFoundError:
messagebox.showerror('Erro', 'O cliente digitado não está cadastrado na lista')
def CommandAttAcount(self):
""" Função que atualiza o conteúdo do arquivo txt"""
nome_entry = self.entry_buscar_lista.get()
if nome_entry == '' or nome_entry == 'Digite o nome do cliente':
messagebox.showerror('Erro', 'Digite um nome válido')
else:
confirm_att = messagebox.askyesno('Confirme', 'Você deseja realmente atualizar a conta desta maneira?')
if confirm_att:
try:
txt_anotacoes = self.area_txt.get('1.0', END)
self.WriteAccount(nome_entry, txt_anotacoes)
messagebox.showinfo('Sucesso', 'Conta atualizada com sucesso!')
except FileNotFoundError as erro:
print(erro)
messagebox.showerror('Erro', 'O cliente selecionado não está cadastrado. Verifique se o nome está '
                                                     'escrito exatamente igual como está na lista!')
def ComandoBuscarCliente(self):
""" Comando que seleciona o cliente na lista ao buscar ele"""
self.BuscarCliente(self.lista_ver_conta, self.entry_buscar_lista)
def Menu(self):
"""
        Menu at the top of the screen
"""
self.barra_menu = Menu(self.window)
self.window.config(menu=self.barra_menu)
self.menu_volta = Menu(self.barra_menu)
self.barra_menu.add_cascade(label='Menu', menu=self.menu_volta)
self.menu_volta.add_command(label='Voltar', command=self.BackMenu)
self.menu_volta.add_command(label='Créditos', command=self.MostrarCreditos)
def SelectDoubleClick(self, event):
"""Função que insere o nome do cliente dentro da entry de busca ao dar um duplo clique no nome do cliente"""
self.entry_buscar_lista.delete(0, END)
self.lista_ver_conta.selection()
for nome in self.lista_ver_conta.selection():
nome_ins = self.lista_ver_conta.item(nome, 'values')
for n in nome_ins:
self.entry_buscar_lista.insert(END, n)
def BackMenu(self):
"""
        Command that returns to the initial interface from any section
"""
if self.frame_cad:
self.frame_cad.place_forget()
if self.frame_del:
self.frame_del.place_forget()
if self.frame_lista:
self.frame_lista.place_forget()
if self.frame_creditos:
self.frame_creditos.place_forget()
self.WidgetsIniciais()
def LimparTodosWidgets(self):
"""
        Command that removes every widget from the screen; used to show the image credits under the Credits menu option.
"""
if self.frame_cad:
self.frame_cad.place_forget()
if self.frame_del:
self.frame_del.place_forget()
if self.frame_lista:
self.frame_lista.place_forget()
if self.button_cadastro:
self.LimparWidgetsIniciais()
def MostrarCreditos(self):
"""
        Command that 'calls' the interface showing the image credits on screen.
"""
self.LimparTodosWidgets()
self.WidgetsCreditos()
def Iniciar(self):
"""
        Method that starts the application
"""
self.WindowConfigure()
self.CreateFrames()
self.WidgetsIniciais()
self.Menu()
self.window.mainloop()
app = Front()
app.Iniciar()
```

Quality signals (from the qsc_*_quality_signal columns):

- avg_line_length 42.402778; max_line_length 151; alphanum_fraction 0.634261
- num_words 1,913; num_chars 15,265; mean_word_length 4.936226; frac_words_unique 0.195504
- frac_chars_top_2grams 0.038123; top_3grams 0.029652; top_4grams 0.023404
- frac_chars_dupe_5grams 0.366727; 6grams 0.288256; 7grams 0.255851; 8grams 0.21095; 9grams 0.182993; 10grams 0.126761
- frac_chars_replacement_symbols 0; frac_chars_digital 0.026393; frac_chars_whitespace 0.260334
- size_file_byte 15,265; num_lines 359; num_chars_line_max 152; num_chars_line_mean 42.520891
- frac_chars_alphabet 0.809937; frac_chars_comments 0.117458; cate_xml_start 0; frac_lines_dupe_lines 0.164444; cate_autogen 0
- frac_lines_long_string 0.004444; frac_chars_string_length 0.127362; frac_chars_long_word_length 0.015939; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0.008357; frac_lines_assert 0
- codepython: cate_ast 1; frac_lines_func_ratio 0.102222; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.031111; frac_lines_simplefunc 0; score_lines_no_logic 0.137778; frac_lines_print 0.008889
- unsuffixed qsc_* counterparts all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective 1; hits 0
**Row 5: mock_server.py**

- hexsha: 59e68f6b671a9c6215090e86777cb82fbf2376fb
- size: 586 | ext: py | lang: Python
- max_stars: repo Peopple-Shopping-App/mockserver, path app/api/mock_server.py, head c38c3f325e44f4eaba39cdbe24544e3181307218, licenses ["MIT"], count 1, events 2021-07-23T03:43:19.000Z to 2021-07-23T03:43:19.000Z
- max_issues, max_forks: same repo, path, and head; licenses ["MIT"]; counts null; event datetimes null
- content:

```python
import json
import os.path
import falcon
from app.__root__ import __path__
class MockServerResource:
def __init__(self):
self.__root_path__ = __path__()
async def on_get(self, req, resp, route: str):
if not route.endswith(".json"):
route += ".json"
filepath = os.path.join(self.__root_path__, "assets", route)
if not os.path.exists(filepath):
raise falcon.HTTPNotImplemented("File Requested Not Available")
with open(filepath, 'r') as fp:
data = json.load(fp)
resp.text = json.dumps(data)
```

Quality signals (from the qsc_*_quality_signal columns):

- avg_line_length 26.636364; max_line_length 75; alphanum_fraction 0.633106
- num_words 74; num_chars 586; mean_word_length 4.648649; frac_words_unique 0.554054
- frac_chars_top_2grams 0.052326; top_3grams 0.069767; top_4grams 0
- frac_chars_dupe_5grams through 10grams all 0
- frac_chars_replacement_symbols 0; frac_chars_digital 0; frac_chars_whitespace 0.257679
- size_file_byte 586; num_lines 21; num_chars_line_max 76; num_chars_line_mean 27.904762
- frac_chars_alphabet 0.790805; frac_chars_comments 0; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0
- frac_lines_long_string 0; frac_chars_string_length 0.076792; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- codepython: cate_ast 1; frac_lines_func_ratio 0.0625; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.25; frac_lines_simplefunc 0; score_lines_no_logic 0.375; frac_lines_print 0
- unsuffixed qsc_* counterparts all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective 1; hits 0
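A sketch of mounting the resource above in a Falcon ASGI app; the `{route}` field in the route template must match the `route` parameter of `on_get`, and the import path assumes the repository layout shown in the metadata:

```python
# Wiring sketch for MockServerResource (import path assumed from the repo layout).
import falcon.asgi

from app.api.mock_server import MockServerResource

app = falcon.asgi.App()
app.add_route('/mock/{route}', MockServerResource())
# Run with any ASGI server, e.g.: uvicorn wiring_module:app
```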
**Row 6: duckduckgo.py**

- hexsha: 59e991030ccd113468742884f239af378d4fd07a
- size: 703 | ext: py | lang: Python
- max_stars, max_issues, max_forks (identical): repo skinatro/discord-bot, path modules/duckduckgo.py, head b9b717c61c1d83cc420d5b8be62534d56a3bca50, licenses ["MIT"], counts null, event datetimes null
- content:

```python
import asyncio
import aiohttp
async def duck_search(search_term):
search_term = "+".join(search_term.split())
url = f"https://api.duckduckgo.com/?q={search_term}&format=json"
async with aiohttp.ClientSession() as session:
async with session.get(url) as r:
if r.status == 200:
content = await r.json(content_type="application/x-javascript")
result = content["AbstractText"]
if content["AbstractText"] == "":
result = content["RelatedTopics"][0]["Text"]
return result
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(duck_search(input("> ")))
```

Quality signals (from the qsc_*_quality_signal columns):

- avg_line_length 33.47619; max_line_length 79; alphanum_fraction 0.611664
- num_words 81; num_chars 703; mean_word_length 5.074074; frac_words_unique 0.592593
- frac_chars_top_2grams 0.097324; top_3grams 0; top_4grams 0
- frac_chars_dupe_5grams through 10grams all 0
- frac_chars_replacement_symbols 0; frac_chars_digital 0.007648; frac_chars_whitespace 0.256046
- size_file_byte 703; num_lines 20; num_chars_line_max 80; num_chars_line_mean 35.15
- frac_chars_alphabet 0.778203; frac_chars_comments 0; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0
- frac_lines_long_string 0; frac_chars_string_length 0.186344; frac_chars_long_word_length 0.034139; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- codepython: cate_ast 1; frac_lines_func_ratio 0; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.125; frac_lines_simplefunc 0; score_lines_no_logic 0.1875; frac_lines_print 0
- unsuffixed qsc_* counterparts all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective 1; hits 0
**Row 7: statusbar.py**

- hexsha: 59ebe1657d09ae546bdf3ca41d37de806404c083
- size: 1,709 | ext: py | lang: Python
- max_stars: repo phil65/PrettyQt, path prettyqt/widgets/statusbar.py, head 26327670c46caa039c9bd15cb17a35ef5ad72e6c, licenses ["MIT"], count 7, events 2019-05-01T01:34:36.000Z to 2022-03-08T02:24:14.000Z
- max_issues: same repo, path, and head; licenses ["MIT"]; count 141; events 2019-04-16T11:22:01.000Z to 2021-04-14T15:12:36.000Z
- max_forks: same repo, path, and head; licenses ["MIT"]; count 5; events 2019-04-17T11:48:19.000Z to 2021-11-21T10:30:19.000Z
- content:

```python
from __future__ import annotations
from prettyqt import widgets
from prettyqt.qt import QtWidgets
QtWidgets.QStatusBar.__bases__ = (widgets.Widget,)
class StatusBar(QtWidgets.QStatusBar):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.progress_bar = widgets.ProgressBar()
def __add__(self, other: QtWidgets.QAction | QtWidgets.QWidget) -> StatusBar:
if isinstance(other, QtWidgets.QAction):
self.addAction(other)
return self
elif isinstance(other, QtWidgets.QWidget):
self.addWidget(other)
return self
else:
raise TypeError(other)
def setup_default_bar(self) -> None:
# This is simply to show the bar
self.progress_bar.hide()
self.progress_bar.setRange(0, 0)
self.progress_bar.setFixedSize(200, 20)
self.progress_bar.setTextVisible(False)
self.addPermanentWidget(self.progress_bar)
def add_action(self, action: QtWidgets.QAction) -> None:
self.addAction(action)
def add_widget(self, widget: QtWidgets.QWidget, permanent: bool = False) -> None:
if permanent:
self.addPermanentWidget(widget)
else:
self.addWidget(widget)
def show_message(self, message: str, timeout: int = 0) -> None:
self.showMessage(message, timeout)
if __name__ == "__main__":
app = widgets.app()
dlg = widgets.MainWindow()
status_bar = StatusBar()
status_bar.set_color("black")
label = widgets.Label("test")
status_bar.addWidget(label)
status_bar.setup_default_bar()
dlg.setStatusBar(status_bar)
dlg.show()
app.main_loop()
```

Quality signals (from the qsc_*_quality_signal columns):

- avg_line_length 29.982456; max_line_length 85; alphanum_fraction 0.657695
- num_words 193; num_chars 1,709; mean_word_length 5.57513; frac_words_unique 0.38342
- frac_chars_top_2grams 0.066915; top_3grams 0.083643; top_4grams 0
- frac_chars_dupe_5grams through 10grams all 0
- frac_chars_replacement_symbols 0; frac_chars_digital 0.00614; frac_chars_whitespace 0.237566
- size_file_byte 1,709; num_lines 56; num_chars_line_max 86; num_chars_line_mean 30.517857
- frac_chars_alphabet 0.819647; frac_chars_comments 0.017554; cate_xml_start 0; frac_lines_dupe_lines 0.093023; cate_autogen 0
- frac_lines_long_string 0; frac_chars_string_length 0.010137; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- codepython: cate_ast 1; frac_lines_func_ratio 0.139535; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.069767; frac_lines_simplefunc 0; score_lines_no_logic 0.27907; frac_lines_print 0
- unsuffixed qsc_* counterparts all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective 1; hits 0
**Row 8: clear.py**

- hexsha: ab6476dc8c8370931ed198d29dc49fb830793195
- size: 670 | ext: py | lang: Python
- max_stars, max_issues, max_forks (identical): repo kenjiflores/annotations_list, path utils/clear.py, head ab86ac0c9a6f739db9b3d1eda66b7791dbb2f774, licenses ["MIT"], counts null, event datetimes null
- content:

```python
# Function to clear data in instances folder before new run
import os
import shutil
def clear_instances(directory):
instances_folder = directory
folders = os.listdir(instances_folder)
for folder in folders:
shutil.rmtree(instances_folder + folder)
print('\nInstances folder cleared\n')
def clear_all():
img_dir = './data/images/'
anno_dir = './data/annotations/'
save_dir = './data/instances/'
imgs = os.listdir(img_dir)
annos = os.listdir(anno_dir)
object_folders = os.listdir(save_dir)
for img in imgs:
os.remove(img_dir + img)
for anno in annos:
os.remove(anno_dir + anno)
for folder in object_folders:
shutil.rmtree(save_dir + folder)
```

Quality signals (from the qsc_*_quality_signal columns):

- avg_line_length 21.612903; max_line_length 59; alphanum_fraction 0.737313
- num_words 99; num_chars 670; mean_word_length 4.828283; frac_words_unique 0.343434
- frac_chars_top_2grams 0.125523; top_3grams 0.066946; top_4grams 0
- frac_chars_dupe_5grams through 10grams all 0
- frac_chars_replacement_symbols 0; frac_chars_digital 0; frac_chars_whitespace 0.156716
- size_file_byte 670; num_lines 31; num_chars_line_max 60; num_chars_line_mean 21.612903
- frac_chars_alphabet 0.846018; frac_chars_comments 0.085075; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0
- frac_lines_long_string 0; frac_chars_string_length 0.127451; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- codepython: cate_ast 1; frac_lines_func_ratio 0.095238; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.095238; frac_lines_simplefunc 0; score_lines_no_logic 0.190476; frac_lines_print 0.047619
- unsuffixed qsc_* counterparts all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective 1; hits 0
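A usage sketch for the helpers above, assuming the repository root is on `sys.path`. Note that `clear_instances` concatenates `directory + folder` directly, so the argument needs a trailing slash; building paths with `os.path.join` would lift that requirement:

```python
# Usage sketch; the import assumes the repo root is importable.
from utils.clear import clear_instances, clear_all

clear_instances('./data/instances/')  # trailing slash required by the concatenation
clear_all()                           # empties images/, annotations/ and instances/
```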
**Row 9: py14443a.py**

- hexsha: ab6578fa2d97ee82b7eac9fdd3a1cc27f58b5616
- size: 1,535 | ext: py | lang: Python
- max_stars: repo urbanlab/museotouch-educatouch, path app/museolib/libs/py14443a.py, head 9ad8f4329a84ba69147ffef57dc564082153bf02, licenses ["MIT"], count 1, events 2018-02-09T12:56:01.000Z to 2018-02-09T12:56:01.000Z
- max_issues, max_forks: same repo, path, and head; licenses ["MIT"]; counts null; event datetimes null
- content:

```python
"""Python functions to handle the ISO-14443-A protocol basics (parity and CRC)"""
# Pynfc is a python wrapper for the libnfc library
# Copyright (C) 2009 Mike Auty
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
def crc(inbytes):
"""Calculates the ISO-14443A CRC checksum"""
wCrc = 0x6363
for i in range(len(inbytes)):
byte = ord(inbytes[i])
byte = (byte ^ (wCrc & 0x00FF))
byte = ((byte ^ (byte << 4)) & 0xFF)
wCrc = ((wCrc >> 8) ^ (byte << 8) ^ (byte << 3) ^ (byte >> 4)) & 0xFFFF
res = chr(wCrc & 0xFF) + chr((wCrc >> 8) & 0xff)
return res
def parity(inbytes):
"""Calculates the odd parity bits for a byte string"""
res = ""
for i in inbytes:
tempres = 1
for j in range(8):
tempres = tempres ^ ((ord(i) >> j) & 0x1)
res += chr(tempres)
return res
```

Quality signals (from the qsc_*_quality_signal columns):

- avg_line_length 36.547619; max_line_length 83; alphanum_fraction 0.628664
- num_words 220; num_chars 1,535; mean_word_length 4.386364; frac_words_unique 0.527273
- frac_chars_top_2grams 0.034197; top_3grams 0.040415; top_4grams 0.059067
- frac_chars_dupe_5grams 0.084974; 6grams 0.058031; 7grams 0; 8grams 0; 9grams 0; 10grams 0
- frac_chars_replacement_symbols 0; frac_chars_digital 0.042934; frac_chars_whitespace 0.271661
- size_file_byte 1,535; num_lines 41; num_chars_line_max 84; num_chars_line_mean 37.439024
- frac_chars_alphabet 0.820215; frac_chars_comments 0.584365; cate_xml_start 0; frac_lines_dupe_lines 0.117647; cate_autogen 0
- frac_lines_long_string 0; frac_chars_string_length 0; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0.058304; frac_lines_prompt_comments 0; frac_lines_assert 0
- codepython: cate_ast 1; frac_lines_func_ratio 0.117647; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0; frac_lines_simplefunc 0; score_lines_no_logic 0.235294; frac_lines_print 0
- unsuffixed qsc_* counterparts all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective 1; hits 0
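A usage sketch for the two helpers above, assuming the repository root is importable. Both take byte strings in the Python 2 style (one character per byte) and return characters, shown here as integers for readability; `'\x26'` is the ISO 14443-A REQA command:

```python
# Usage sketch for crc() and parity(); values printed as ints for readability.
from app.museolib.libs.py14443a import crc, parity

frame = '\x26'                             # REQA as a one-character byte string
checksum = crc(frame)
print([hex(ord(c)) for c in checksum])     # the two CRC_A bytes, low byte first
print([ord(c) for c in parity(frame)])     # one odd-parity bit per input byte
```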
**Row 10: test_attrs_sqlalchemy.py**

- hexsha: ab672b40a2a078bded1e28db2d0f65c585f16827
- size: 4,294 | ext: py | lang: Python
- max_stars: repo GoodRx/attrs_sqlalchemy, path test_attrs_sqlalchemy.py, head 907eb269f1a0a1e4647ede5b44eb7a9210d954ae, licenses ["MIT"], count 3, events 2016-09-27T01:02:08.000Z to 2021-11-06T17:07:25.000Z
- max_issues: same repo, path, and head; licenses ["MIT"]; count 5; events 2017-01-16T01:44:45.000Z to 2018-03-07T23:16:16.000Z
- max_forks: same repo, path, and head; licenses ["MIT"]; count 4; events 2017-05-11T00:53:52.000Z to 2021-11-06T17:07:12.000Z
- content:

```python
import pytest
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from attrs_sqlalchemy import attrs_sqlalchemy
class TestAttrsSqlalchemy:
def test_deprecation(self):
with pytest.warns(UserWarning, match='attrs_sqlalchemy is deprecated'):
@attrs_sqlalchemy
class MyModel(declarative_base()):
__tablename__ = 'mymodel'
id = sa.Column(sa.Integer, primary_key=True)
@pytest.mark.parametrize('decorator', [attrs_sqlalchemy, attrs_sqlalchemy()])
def test_attrs_sqlalchemy(self, decorator):
"""
Decorating a class with ``@attrs_sqlalchemy`` or
``@attrs_sqlalchemy()`` should add ``__repr__``, ``__eq__``, and
``__hash__`` methods based on the fields on the model.
"""
TestBase = declarative_base()
@attrs_sqlalchemy
class MyModel(TestBase):
__tablename__ = 'mymodel'
id = sa.Column(sa.Integer, primary_key=True)
text = sa.Column(sa.String)
instance = MyModel(id=1, text='hello')
same_data = MyModel(id=1, text='hello')
same_pk = MyModel(id=1, text='world')
# All fields are the same
assert instance == same_data
# Primary key is not enough for equality
assert instance != same_pk
# Instances should have a repr containing their keys and type
assert repr(instance) == "MyModel(id=1, text='hello')"
# Instances should be hashable by their fields and used in a dict
d = {instance: True}
assert d.get(same_data) == d[instance]
assert d.get(same_pk) is None
def test_field_name_not_column_name(self):
"""
``@attrs_sqlalchemy`` should use attribute/field names, not column names.
"""
@attrs_sqlalchemy
class MyModel(declarative_base()):
__tablename__ = 'mymodel'
_id = sa.Column('id', sa.Integer, primary_key=True)
text = sa.Column(sa.String)
assert {attr.name for attr in MyModel.__attrs_attrs__} == {'_id', 'text'}
def test_subclass(self):
"""
        When used on a subclass, ``@attrs_sqlalchemy`` should also include the
attributes from the parent class(es), even if a parent class is also
decorated with ``@attrs_sqlalchemy``.
"""
@attrs_sqlalchemy
class ParentModel(declarative_base()):
__tablename__ = 'parent'
id = sa.Column(sa.Integer, primary_key=True)
type = sa.Column(sa.String)
__mapper_args__ = {
'polymorphic_identity': 'parent',
'polymorphic_on': type,
}
@attrs_sqlalchemy
class ChildModel(ParentModel):
__tablename__ = 'child'
id = sa.Column(sa.Integer, sa.ForeignKey('parent.id'), primary_key=True)
child_field = sa.Column(sa.Integer)
__mapper_args__ = {
'polymorphic_identity': 'child',
}
@attrs_sqlalchemy
class ChildChildModel(ChildModel):
__tablename__ = 'very_child'
id = sa.Column(sa.Integer, sa.ForeignKey('child.id'), primary_key=True)
very_child_field = sa.Column(sa.String)
__mapper_args__ = {
'polymorphic_identity': 'childchild',
}
assert {attr.name for attr in ChildModel.__attrs_attrs__} == {
'id', 'type', 'child_field',
}
assert {attr.name for attr in ChildChildModel.__attrs_attrs__} == {
'id', 'type', 'child_field', 'very_child_field',
}
def test_hybrid_property(self):
"""
Hybrid properties should not be included in the attributes used by
``@attrs_sqlalchemy``.
"""
@attrs_sqlalchemy
class MyModel(declarative_base()):
__tablename__ = 'mymodel'
id = sa.Column(sa.Integer, primary_key=True)
text = sa.Column(sa.String)
@hybrid_property
def tiny_text(self): # pragma: no cover
return self.text[:10]
assert {attr.name for attr in MyModel.__attrs_attrs__} == {'id', 'text'}
```

Quality signals (from the qsc_*_quality_signal columns):

- avg_line_length 32.778626; max_line_length 84; alphanum_fraction 0.598742
- num_words 470; num_chars 4,294; mean_word_length 5.17234; frac_words_unique 0.251064
- frac_chars_top_2grams 0.111065; top_3grams 0.049362; top_4grams 0.048951
- frac_chars_dupe_5grams 0.381736; 6grams 0.348828; 7grams 0.275195; 8grams 0.275195; 9grams 0.194981; 10grams 0.194981
- frac_chars_replacement_symbols 0; frac_chars_digital 0.001992; frac_chars_whitespace 0.298556
- size_file_byte 4,294; num_lines 130; num_chars_line_max 85; num_chars_line_mean 33.030769
- frac_chars_alphabet 0.805113; frac_chars_comments 0.166279; cate_xml_start 0; frac_lines_dupe_lines 0.307692; cate_autogen 0
- frac_lines_long_string 0; frac_chars_string_length 0.089166; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0.115385
- codepython: cate_ast 1; frac_lines_func_ratio 0.076923; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.064103; frac_lines_simplefunc 0.012821; score_lines_no_logic 0.25641; frac_lines_print 0
- unsuffixed qsc_* counterparts all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective 1; hits 0
**Row 11: page_audit.py**

- hexsha: ab6bef72589cc503d05f80a672c71bbfadb1fbde
- size: 1,001 | ext: py | lang: Python
- max_stars: repo salonimalhotra-ui/seo-audits-toolkit, path toolkit/controller/audit/page_audit.py, head 99af8b53dffad45f679eaf06b4a8080df75fcd72, licenses ["MIT"], count 1, events 2020-12-21T18:21:34.000Z to 2020-12-21T18:21:34.000Z
- max_issues, max_forks (identical): repo x0rzkov/seo-audits-toolkit, path toolkit/controller/audit/page_audit.py, head 29994cbab51bd0697c717b675df3c176096e4f03, licenses ["MIT"], counts null, event datetimes null
- content:

```python
from urllib.parse import urlparse
from toolkit.lib.http_tools import request_page
from bs4 import BeautifulSoup
class AuditPage():
def __init__(self, url):
parsed_url = urlparse(url)
self.domain = parsed_url.netloc
self.scheme = parsed_url.scheme
self.path = parsed_url.path
self.request = request_page(self.generate_url())
self.status_code = self.request.status_code
self.headers = self.request.headers
self.soup = BeautifulSoup(self.request.content, 'html.parser')
def __str__(self):
a = "--------------------\n"
a += "Domain: " + self.domain + "\n"
a += "Scheme: " + self.scheme + "\n"
a += "Path: " + self.path + "\n"
a += "Status Code: " + str(self.status_code) + "\n"
a += "Headers: " + str([x for x in self.headers]) + "\n"
return a
def generate_url(self):
        return self.scheme + "://" + self.domain + self.path  # self.path already starts with "/"
```

Quality signals (from the qsc_*_quality_signal columns):

- avg_line_length 27.805556; max_line_length 70; alphanum_fraction 0.563437
- num_words 120; num_chars 1,001; mean_word_length 4.533333; frac_words_unique 0.325
- frac_chars_top_2grams 0.018382; top_3grams 0.055147; top_4grams 0
- frac_chars_dupe_5grams through 10grams all 0
- frac_chars_replacement_symbols 0; frac_chars_digital 0.001395; frac_chars_whitespace 0.283716
- size_file_byte 1,001; num_lines 36; num_chars_line_max 71; num_chars_line_mean 27.805556
- frac_chars_alphabet 0.757322; frac_chars_comments 0; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0
- frac_lines_long_string 0; frac_chars_string_length 0.090818; frac_chars_long_word_length 0.021956; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- codepython: cate_ast 1; frac_lines_func_ratio 0.130435; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.130435; frac_lines_simplefunc 0.043478; score_lines_no_logic 0.391304; frac_lines_print 0
- unsuffixed qsc_* counterparts all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective 1; hits 0
**Row 12: test_autoupdate.py**

- hexsha: ab6c9a0f2f081341d061a42592f2a778f4f62494
- size: 2,968 | ext: py | lang: Python
- max_stars: repo cmu-cs-academy/cpython-cmu-graphics, path tests/test_autoupdate.py, head 04622a80642156ad646a00203899a8f3726f5b73, licenses ["BSD-3-Clause"], count 3, events 2020-02-01T22:24:24.000Z to 2020-04-20T16:59:08.000Z
- max_issues: same repo, path, and head; licenses ["BSD-3-Clause"]; count 5; events 2020-11-03T21:47:36.000Z to 2021-02-23T22:18:14.000Z
- max_forks: same repo, path, and head; licenses ["BSD-3-Clause"]; count 1; events 2020-06-11T21:03:50.000Z to 2020-06-11T21:03:50.000Z
- content:

```python
import os
import shutil
import zipfile
from http.server import HTTPServer, CGIHTTPRequestHandler
import threading
import subprocess
import sys
import traceback
PORT = 3000
os.chdir('autoupdate')
def create_folder_and_zip():
def zipdir(path, ziph):
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file))
os.mkdir('cmu_graphics_installer')
shutil.copytree('../../cmu_graphics', 'cmu_graphics_installer/cmu_graphics')
with zipfile.ZipFile('cmu_graphics_installer.zip', 'w', zipfile.ZIP_DEFLATED) as zipf:
zipdir('cmu_graphics_installer', zipf)
shutil.move('cmu_graphics_installer/cmu_graphics', 'cmu_graphics')
os.rmdir('cmu_graphics_installer')
os.mkdir('srv')
shutil.move('cmu_graphics_installer.zip', 'srv/cmu_graphics_installer.zip')
# server version
with open('srv/version.txt', 'w+') as f:
f.write('0.0.1')
# local version
with open('cmu_graphics/meta/version.txt', 'w') as f:
f.write('0.0.0')
def set_mock_urls():
def replace_in_file(filename, old_string, new_string):
content = None
with open(filename, 'r') as f:
content = f.read()
with open(filename, 'w') as f:
f.write(content.replace(old_string, new_string))
replace_in_file(
'cmu_graphics/updater.py',
'https://s3.amazonaws.com/cmu-cs-academy.lib.prod/desktop-cmu-graphics/cmu_graphics_installer.zip',
'http://localhost:%d/srv/cmu_graphics_installer.zip' % PORT
)
replace_in_file(
'cmu_graphics/cmu_graphics.py',
'https://s3.amazonaws.com/cmu-cs-academy.lib.prod/desktop-cmu-graphics/version.txt',
'http://localhost:%d/srv/version.txt' % PORT
)
def run_server():
httpd = HTTPServer(('', PORT), CGIHTTPRequestHandler)
httpd.serve_forever()
def spawn_server():
daemon = threading.Thread(target=run_server)
daemon.setDaemon(True)
daemon.start()
def run_student_code():
p = subprocess.Popen(
[sys.executable, 'update_trigger.py'],
env={**os.environ, 'CMU_GRAPHICS_AUTO_UPDATE': 'YES'}
)
assert(p.wait() == 0)
def assert_update_succeeded():
with open('cmu_graphics/meta/version.txt', 'r') as f:
assert f.read() != '0.0.0'
run_student_code()
def cleanup():
for dir in ('cmu_graphics', 'cmu_graphics_installer', 'srv'):
if os.path.exists(dir):
shutil.rmtree(dir)
for file in ('cmu_graphics_installer.zip', 'version.txt'):
if os.path.exists(file):
os.remove(file)
def main():
exit_code = 0
try:
create_folder_and_zip()
set_mock_urls()
spawn_server()
run_student_code() # causes an update
assert_update_succeeded()
except:
traceback.print_exc()
exit_code = 1
finally:
cleanup()
os._exit(exit_code)
if __name__ == "__main__":
main()
```

Quality signals (from the qsc_*_quality_signal columns):

- avg_line_length 27.738318; max_line_length 107; alphanum_fraction 0.647574
- num_words 392; num_chars 2,968; mean_word_length 4.681122; frac_words_unique 0.308673
- frac_chars_top_2grams 0.149864; top_3grams 0.13079; top_4grams 0.075204
- frac_chars_dupe_5grams 0.26703; 6grams 0.117711; 7grams 0.117711; 8grams 0.087193; 9grams 0.087193; 10grams 0.063215
- frac_chars_replacement_symbols 0; frac_chars_digital 0.007749; frac_chars_whitespace 0.217318
- size_file_byte 2,968; num_lines 106; num_chars_line_max 108; num_chars_line_mean 28
- frac_chars_alphabet 0.782178; frac_chars_comments 0.015162; cate_xml_start 0; frac_lines_dupe_lines 0.047619; cate_autogen 0
- frac_lines_long_string 0.02381; frac_chars_string_length 0.272354; frac_chars_long_word_length 0.136691; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0.047619
- codepython: cate_ast 1; frac_lines_func_ratio 0.119048; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.095238; frac_lines_simplefunc 0; score_lines_no_logic 0.214286; frac_lines_print 0.011905
- unsuffixed qsc_* counterparts all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective 1; hits 0
**Row 13: axis.py**

- hexsha: ab6e4e87d4f9d0f3716debd3c918f62dc6e778c5
- size: 8,202 | ext: py | lang: Python
- max_stars: repo kmharrington/xy_stage_control, path xy_stage/axis.py, head 1aef165a2ecebf9bd7a659435ff6905ed79b1726, licenses ["MIT"], count null, event datetimes null
- max_issues: same repo, path, and head; licenses ["MIT"]; count 2; events 2021-04-09T15:57:41.000Z to 2021-09-27T15:50:56.000Z
- max_forks: same repo, path, and head; licenses ["MIT"]; count 1; events 2021-04-23T18:29:43.000Z to 2021-04-23T18:29:43.000Z
- content:

```python
import RPi.GPIO as GPIO
import time
import os
class Axis:
"""
Base Class for one of the XY gantry axes
All move commands are written to be called in threads
self.position and/or self.step position can safely be queried
while the axis is moving.
"""
def __init__(self, name, pin_list, steps_per_cm, logfile=None):
self.name = name
self.ena = pin_list['ena']
self.pul = pin_list['pul']
self.dir = pin_list['dir']
self.eot_ccw = pin_list['eot_ccw']
self.eot_cw = pin_list['eot_cw']
self.setup_pins()
self.hold_enable = False
self.keep_moving = False
self.step_position = 0
self.logfile = logfile
if self.logfile is not None and os.path.exists(self.logfile):
with open(self.logfile, "r") as pos_file:
self.step_position = int(pos_file.read())
self.steps_per_cm = steps_per_cm
self.max_vel = 1.27 ## cm / s
self.homed = False
@property
def position(self):
return self.step_position / self.steps_per_cm
@position.setter
def position(self, value):
if self.keep_moving:
raise ValueError("Cannot update position while moving")
self.step_position = value*self.steps_per_cm
@property
def limits(self):
'''
Returns: (home limit, far side limit)
'''
self.set_limits()
return self.lim_cw, self.lim_ccw
def setup_pins(self):
GPIO.setmode(GPIO.BCM)
for pin in [self.ena, self.pul, self.dir]:
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, GPIO.HIGH)
for pin in [self.eot_ccw, self.eot_cw]:
GPIO.setup(pin, GPIO.IN)
def set_limits(self):
### pins go low when they are engaged
self.lim_ccw = GPIO.input(self.eot_ccw) == GPIO.LOW
self.lim_cw = GPIO.input(self.eot_cw) == GPIO.LOW
return self.lim_ccw or self.lim_cw
def home(self, max_dist=150, reset_pos=True):
"""Move axis at 1 cm/s toward the home limit.
Arguments
----------
max_dist : float
the maximum number of cm to move for homing
reset_pos : bool
if true, axis position is reset to zero
"""
while not self.lim_cw:
self.move_cm(True, max_dist, velocity=1)
if reset_pos:
self.step_position = 0
self.homed = True
def move_cm(self, dir, distance, velocity=None):
'''
Axis Moves the commanded number of cm. Converts to steps
and calls the move_step function
Args:
dir -- True goes toward home (the motors)
distance -- number of cm to move
velocity -- how quickly to move
'''
steps = distance*self.steps_per_cm
if velocity is None:
velocity = self.max_vel
if velocity > self.max_vel:
print('Requested Velocity too high, setting to {} cm/s'.format(self.max_vel))
velocity = self.max_vel
wait = 1.0/(2*velocity*self.steps_per_cm)
success, steps = self.move_step(dir, steps, wait)
return success, steps/self.steps_per_cm
def move_to_cm(self, new_position, velocity=None, require_home=True):
'''
Move Axis to the requested position.
Args:
new_position -- the position you want to move to
velocity -- how fast to move (cm/s)
require_home -- if true, requires the axis position to be calibrated
defaults to true to prevent mistakes
'''
if not self.homed:
if require_home:
print('ERROR -- Axis Position Not Calibrated')
return False
print('WARNING -- Axis Position Not calibrated')
distance = new_position - self.position
if distance < 0:
return self.move_cm( True, abs(distance), velocity)
return self.move_cm(False, abs(distance), velocity)
def enable(self):
self.hold_enable = True
GPIO.output(self.ena, GPIO.LOW)
def disable(self):
self.hold_enable = False
GPIO.output(self.ena, GPIO.HIGH)
def move_step(self, dir, steps=100, wait=0.005):
## direction = False is toward the CCW limit
## direction = True is toward the CW limit
steps = int(round(steps))
self.keep_moving = True
if dir:
increment = -1
else:
increment = 1
if not self.hold_enable:
GPIO.output( self.ena, GPIO.LOW)
GPIO.output(self.dir, dir)
time.sleep(0.25)
while steps > 0 and self.keep_moving:
if self.set_limits():
if (not dir) and self.lim_ccw:
                    #print('Hit CCW limit with {} steps left'.format(steps))
self.keep_moving = False
break
elif dir and self.lim_cw:
### true goes to home
self.keep_moving = False
break
#print('LIMIT!')
#print('CCW: ', self.lim_ccw, 'CW:', self.lim_cw)
GPIO.output(self.pul, GPIO.HIGH)
time.sleep(wait)
GPIO.output(self.pul, GPIO.LOW)
time.sleep(wait)
self.step_position += increment
steps -= 1
if self.logfile is not None:
with open(self.logfile, "w") as pos_file:
                pos_file.write(str(self.step_position))  # write() needs a str; step_position is an int
if not self.hold_enable:
GPIO.output(self.ena, GPIO.HIGH)
if not self.keep_moving:
#print('I think I hit a limit with {} steps left'.format(steps))
return False, steps
self.keep_moving = False
return True, steps
def stop(self):
self.keep_moving = False
def cleanup(self):
GPIO.cleanup()
class CombinedAxis(Axis):
"""
Two axes where the control outputs are electrically connected
This assumes there's two limit switches per axes so there's two
limits on each side.
"""
def setup_pins(self):
GPIO.setmode(GPIO.BCM)
for pin in [self.ena, self.pul, self.dir]:
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin, GPIO.HIGH)
for pins in [self.eot_ccw, self.eot_cw]:
for pin in pins:
GPIO.setup(pin, GPIO.IN)
def set_limits(self):
### pins go low when they are engaged
self.lim_ccw = (GPIO.input(self.eot_ccw[0]) == GPIO.LOW) or (GPIO.input(self.eot_ccw[1]) == GPIO.LOW)
self.lim_cw = (GPIO.input(self.eot_cw[0]) == GPIO.LOW) or (GPIO.input(self.eot_cw[1]) == GPIO.LOW)
return self.lim_ccw or self.lim_cw
if __name__ == '__main__':
from threading import Thread
STEP_PER_CM = 1574.80316
### used when only one axis is plugged in to Xa
x_axis = Axis('X',
pin_list={
'ena':2, 'pul':4, 'dir':3,
'eot_ccw':17, 'eot_cw':27},
steps_per_cm = STEP_PER_CM)
'''
#### BCM PIN NUMBERS
x_axis = CombinedAxis('X',
pin_list={
'ena':2, 'pul':4, 'dir':3,
'eot_ccw':[17,23], 'eot_cw':[27,24]},
steps_per_cm = STEP_PER_CM)
y_axis = Axis('Y',
pin_list={
'ena':16, 'pul':21, 'dir':20,
'eot_ccw':19, 'eot_cw':26},
steps_per_cm = STEP_PER_CM)
'''
#x = Thread(target=x_axis.move_to_cm, args=(10, 1, False))
x = Thread(target=x_axis.home, args=() )
print('starting')
x.start()
time.sleep(0.01)
while x_axis.keep_moving:
time.sleep(1)
print(x_axis.position)
if x_axis.position > 4:
x_axis.position=0
x.join()
print('all done')
x_axis.stop()
#x_axis.move_step(False, 5000, 0.0001)
#time.sleep(0.1)
#test.move(True,2000, 0.0001)
#test.print_limits(nread=40, wait=0.25)
#time.sleep(30)
```

Quality signals (from the qsc_*_quality_signal columns):

- avg_line_length 31.068182; max_line_length 111; alphanum_fraction 0.554743
- num_words 1,108; num_chars 8,202; mean_word_length 3.962094; frac_words_unique 0.200361
- frac_chars_top_2grams 0.017084; top_3grams 0.025057; top_4grams 0.019134
- frac_chars_dupe_5grams 0.274943; 6grams 0.208428; 7grams 0.172665; 8grams 0.163098; 9grams 0.151253; 10grams 0.151253
- frac_chars_replacement_symbols 0; frac_chars_digital 0.019416; frac_chars_whitespace 0.340649
- size_file_byte 8,202; num_lines 263; num_chars_line_max 112; num_chars_line_mean 31.186312
- frac_chars_alphabet 0.792345; frac_chars_comments 0.19922; cate_xml_start 0; frac_lines_dupe_lines 0.263514; cate_autogen 0
- frac_lines_long_string 0; frac_chars_string_length 0.039059; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- codepython: cate_ast 1; frac_lines_func_ratio 0.108108; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.027027; frac_lines_simplefunc 0.006757; score_lines_no_logic 0.216216; frac_lines_print 0.040541
- unsuffixed qsc_* counterparts all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective 1; hits 0
**Row 14: slacklogs2mongo.py**

- hexsha: ab6ee2ca9fb46ff0a520d9cacda3106f74ee51ad
- size: 2,788 | ext: py | lang: Python
- max_stars, max_issues, max_forks (identical): repo lbn/slacklogs2mongo, path slacklogs2mongo.py, head 38069cac094cf745f54702c5f8ebbca847e95bdc, licenses ["MIT"], counts null, event datetimes null
- content:

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import json
from pymongo import MongoClient
class LogImporter(object):
def __init__(self, data, constring):
self.data_dir = data
self.db = MongoClient(constring).get_default_database()
self.messages = self.db.messages
self.channels = self.__load_channels()
self.channel_ids = {chan["name"]: chan["id"] for chan in self.channels}
self.users = self.__load_users()
self.user_names = {user["id"]: user["name"] for user in self.users}
def __load_channels(self):
chan_file = os.path.join(self.data_dir, "channels.json")
with open(chan_file) as f:
return json.load(f)
def __load_users(self):
user_file = os.path.join(self.data_dir, "users.json")
with open(user_file) as f:
return json.load(f)
def channel(self, chan):
def insert_messages(msgs):
if len(msgs) == 0:
return
self.messages.insert_many(msgs)  # Collection.insert() was removed in PyMongo 4; insert_many is the bulk equivalent
def insert_chunk(filename):
with open(filename) as f:
msgs = json.load(f)
for msg in msgs:
msg["channel"] = self.channel_ids[chan]
msg["channel_name"] = chan
if "user" not in msg:
continue
if msg["user"] == "USLACKBOT":
msg["user_name"] = "slackbot"
elif msg["user"] in self.user_names:
msg["user_name"] = self.user_names[msg["user"]]
else:
msg["user_name"] = "unknown"
insert_messages(msgs)
chan_dir = os.path.join(self.data_dir, chan)
if not (os.path.exists(chan_dir) and os.path.isdir(chan_dir)):
raise ValueError("Channel could not be found in the log directory")
for filename in os.listdir(chan_dir):
insert_chunk(os.path.join(chan_dir, filename))
def all_channels(self):
chans = [f for f in os.listdir(self.data_dir)
if os.path.isdir(os.path.join(self.data_dir, f))]
for chan in chans:
self.channel(chan)
def main():
parser = argparse.ArgumentParser(description="Import Slack logs into MongoDB")
parser.add_argument("--logs", required=True, type=str, metavar="LOGDIR", help="Directory containing Slack log files")
parser.add_argument("--mongo", required=True, type=str, metavar="MONGOCON", help="MongoDB connection string")
args = parser.parse_args()
importer = LogImporter(data=os.path.realpath(args.logs), constring=args.mongo)
importer.all_channels()
if __name__ == "__main__":
main()
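# Usage sketch (the paths and connection string below are placeholders):
#   python slacklogs2mongo.py --logs ./slack-export --mongo mongodb://localhost:27017/slack
# One way to spot-check the import afterwards, reusing the `messages` collection above:
#   >>> from pymongo import MongoClient
#   >>> db = MongoClient("mongodb://localhost:27017/slack").get_default_database()
#   >>> db.messages.count_documents({"channel_name": "general"})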
| avg_line_length: 32.045977 | max_line_length: 121 | alphanum_fraction: 0.588594 | num_lines: 86 |

hexsha: ab70efcd3dba1a5b5892d70293ed4be2b4a82829 | size: 973 | ext: py | lang: Python
repo_path: moai/parameters/optimization/swa.py | repo_name: tzole1155/moai | repo_head_hexsha: d1afb3aaf8ddcd7a1c98b84d6365afb846ae3180 | licenses: ["Apache-2.0"]
max_stars_count: 10 (2021-04-02T11:21:33.000Z to 2022-01-18T18:32:32.000Z) | max_issues_count: 1 (2022-03-22T20:10:55.000Z to 2022-03-24T13:11:02.000Z) | max_forks_count: 3 (2021-05-16T20:47:40.000Z to 2021-12-01T21:15:36.000Z)
content:
from moai.parameters.optimization.optimizers.swa import SWA as Outer
import torch
import omegaconf.omegaconf
import hydra.utils as hyu
import typing
__all__ = ['SWA']
#NOTE: modified from https://github.com/alphadl/lookahead.pytorch
class SWA(object):
"""Implements Stochastic Weight Averaging (SWA).
- **Paper**: [Averaging Weights Leads to Wider Optima and Better Generalization](https://arxiv.org/pdf/1803.05407.pdf)
- **Implementation**: [GitHub @ pytorch](https://github.com/pytorch/contrib/tree/master/torchcontrib)
"""
def __init__(self,
parameters: typing.Iterator[torch.nn.Parameter],
optimizers: omegaconf.DictConfig,
swa_start=None,
swa_freq=None,
swa_lr=None
):
self.optimizers = [
Outer(
optimizer=hyu.instantiate(optimizers[0], parameters),
swa_start=swa_start, swa_lr=swa_lr, swa_freq=swa_freq,
)
]
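# Minimal sketch of the weight averaging that SWA performs, independent of the
# Outer wrapper above (the snapshot list and equal weighting are illustrative):
def average_state_dicts(snapshots):
    """Element-wise average of a list of torch state_dicts."""
    avg = {k: v.clone().float() for k, v in snapshots[0].items()}
    for sd in snapshots[1:]:
        for k in avg:
            avg[k] += sd[k].float()
    return {k: v / len(snapshots) for k, v in avg.items()}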
| avg_line_length: 30.40625 | max_line_length: 126 | alphanum_fraction: 0.654676 | num_lines: 31 |

hexsha: ab74b112917c353df9f1d1d587360e07a209c98f | size: 4,500 | ext: py | lang: Python
repo_path: src/gui/droDisplayWidget.py | repo_name: SnailAF/SimpleFOCStudio | repo_head_hexsha: 01199a82e6ab8b6461e9eca3b6a10d19756549f7 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from PyQt5 import QtGui, QtWidgets, QtCore
from sharedcomponets import GUIToolKit
import logging
from simpleFOCConnector import SimpleFOCDevice
class DROGroupBox(QtWidgets.QGroupBox):
def __init__(self, parent=None, simpleFocConn=None):
"""Constructor for ToolsWidget"""
super().__init__(parent)
self.device = simpleFocConn
self.setTitle("Simple FOC Digital Read Out")
self.setObjectName("digitalReadOutGroupBox")
self.droHorizontalLayout = QtWidgets.QHBoxLayout(self)
self.droHorizontalLayout.setObjectName("droHorizontalLayout")
self.signal0Label = QtWidgets.QLabel(self)
self.signal0Label.setObjectName("signal0Label")
self.signal0Label.setText("Signal 0")
self.droHorizontalLayout.addWidget(self.signal0Label)
self.signal0LCDNumber = QtWidgets.QLCDNumber(self)
self.putStyleToLCDNumber(self.signal0LCDNumber)
self.signal0LCDNumber.setObjectName("signal0LCDNumber")
self.droHorizontalLayout.addWidget(self.signal0LCDNumber)
self.signal1Label = QtWidgets.QLabel(self)
self.signal1Label.setObjectName("signal1Label")
self.signal1Label.setText("Signal 1")
self.droHorizontalLayout.addWidget(self.signal1Label)
self.signal1LCDNumber = QtWidgets.QLCDNumber(self)
self.putStyleToLCDNumber(self.signal1LCDNumber)
self.signal1LCDNumber.setObjectName("signal1LCDNumber")
self.droHorizontalLayout.addWidget(self.signal1LCDNumber)
self.signal2Label = QtWidgets.QLabel(self)
self.signal2Label.setObjectName("voltageLable")
self.signal2Label.setText("Signal 2")
self.droHorizontalLayout.addWidget(self.signal2Label)
self.signal2LCDNumber = QtWidgets.QLCDNumber(self)
self.putStyleToLCDNumber(self.signal2LCDNumber)
self.signal2LCDNumber.setObjectName("signal2LCDNumber")
self.droHorizontalLayout.addWidget(self.signal2LCDNumber)
self.device.commProvider.telemetryDataReceived.connect(self.updateDRO)
self.device.addControlLoopModeListener(self)
self.controlLoopModeChanged(self.device.controlType)
self.initDisplay()
self.disableUI()
self.device.addConnectionStateListener(self)
def updateLabels(self, label0, label1, label2):
self.signal0Label.setText(label0)
self.signal1Label.setText(label1)
self.signal2Label.setText(label2)
def deviceConnected(self, deviceConnected):
if deviceConnected is True:
self.enableUI()
self.initDisplay()
else:
self.initDisplay()
self.disableUI()
def enableUI(self):
self.setEnabled(True)
def disableUI(self):
self.setEnabled(False)
def initDisplay(self):
self.signal0LCDNumber.display(0.0)
self.signal1LCDNumber.display(0.0)
self.signal2LCDNumber.display(0.0)
def putStyleToLCDNumber(self, lcdNumber):
lcdNumber.setStyleSheet("""QLCDNumber {background-color: white;}""")
palette = self.setColor(lcdNumber.palette(),GUIToolKit.RED_COLOR)
lcdNumber.setPalette(palette)
def setColor(self, palette, colorTuple):
R, G, B = colorTuple
# foreground color
palette.setColor(palette.WindowText, QtGui.QColor(R,G,B))
# background color
palette.setColor(palette.Background, QtGui.QColor(R,G,B))
# "light" border
palette.setColor(palette.Light, QtGui.QColor(R,G,B))
# "dark" border
palette.setColor(palette.Dark, QtGui.QColor(R,G,B))
return palette
def setValues(self, values):
self.signal0LCDNumber.display(values[0])
self.signal1LCDNumber.display(values[1])
self.signal2LCDNumber.display(values[2])
def updateDRO(self, signal0, signal1, signal2):
try:
if type(signal0) is float and type(signal1) is float and type(signal2) is float:
self.signal0LCDNumber.display(signal0)
self.signal1LCDNumber.display(signal1)
self.signal2LCDNumber.display(signal2)
except IndexError as error:
logging.error(error, exc_info=True)
def controlLoopModeChanged(self, controlMode):
label0, label1, label2 = SimpleFOCDevice.getSignalLabels(controlMode)
self.updateLabels(label0, label1, label2)
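# Sketch of exercising the read-out without hardware, assuming only the
# three-float updateDRO slot defined above; the timer wiring is illustrative.
def demo_feed(dro_box, interval_ms=250):
    import random
    timer = QtCore.QTimer(dro_box)
    timer.timeout.connect(lambda: dro_box.updateDRO(
        random.random(), random.random(), random.random()))
    timer.start(interval_ms)
    return timer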
| avg_line_length: 37.5 | max_line_length: 92 | alphanum_fraction: 0.691111 | num_lines: 120 |

hexsha: ab7778c7769a430660818d3ba21a31e9b8a869e3 | size: 7,157 | ext: py | lang: Python
repo_path: coredump/pycore/take_core.py | repo_name: fooofei/c_cpp | repo_head_hexsha: 83b780fd48cd3c03fd3850fb297576d5fc907955 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
#!/usr/bin/env python
# coding=utf-8
'''
###
This file takes over control of where coredumps are stored.
### Manual step required:
echo "|/usr/bin/python /docker_host/1.py" > /proc/sys/kernel/core_pattern
### sys.stdin returns '' (not None) when there is no data.
###
Inside a container, operations on the core pattern persist into the image.
The expectation was that a change made to /proc/sys/kernel/core_pattern in a
container would be gone without a commit, i.e. a new container from the same
image should show the unmodified value; testing on macOS shows the change
persists anyway.
###
A short note on core dumps:
https://favoorr.github.io/2017/02/10/learn-gdb-from-brendan/
'''
import os
import sys
import datetime
import io
import contextlib
import gzip
import argparse
import time
curpath = os.path.realpath(__file__)
curpath = os.path.dirname(curpath)
CORE_FILE_PREFIX = "core_time"
def redirect_to_file(fullpath, inputs):
'''
Write input to file, not compress
'''
if os.path.exists(fullpath):
os.remove(fullpath)
with open(fullpath, "wb") as fw:
while 1:
c = inputs.read(1024)
if not c:
break
fw.write(c)
return fullpath
def redirect_to_gzfile(fullpath, inputs):
'''
Write input to file, with compress
'''
if os.path.exists(fullpath):
os.remove(fullpath)
if not fullpath.endswith(".gz"):
fullpath = fullpath + ".gz"
if os.path.exists(fullpath):
os.remove(fullpath)
with gzip.open(fullpath, "wb", compresslevel=9) as fw:
with contextlib.closing(io.BufferedWriter(fw)) as fww:
while 1:
c = inputs.read(1024)
if not c:
break
fww.write(c)
return fullpath
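# Sketch of reading a saved core back out so gdb can load it; mirrors the
# chunked loop in redirect_to_gzfile() above (out_path is a placeholder).
def read_core_back(fullpath_gz, out_path):
    with gzip.open(fullpath_gz, "rb") as fr:
        with open(out_path, "wb") as fw:
            while 1:
                c = fr.read(1024)
                if not c:
                    break
                fw.write(c)
    return out_path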
def tick_called(msg):
'''
Record that this script was invoked; used when testing.
'''
called = os.path.join(curpath, "called")
with open(called, "ab+") as fw:
fw.write("{t} called msg={m}\n".format(t=datetime.datetime.now(), m=msg))
def core_read(**kwargs):
'''
path_corepattern
'''
with open(kwargs.get("path_corepattern", ""), "rb") as fr:
c = fr.read()
return c
def core_set(**kwargs):
'''
path_corepattern
'''
path_corepattern = kwargs.get("path_corepattern", "")
print("[+] Before register the content of {f} ={c}"
.format(f=path_corepattern, c=core_read(**kwargs)))
towrite = "|/usr/bin/python {f} %p %u %h %e".format(f=os.path.realpath(__file__))
with open(path_corepattern, "wb") as fw:
fw.write(towrite)
print("[+] After register the content of {f} ={c}"
.format(f=path_corepattern, c=core_read(**kwargs)))
def check_core_file_limits(**kwargs):
'''
kwargs contains:
total_core_count_limit
core_saved_path
Decide, based on the configured limits and the cores already saved,
whether to keep saving new cores. Defaults to saving (returns True).
'''
count_limit = kwargs.get('total_core_count_limit', None)
path_saved = kwargs.get('core_saved_path', None)
if not (count_limit and path_saved):
return True
counts = 0
sizes = 0
for child in os.listdir(path_saved):
p = os.path.join(path_saved, child)
if os.path.isfile(p) and child.startswith(CORE_FILE_PREFIX):
counts += 1
sizes += os.path.getsize(p)
if count_limit is not None and count_limit > 0 and counts >= count_limit:
return False
return True
def core_restore(**kwargs):
'''
path_corepattern
'''
path_corepattern = kwargs.get('path_corepattern', "")
print('[+] Before restore the content of {f} ={c}'
.format(f=path_corepattern, c=core_read(**kwargs)))
towrite = 'core'
with open(path_corepattern, 'wb') as fw:
fw.write(towrite)
print('[+] After restore the content of {f} ={c}'
.format(f=path_corepattern, c=core_read(**kwargs)))
def core_generate(**kwargs):
'''
core_pid
core_uid
core_hostname
core_execname
core_saved_path
core_input
:return fullpath_core
'''
now = time.time() * 1000
now = int(now)
filename = "{pf}-{t}_pid-{pid}_uid-{uid}_host-{hostname}_name-{execname}".format(
pf = CORE_FILE_PREFIX,
t=now, pid=kwargs.get("core_pid", ""), uid=kwargs.get("core_uid", ""),
hostname=kwargs.get("core_hostname", ""), execname=kwargs.get("core_execname", "")
)
saved = kwargs.get("core_saved_path", None) or curpath
fullpath = os.path.join(saved, filename)
return redirect_to_gzfile(fullpath, kwargs.get("core_input", sys.stdin))
def logging(**kwargs):
''' When a core file is generated, write a line to the service log saying so.
'''
# format 2018-04-28 02:20:47:[INFO]===>load_black_white_list_conf==>enter hc->hid=142
logf = kwargs.get('path_log', '')
fp = kwargs.get('path_core_file', '')
nowutc = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if logf and os.path.exists(os.path.dirname(logf)):
with open(logf, 'a+') as fw:
fw.write("{t}:[ERROR]===>pycore==>cored".format(t=nowutc))
if fp and os.path.exists(fp):
fw.write(" write core file {f}".format(f=fp))
fw.write("\n")
def entry():
fullpath_core_pattern = "/proc/sys/kernel/core_pattern"
fullpath_log = "/home/logs"
if not os.path.exists(fullpath_log):
os.makedirs(fullpath_log)
fullpath_log = os.path.join(fullpath_log, "cores.log")
function_args = {
"path_corepattern": fullpath_core_pattern,
"path_log": fullpath_log,
"core_saved_path":"/home",
"total_core_count_limit": 3,
}
parser = argparse.ArgumentParser(description='Use for take over the core by pipe',
version='1.0')
parser.add_argument('--set', action='store_true',
help='Register this python file in {f} for core dump pipe'.format(f=fullpath_core_pattern))
parser.add_argument('--restore', action='store_true',
help='Restore the file {f}'.format(f=fullpath_core_pattern))
parser.add_argument('--testcore', action='store_true',
help='Test core generate')
args, unknownargs = parser.parse_known_args()
if args.restore:
core_restore(**function_args)
elif args.set:
core_set(**function_args)
elif args.testcore:
fp = open(os.path.join(curpath, 'core_test'), 'rb')
function_args.update({
'core_pid': 1, 'core_uid': 2,
'core_hostname': 3,
'core_execname': 4,
'core_saved_path': curpath,
'core_input': fp,
})
v = core_generate(**function_args)
fp.close()
print('[+] Generate core {f}'.format(f=v))
elif len(unknownargs) > 3:
function_args.update({
'core_pid': unknownargs[0], 'core_uid': unknownargs[1],
'core_hostname': unknownargs[2],
'core_execname': unknownargs[3],
'core_input': sys.stdin,
})
if check_core_file_limits(**function_args):
core_file = core_generate(**function_args)
logging(path_core_file=core_file, **function_args)  # keyword-first form also parses on Python 2
else:
parser.print_help()
if __name__ == '__main__':
sy = sys.version_info
if (sy.major, sy.minor) < (2, 7):
raise ValueError('requires Python 2.7 or newer')
entry()
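# Usage sketch (paths are placeholders):
#   python take_core.py --set        # register this script in core_pattern
#   python take_core.py --testcore   # feed the local 'core_test' file through the pipeline
#   python take_core.py --restore    # put core_pattern back to plain 'core'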
| avg_line_length: 29.331967 | max_line_length: 115 | alphanum_fraction: 0.606958 | num_lines: 243 |

hexsha: ab7783e06fb3be6a597351468894053b80c2161f | size: 595 | ext: py | lang: Python
repo_path: docs/source/examples/errorbars.py | repo_name: alexras/boomslang | repo_head_hexsha: 62b6dc3a183fd8686b165c4abdb55d10d537b4ab | licenses: ["BSD-3-Clause"]
max_stars_count: 4 (2015-02-24T06:50:08.000Z to 2020-08-08T03:23:32.000Z) | max_issues_count: 13 (2017-07-17T15:52:09.000Z to 2017-07-17T15:52:09.000Z) | max_forks_count: null
content:
from boomslang import Line, Plot

plot = Plot()
# Uneven error bars
line = Line()
line.xValues = range(6)
line.yValues = [25, 21, 30, 23, 10, 30]
line.yMins = [10, 18, 10, 10, 5, 20]
line.yMaxes = [30, 50, 40, 30, 20, 45]
line.label = "Asymmetric Errors"
line.color = "red"
line.xValues = range(len(line.yValues))
# Even error bars
line2 = Line()
line2.xValues = range(6)
line2.yValues = [35, 40, 45, 40, 55, 50]
line2.color = "blue"
line2.label = "Symmetric Errors"
line2.yErrors = [3, 6, 5, 3, 5, 4]
plot.add(line)
plot.add(line2)
plot.xLabel = "X Label"
plot.yLabel = "Y Label"
plot.hasLegend()
plot.save("errorbars.png")
| avg_line_length: 21.25 | max_line_length: 40 | alphanum_fraction: 0.653782 | num_lines: 27 |

hexsha: ab7ab117b9f5f218dd655ac6fdbc592e4f1c7636 | size: 59,134 | ext: py | lang: Python
repo_path: futura_image/mendoza_beltran_utilities.py | repo_name: pjamesjoyce/futura_image | repo_head_hexsha: 09b46575ff19eef5da9164b8c57cfd691790ad7e | licenses: ["BSD-3-Clause"]
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2020-11-02T13:49:55.000Z to 2020-11-02T13:49:55.000Z)
content:
#!/usr/bin/env python
from wurst import *
from wurst.searching import *
from wurst.transformations.activity import change_exchanges_by_constant_factor
from wurst.transformations.uncertainty import rescale_exchange
from wurst.IMAGE.io import *
from wurst.IMAGE import *
from wurst.ecoinvent.electricity_markets import *
from wurst.ecoinvent.filters import *
from wurst.transformations.geo import *
import pandas as pd
import numpy as np
import pyprind
import pprint
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
image_filename = os.path.join(dir_path, 'assets/Image variable names.xlsx')
image_variable_names = pd.read_excel(image_filename)
REGIONS = image_variable_names['Regions'].dropna().values
scenarios = {'BAU': {}, '450': {}}
scenarios['BAU']['filepath'] = r'SSPs_data/SSPs/SSP2'
scenarios['BAU']['description'] = "Middle of the Road (Medium challenges to mitigation and adaptation)"
scenarios['450']['filepath'] = r'SSPs_data/SSPs/SSP2_450'
scenarios['450']['description'] = "Middle of the Road (Medium challenges to mitigation and adaptation), 4.5W.m-2 target"
# We import some datasets from SimaPro. These functions clean up the datasets:
def fix_unset_technosphere_and_production_exchange_locations(db, matching_fields=('name', 'unit')):
for ds in db:
for exc in ds['exchanges']:
if exc['type'] == 'production' and exc.get('location') is None:
exc['location'] = ds['location']
elif exc['type'] == 'technosphere' and exc.get('location') is None:
locs = find_location_given_lookup_dict(db,
{k: exc.get(k) for k in matching_fields})
if len(locs) == 1:
exc['location'] = locs[0]
else:
print("No unique location found for exchange:\n{}\nFound: {}".format(
pprint.pformat(exc), locs
))
def find_location_given_lookup_dict(db, lookup_dict):
return [x['location'] for x in get_many(db, *[equals(k, v) for k, v in lookup_dict.items()])]
exists = lambda x: {k: v for k, v in x.items() if v is not None}
def remove_nones(db):
for ds in db:
ds['exchanges'] = [exists(exc) for exc in ds['exchanges']]
def set_global_location_for_additional_datasets(db):
""" This function is needed because the wurst function relink_technosphere exchanges needs global datasets if if can't find a regional one."""
non_ecoinvent_datasets = [x['name'] for x in db if x['database'] != 'ecoinvent']
ecoinvent_datasets = [x['name'] for x in db if x['database'] == 'ecoinvent']
for ds in [x for x in db if x['database'] in ['Carma CCS', 'CSP']]:
ds['location'] = 'GLO'
for exc in [x for x in ds['exchanges'] if x['type'] != 'biosphere']:
if exc['name'] in non_ecoinvent_datasets:
if exc['name'] in ecoinvent_datasets and exc['location'] != 'GLO':
print(exc['name'], exc['location'])
else:
exc['location'] = 'GLO'
def add_new_locations_to_added_datasets(db):
# We create a new version of all added electricity generation datasets for each IMAGE region.
# We allow the upstream production to remain global, as we are mostly interested in regionalizing
# to take advantage of the regionalized IMAGE data.
# step 1: make copies of all datasets for new locations
# best would be to regionalize datasets for every location with an electricity market like this:
# locations = {x['location'] for x in get_many(db, *electricity_market_filter_high_voltage)}
# but this takes quite a long time. For now, we just use 1 location that is uniquely in each IMAGE region.
possibles = {}
for reg in REGIONS[:-1]:
temp = [x for x in geomatcher.intersects(('IMAGE', reg)) if type(x) != tuple]
possibles[reg] = [x for x in temp if len(ecoinvent_to_image_locations(x)) == 1]
if not len(possibles[reg]): print(reg, ' has no good candidate')
locations = [v[0] for v in possibles.values()]
# This code would modify every new dataset, but this would be quite large:
# for ds in pyprind.prog_bar([ds for ds in db if ds['database'] in ['CSP','Carma CCS']]):
# so we consider only the final electricity production dataset and not the upstream impacts:
for ds in pyprind.prog_bar([ds for ds in db if ds['name'] in carma_electricity_ds_name_dict.keys()]):
for location in locations:
new_ds = copy_to_new_location(ds, location)
db.append(new_ds)
def regionalize_added_datasets(db):
# step 2: relink all processes in each dataset
# This code would modify every new dataset, but this would be quite large:
# for ds in pyprind.prog_bar([ds for ds in db if ds['database'] in ['CSP','Carma CCS']]):
# so we consider only the final electricity production dataset and not the upstream impacts:
for ds in [ds for ds in db if ds['name'] in carma_electricity_ds_name_dict.keys()]:
ds = relink_technosphere_exchanges(ds, db, exclusive=True, drop_invalid=False, biggest_first=False,
contained=False)
#
# Modify electricity datasets
#
def update_ecoinvent_efficiency_parameter(ds, scaling_factor):
parameters = ds['parameters']
possibles = ['efficiency', 'efficiency_oil_country', 'efficiency_electrical']
for key in possibles:
try:
parameters[key] /= scaling_factor
return
except KeyError:
pass
def find_coal_efficiency_scaling_factor(ds, year, image_efficiency, agg_func=np.average):
# Input a coal electricity dataset and year. We look up the efficiency for this region and year
# from the Image model and return the scaling factor by which to multiply all efficiency dependent exchanges.
# If the ecoinvent region corresponds to multiple Image regions we simply average them.
ecoinvent_eff = find_ecoinvent_coal_efficiency(ds)
image_locations = ecoinvent_to_image_locations(ds['location'])
image_eff = agg_func(
image_efficiency.loc[year][image_locations].values) # we take an average of all applicable image locations
return ecoinvent_eff / image_eff
def find_gas_efficiency_scaling_factor(ds, year, image_efficiency, agg_func=np.average):
# Input a gas electricity dataset and year. We look up the efficiency for this region and year
# from the Image model and return the scaling factor by which to multiply all efficiency dependent exchanges.
# If the ecoinvent region corresponds to multiple Image regions we simply average them.
ecoinvent_eff = find_ecoinvent_gas_efficiency(ds)
image_locations = ecoinvent_to_image_locations(ds['location'])
image_eff = agg_func(
image_efficiency.loc[year][image_locations].values) # we take an average of all applicable image locations
return ecoinvent_eff / image_eff
def find_oil_efficiency_scaling_factor(ds, year, image_efficiency, agg_func=np.average):
# Input a oil electricity dataset and year. We look up the efficiency for this region and year
# from the Image model and return the scaling factor by which to multiply all efficiency dependent exchanges.
# If the ecoinvent region corresponds to multiple Image regions we simply average them.
ecoinvent_eff = find_ecoinvent_oil_efficiency(ds)
image_locations = ecoinvent_to_image_locations(ds['location'])
image_eff = agg_func(
image_efficiency.loc[year][image_locations].values) # we take an average of all applicable image locations
return ecoinvent_eff / image_eff
def find_biomass_efficiency_scaling_factor(ds, year, image_efficiency, agg_func=np.average):
# Input an electricity dataset and year. We look up the efficiency for this region and year
# from the Image model and return the scaling factor by which to multiply all efficiency dependent exchanges.
# If the ecoinvent region corresponds to multiple Image regions we simply average them.
ecoinvent_eff = find_ecoinvent_biomass_efficiency(ds)
image_locations = ecoinvent_to_image_locations(ds['location'])
image_eff = agg_func(
image_efficiency.loc[year][image_locations].values) # we take an average of all applicable image locations
return ecoinvent_eff / image_eff
def find_nuclear_efficiency_scaling_factor(ds, year, image_efficiency, agg_func=np.average):
# Input an electricity dataset and year. We look up the efficiency for this region and year
# from the Image model and return the scaling factor compared to the improvement since 2012.
# We do not consider the ecoinvent efficiency in 2012 as it is rather difficult to calculate and
# the burnup is not available.
# This is a simplification and certainly has its weaknesses,
# however we argue that it's better than not changing the datasets at all.
# If the ecoinvent region corresponds to multiple Image regions we simply average them.
image_locations = ecoinvent_to_image_locations(ds['location'])
image_uranium_efficiency = agg_func(
image_efficiency.loc[year][image_locations].values) # we take an average of all applicable image locations
image_uranium_efficiency_2012 = agg_func(image_efficiency.loc[2012][image_locations].values)
return image_uranium_efficiency_2012 / image_uranium_efficiency
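# Worked example of the scaling-factor convention shared by the functions above
# (numbers are illustrative): with ecoinvent_eff = 0.38 and image_eff = 0.45,
# scaling_factor = 0.38 / 0.45 ~= 0.844, so fuel inputs and the exchanges that
# scale with them are multiplied by ~0.84 to match the more efficient IMAGE plant.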
def find_ecoinvent_coal_efficiency(ds):
# Nearly all coal power plant datasets have the efficiency as a parameter.
# If this isn't available, we back calculate it using the amount of coal used and
# an average energy content of coal.
try:
return ds['parameters']['efficiency']
except KeyError:
pass
# print('Efficiency parameter not found - calculating generic coal efficiency factor', ds['name'], ds['location'])
fuel_sources = technosphere(ds,
either(contains('name', 'hard coal'), contains('name', 'lignite')),
doesnt_contain_any('name', ('ash', 'SOx')),
equals('unit', 'kilogram'))
energy_in = 0
for exc in fuel_sources:
if 'hard coal' in exc['name']:
energy_density = 20.1 / 3.6 # kWh/kg
elif 'lignite' in exc['name']:
energy_density = 9.9 / 3.6 # kWh/kg
else:
raise ValueError("Shouldn't happen because of filters!!!")
energy_in += (exc['amount'] * energy_density)
ds['parameters']['efficiency'] = reference_product(ds)['amount'] / energy_in
# print(ds['parameters']['efficiency'])
return reference_product(ds)['amount'] / energy_in
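# Worked example using the 20.1 MJ/kg hard-coal figure above: a dataset burning
# 0.4 kg of hard coal per kWh gives energy_in = 0.4 * 20.1 / 3.6 ~= 2.23 kWh,
# so the back-calculated efficiency is 1 / 2.23 ~= 0.45.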
def find_ecoinvent_gas_efficiency(ds):
# Nearly all gas power plant datasets have the efficiency as a parameter.
# If this isn't available, we back calculate it using the amount of gas used and an average energy content of gas.
try:
return ds['parameters']['efficiency']
except KeyError:
pass
# print('Efficiency parameter not found - calculating generic gas efficiency factor', ds['name'], ds['location'])
fuel_sources = technosphere(ds,
either(contains('name', 'natural gas, low pressure'),
contains('name', 'natural gas, high pressure')),
equals('unit', 'cubic meter'))
energy_in = 0
for exc in fuel_sources:
# (based on energy density of natural gas input for global dataset
# 'electricity production, natural gas, conventional power plant')
if 'natural gas, high pressure' in exc['name']:
energy_density = 39 / 3.6 # kWh/m3
# (based on average energy density of high pressure gas, scaled by the mass difference listed between
# high pressure and low pressure gas in the dataset:
# natural gas pressure reduction from high to low pressure, RoW)
elif 'natural gas, low pressure' in exc['name']:
energy_density = 39 * 0.84 / 3.6 # kWh/m3
else:
raise ValueError("Shouldn't happen because of filters!!!")
energy_in += (exc['amount'] * energy_density)
ds['parameters']['efficiency'] = reference_product(ds)['amount'] / energy_in
# print(ds['parameters']['efficiency'])
return reference_product(ds)['amount'] / energy_in
def find_ecoinvent_oil_efficiency(ds):
# Nearly all oil power plant datasets have the efficiency as a parameter.
# If this isn't available, we use global average values to calculate it.
try:
return ds['parameters']['efficiency_oil_country']
except KeyError:
pass
# print('Efficiency parameter not found - calculating generic oil efficiency factor', ds['name'], ds['location'])
fuel_sources = [x for x in technosphere(ds, *[contains('name', 'heavy fuel oil'),
equals('unit', 'kilogram')]
)]
energy_in = 0
for exc in fuel_sources:
# (based on energy density of heavy oil input and efficiency parameter for dataset
# 'electricity production, oil, RoW')
energy_density = 38.5 / 3.6 # kWh/m3
energy_in += (exc['amount'] * energy_density)
ds['parameters']['efficiency'] = reference_product(ds)['amount'] / energy_in
# print(ds['parameters']['efficiency'])
return reference_product(ds)['amount'] / energy_in
def find_ecoinvent_biomass_efficiency(ds):
# Nearly all power plant datasets have the efficiency as a parameter. If this isn't available, we excl.
try:
return ds['parameters']['efficiency_electrical']
except:
pass
if ds['name'] == 'heat and power co-generation, biogas, gas engine, label-certified':
ds['parameters'] = {'efficiency_electrical': 0.32}
return ds['parameters']['efficiency_electrical'] # in general comments for dataset
elif ds['name'] == 'wood pellets, burned in stirling heat and power co-generation unit, 3kW electrical, future':
ds['parameters'] = {'efficiency_electrical': 0.23}
return ds['parameters']['efficiency_electrical'] # in comments for dataset
print(ds['name'], ds['location'], ' Efficiency not found!')
return 0
def get_exchange_amounts(ds, technosphere_filters=None, biosphere_filters=None):
result = {}
for exc in technosphere(ds, *(technosphere_filters or [])):
result[(exc['name'], exc['location'])] = exc['amount']
for exc in biosphere(ds, *(biosphere_filters or [])):
result[(exc['name'], exc.get('categories'))] = exc['amount']
return result
retained_filter = doesnt_contain_any('name', (
'market for NOx retained',
'market for SOx retained'
))
image_air_pollutants = {
'Methane, fossil': 'CH4',
'Sulfur dioxide': 'SO2',
'Carbon monoxide, fossil': 'CO',
'Nitrogen oxides': 'NOx',
'Dinitrogen monoxide': 'N2O'
}
no_al = [exclude(contains('name', 'aluminium industry'))]
no_ccs = [exclude(contains('name', 'carbon capture and storage'))]
no_markets = [exclude(contains('name', 'market'))]
no_imports = [exclude(contains('name', 'import'))]
generic_excludes = no_al + no_ccs + no_markets
image_mapping = {
'Coal ST': {
'fuel2': 'Coal',
'eff_func': find_coal_efficiency_scaling_factor,
'technology filters': coal_electricity + generic_excludes,
'technosphere excludes': [retained_filter],
},
'Coal CHP': {
'fuel2': 'Coal',
'eff_func': find_coal_efficiency_scaling_factor,
'technology filters': coal_chp_electricity + generic_excludes,
'technosphere excludes': [retained_filter],
},
'Natural gas OC': {
'fuel2': 'Natural gas',
'eff_func': find_gas_efficiency_scaling_factor,
'technology filters': gas_open_cycle_electricity + generic_excludes + no_imports,
'technosphere excludes': [],
},
'Natural gas CC': {
'fuel2': 'Natural gas',
'eff_func': find_gas_efficiency_scaling_factor,
'technology filters': gas_combined_cycle_electricity + generic_excludes + no_imports,
'technosphere excludes': [],
},
'Natural gas CHP': {
'fuel2': 'Natural gas',
'eff_func': find_gas_efficiency_scaling_factor,
'technology filters': gas_chp_electricity + generic_excludes + no_imports,
'technosphere excludes': [],
},
'Oil ST': {
'fuel2': 'Heavy liquid fuel',
'eff_func': find_oil_efficiency_scaling_factor,
'technology filters': oil_open_cycle_electricity + generic_excludes + [exclude(contains('name', 'nuclear'))],
'technosphere excludes': [],
},
'Oil CC': {
'fuel2': 'Heavy liquid fuel',
'eff_func': find_oil_efficiency_scaling_factor,
'technology filters': oil_combined_cycle_electricity + generic_excludes + [
exclude(contains('name', 'nuclear'))],
'technosphere excludes': [],
},
'Oil CHP': {
'fuel2': 'Heavy liquid fuel',
'eff_func': find_oil_efficiency_scaling_factor,
'technology filters': oil_chp_electricity + generic_excludes + [exclude(contains('name', 'nuclear'))],
'technosphere excludes': [],
},
'Biomass ST': {
'fuel2': 'Biomass',
'eff_func': find_biomass_efficiency_scaling_factor,
'technology filters': biomass_electricity + generic_excludes,
'technosphere excludes': [],
},
'Biomass CHP': {
'fuel2': 'Biomass',
'eff_func': find_biomass_efficiency_scaling_factor,
'technology filters': biomass_chp_electricity + generic_excludes,
'technosphere excludes': [],
},
'Biomass CC': {
'fuel2': 'Biomass',
'eff_func': find_biomass_efficiency_scaling_factor,
'technology filters': biomass_combined_cycle_electricity + generic_excludes,
'technosphere excludes': [],
},
'Nuclear': {
'fuel2': None, # image parameter doesn't exist for nuclear
'eff_func': find_nuclear_efficiency_scaling_factor,
'technology filters': nuclear_electricity + generic_excludes,
'technosphere excludes': [],
},
}
def update_electricity_datasets_with_image_data(db, year, scenario, agg_func=np.average, update_efficiency=True,
update_emissions=True):
"""
#for the moment we assume that particulates reduce according to the efficiency as we don't have any better data.
"""
changes = {}
for image_technology in image_mapping:
print('Changing ', image_technology)
md = image_mapping[image_technology]
image_efficiency = get_image_efficiencies(scenario, image_technology)
if image_technology != 'Nuclear':
image_emissions = get_image_electricity_emission_factors(scenario, image_efficiency, fuel2=md.get('fuel2'))
for ds in get_many(db, *md['technology filters']):
changes[ds['code']] = {}
changes[ds['code']].update({('meta data', x): ds[x] for x in ['name', 'location']})
changes[ds['code']].update({('meta data', 'Image technology'): image_technology})
changes[ds['code']].update({('original exchanges', k): v for k, v in get_exchange_amounts(ds).items()})
if update_efficiency == True:
# Modify using IMAGE efficiency values:
scaling_factor = md['eff_func'](ds, year, image_efficiency, agg_func)
update_ecoinvent_efficiency_parameter(ds, scaling_factor)
change_exchanges_by_constant_factor(ds, scaling_factor, md['technosphere excludes'],
[doesnt_contain_any('name', image_air_pollutants)])
if image_technology != 'Nuclear': # We don't update emissions for nuclear
if update_emissions == True:
# Modify using IMAGE specific emissions data
for exc in biosphere(ds, either(*[contains('name', x) for x in image_air_pollutants])):
image_locations = (ds['location'])
flow = image_air_pollutants[exc['name']]
amount = agg_func(image_emissions[flow].loc[year][image_locations].values)
# if new amount isn't a number:
if np.isnan(amount):
print('Not a number! Setting exchange to zero' + ds['name'], exc['name'], ds['location'])
rescale_exchange(exc, 0)
# if the old amount was zero:
elif exc['amount'] == 0:
exc['amount'] = 1
rescale_exchange(exc, amount / exc['amount'], remove_uncertainty=True)
else:
rescale_exchange(exc, amount / exc['amount'])
changes[ds['code']].update({('updated exchanges', k): v for k, v in get_exchange_amounts(ds).items()})
return changes
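# Example call (the year is illustrative; 'BAU' and '450' are the scenarios
# defined at the top of this file):
#   changes = update_electricity_datasets_with_image_data(db, 2030, 'BAU')
#   pd.DataFrame.from_dict(changes, orient='index').to_excel('electricity_changes.xlsx')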
#
# Modify Carma Datasets
#
# Carma datasets are CCS datasets taken from project Carma - see Volkart 2013.
#
carma_electricity_ds_name_dict = {
'Electricity, at wood burning power plant 20 MW, truck 25km, post, pipeline 200km, storage 1000m/2025': 'Biomass CCS',
'Electricity, at power plant/natural gas, NGCC, no CCS/2025/kWh': 'Natural gas CC',
'Electricity, at power plant/natural gas, pre, pipeline 400km, storage 3000m/2025': 'Natural gas CCS',
'Electricity, at BIGCC power plant 450MW, pre, pipeline 200km, storage 1000m/2025': 'Biomass CCS',
'Electricity, at power plant/hard coal, PC, no CCS/2025': 'Coal ST',
'Electricity, at power plant/hard coal, IGCC, no CCS/2025': 'IGCC',
'Electricity, at wood burning power plant 20 MW, truck 25km, no CCS/2025': 'Biomass ST',
'Electricity, at power plant/natural gas, pre, pipeline 200km, storage 1000m/2025': 'Natural gas CCS',
'Electricity, at power plant/lignite, PC, no CCS/2025': 'Coal ST',
'Electricity, at power plant/hard coal, pre, pipeline 200km, storage 1000m/2025': 'Coal CCS',
'Electricity, from CC plant, 100% SNG, truck 25km, post, pipeline 200km, storage 1000m/2025': 'Biomass CCS',
'Electricity, at wood burning power plant 20 MW, truck 25km, post, pipeline 400km, storage 3000m/2025': 'Biomass CCS',
'Electricity, at power plant/hard coal, oxy, pipeline 400km, storage 3000m/2025': 'Coal CCS',
'Electricity, at power plant/lignite, oxy, pipeline 200km, storage 1000m/2025': 'Coal CCS',
'Electricity, at power plant/hard coal, post, pipeline 400km, storage 3000m/2025': 'Coal CCS',
'Electricity, at power plant/lignite, pre, pipeline 200km, storage 1000m/2025': 'Coal CCS',
'Electricity, at BIGCC power plant 450MW, pre, pipeline 400km, storage 3000m/2025': 'Biomass CCS',
'Electricity, at power plant/natural gas, post, pipeline 400km, storage 1000m/2025': 'Natural gas CCS',
'Electricity, at power plant/lignite, post, pipeline 400km, storage 3000m/2025': 'Coal CCS',
'Electricity, at power plant/hard coal, post, pipeline 400km, storage 1000m/2025': 'Coal CCS',
'Electricity, from CC plant, 100% SNG, truck 25km, post, pipeline 400km, storage 3000m/2025': 'Biomass CCS',
'Electricity, at power plant/natural gas, ATR H2-CC, no CCS/2025': 'Natural gas CCS',
'Electricity, at power plant/hard coal, pre, pipeline 400km, storage 3000m/2025': 'Coal CCS',
'Electricity, at power plant/lignite, IGCC, no CCS/2025': 'IGCC',
'Electricity, at power plant/hard coal, post, pipeline 200km, storage 1000m/2025': 'Coal CCS',
'Electricity, at power plant/lignite, oxy, pipeline 400km, storage 3000m/2025': 'Coal CCS',
'Electricity, at power plant/lignite, post, pipeline 200km, storage 1000m/2025': 'Coal CCS',
'Electricity, at power plant/lignite, pre, pipeline 400km, storage 3000m/2025': 'Coal CCS',
'Electricity, at power plant/natural gas, post, pipeline 200km, storage 1000m/2025': 'Natural gas CCS',
'Electricity, at power plant/natural gas, post, pipeline 400km, storage 3000m/2025': 'Natural gas CCS',
'Electricity, at BIGCC power plant 450MW, no CCS/2025': 'Biomass ST',
'Electricity, from CC plant, 100% SNG, truck 25km, no CCS/2025': 'Biomass ST',
'Electricity, at power plant/hard coal, oxy, pipeline 200km, storage 1000m/2025': 'Coal CCS'
}
def modify_all_carma_electricity_datasets(db, year, scenario, update_efficiency=True, update_emissions=True):
# First determine which image efficiency dataset needs to be used:
image_emissions = {}
for fuel2 in ['Coal', 'Natural gas', 'Biomass']:
image_emissions[fuel2] = get_image_electricity_emissions_per_input_energy(scenario, fuel2,
sector='Power generation')
fuel_dict = {'Biomass CCS': 'Biomass',
'Biomass ST': 'Biomass',
'Coal CCS': 'Coal',
'Coal ST': 'Coal',
'IGCC': 'Coal',
'Natural gas CC': 'Natural gas',
'Natural gas CCS': 'Natural gas'}
for name, tech in carma_electricity_ds_name_dict.items():
image_efficiency = get_image_efficiencies(scenario, tech)
for ds in get_many(db, equals('name', name)):
if update_efficiency:
if 'Electricity, at BIGCC power plant 450MW' in ds['name']:
modify_carma_BIGCC_efficiency(ds, year, scenario, image_efficiency)
else:
modify_standard_carma_dataset_efficiency(ds, year, scenario, image_efficiency)
if update_emissions:
modify_carma_dataset_emissions(db, ds, year, scenario, image_emissions[fuel_dict[tech]])
# The efficiency defined by image also includes the electricity consumed in the carbon capture process,
# so we have to set this exchange amount to zero:
if update_efficiency:
for ds in get_many(db, contains('name', 'CO2 capture')):
for exc in technosphere(ds, *[contains('name', 'Electricity'), equals('unit', 'kilowatt hour')]):
exc['amount'] = 0
def modify_carma_dataset_emissions(db, ds, year, scenario, emission_df):
# The dataset passed to this function doesn't have the biosphere flows directly.
# Rather, it has an exchange (with unit MJ) that contains the biosphere flows per unit fuel input.
biosphere_mapping = {'CH4': 'Methane, fossil',
'SO2': 'Sulfur dioxide',
'CO': 'Carbon monoxide, fossil',
'NOx': 'Nitrogen oxides',
'N2O': 'Dinitrogen monoxide'}
image_locations = ecoinvent_to_image_locations(ds['location'])
exc_dataset_names = [x['name'] for x in technosphere(ds, equals('unit', 'megajoule'))]
for exc_dataset in get_many(db, *[
either(*[equals('name', exc_dataset_name) for exc_dataset_name in exc_dataset_names])]):
if len(list(biosphere(exc_dataset))) == 0:
modify_carma_dataset_emissions(db, exc_dataset, year, scenario, emission_df)
continue
# Modify using IMAGE emissions data
for key, value in biosphere_mapping.items():
for exc in biosphere(exc_dataset, contains('name', value)):
exc['amount'] = np.average(emission_df[key].loc[year][image_locations].values)
if np.isnan(exc['amount']):
print('Not a number! Setting exchange to zero' + ds['name'], exc['name'], ds['location'])
exc['amount'] = 0
return
def modify_standard_carma_dataset_efficiency(ds, year, scenario, image_efficiency):
if 'Electricity, at BIGCC power plant 450MW' in ds['name']:
print("This function can't modify dataset: ", ds['name'], "It's got a different format.")
return
image_locations = ecoinvent_to_image_locations(ds['location'])
image_efficiency = np.average(image_efficiency.loc[year][image_locations].values)
# All other carma electricity datasets have a single exchange that is the combustion of a fuel in MJ.
# We can just scale this exchange and efficiency related changes will be done
for exc in technosphere(ds):
exc['amount'] = 3.6 / image_efficiency
return
def modify_carma_BIGCC_efficiency(ds, year, scenario, image_efficiency):
image_locations = ecoinvent_to_image_locations(ds['location'])
image_efficiency = np.average(image_efficiency.loc[year][image_locations].values)
old_efficiency = 3.6 / get_one(technosphere(ds), *[contains('name', 'Hydrogen, from steam reforming')])['amount']
for exc in technosphere(ds):
exc['amount'] = exc['amount'] * old_efficiency / image_efficiency
return
#
# Get image model results
#
def read_image_data_Reg_x_Tech(scenario, technology, filename_extension, world_calc_type):
# This function imports a set of results from image for a certain scenario and returns
# a dataframe with the values for all years and regions for a specific technology.
# Possible choices are listed in image_variable_names['Technology']
fp = os.path.join(scenarios[scenario]['filepath'], filename_extension)
image_output = load_image_data_file(fp)
result = {}
lookup_number = np.where(image_variable_names['Technology'] == technology)[0][0]
for year in image_output.years:
result[year] = {}
for region, vector in zip(REGIONS[:],
image_output.data[:, lookup_number, list(image_output.years).index(year)]):
result[year][region] = vector
result = pd.DataFrame.from_dict(result, orient='index')
if world_calc_type == 'mean':
result['World'] = result.mean(axis=1)
elif world_calc_type == 'sum':
result['World'] = result.sum(axis=1)
else:
print("can't calculate world")
return result
#
# Nuclear Electricity Efficiency
#
def read_electricity_uranium_consumption(scenario):
# This function imports the efficiency of nuclear power plants in GJ electricity /kg uranium
fp = os.path.join(scenarios[scenario]['filepath'], "tuss", "nuclfueleff.out")
image_output = load_image_data_file(fp)
result = {}
for year in image_output.years:
result[year] = {}
for region, vector in zip(REGIONS[:], image_output.data[:, list(image_output.years).index(year)]):
result[year][region] = vector
df = pd.DataFrame.from_dict(result, orient='index')
df.replace({0: np.nan},
inplace=True)  # we set all zero values to NaN so that the global average is calculated only from values that exist.
df['World'] = df.mean(axis=1)
return df
#
# Fossil Electricity Efficiency
#
def get_image_efficiencies(scenario, technology):
# This function imports a set of results from image for a certain scenario and returns
# a dataframe with the efficiency values for all years and regions for a specific technology.
# possible choices are listed in image_variable_names['Technology']
fp = os.path.join(scenarios[scenario]['filepath'], "tuss", "EPG", "ElecEffAvg.out")
elec_eff_avg = load_image_data_file(fp)
image_efficiency = {}
lookup_number = np.where(image_variable_names['Technology'] == technology)[0][0]
for year in elec_eff_avg.years:
image_efficiency[year] = {}
for region, vector in zip(REGIONS[:],
elec_eff_avg.data[:, lookup_number, list(elec_eff_avg.years).index(year)]):
image_efficiency[year][region] = vector
image_efficiency = pd.DataFrame.from_dict(image_efficiency, orient='index')
image_efficiency['World'] = image_efficiency.mean(axis=1)
return image_efficiency
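# Example call (values are illustrative): the returned frame is indexed by year
# with one column per IMAGE region plus 'World', e.g.
#   get_image_efficiencies('BAU', 'Coal ST').loc[2030]['World']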
#
# Fossil Electricity Emissions
#
def get_image_electricity_emission_factors(scenario, image_efficiency, fuel2, sector='Power generation'):
# This function imports a set of results from image for a certain scenario and returns
# a dictionary of dataframes each with the emission values for all years and regions for one pollutant.
# possible fuel2 choices are listed in image_variable_names['Fuel2']
emission_dict = {'CH4': "ENEFCH4.out",
'CO': "ENEFCO.out",
'N2O': "ENEFN2O.out",
'NOx': "ENEFNOx.out",
'SO2': "ENEFSO2.out",
'BC': "ENEFBC.out",
}
elec_emission_factors = {}
for k, v in emission_dict.items():
fp = os.path.join(scenarios[scenario]['filepath'], "indicatoren", v)
elec_emission_factors[k] = load_image_data_file(fp)
# We currently don't have a good way to deal with the fact that ecoinvent has many different VOCs listed.
# For the moment we just allow them to scale with the efficiency.
# Note that we don't import CO2 results as these are calculated by scaling using efficiency.
# This is more accurate as it considers that ecoinvent is more accurate regarding the energy content
# of the specific fuel used.
image_emissions = {}
fuel2_number = np.where(image_variable_names['Fuel2'] == fuel2)[0][0]
sector_number = np.where(image_variable_names['Sector'] == sector)[0][0]
for key, value in elec_emission_factors.items():
image_emissions[key] = {}
for year in elec_emission_factors[key].years:
image_emissions[key][year] = {}
for region, vector in zip(REGIONS[:-1], value.data[:, sector_number, fuel2_number,
list(elec_emission_factors[key].years).index(year)]):
image_emissions[key][year][region] = vector
image_emissions[key] = pd.DataFrame.from_dict(image_emissions[key], orient='index')
# Note that Image reports emissions per unit of fuel input, so we have to make a couple of calculations
if key == 'BC':
image_emissions[key] = (image_emissions[key].divide(image_efficiency,
axis=0)) * 3.6e-3 # convert to kg/kWh of electricity
else:
image_emissions[key] = (image_emissions[key].divide(image_efficiency,
axis=0)) * 3.6e-6 # convert to kg/kWh of electricity
image_emissions[key].replace({0: np.nan},
inplace=True)  # we set all zero values to NaN so that the global average is calculated only from values that exist.
image_emissions[key]['World'] = image_emissions[key].mean(axis=1)
image_emissions[key].fillna(0, inplace=True) # set nan values back to zero.
return image_emissions
def get_image_electricity_emissions_per_input_energy(scenario, fuel2, sector='Power generation'):
# This function imports a set of results from image for a certain scenario and returns
# a dictionary of dataframes each with the emission values for all years and regions for one pollutant.
# possible fuel2 choices are listed in image_variable_names['Fuel2']
elec_emission_factors = {}
fp = os.path.join(scenarios[scenario]['filepath'], "indicatoren", "ENEFCH4.out")
elec_emission_factors['CH4'] = load_image_data_file(fp)
fp = os.path.join(scenarios[scenario]['filepath'], "indicatoren", "ENEFCO.out")
elec_emission_factors['CO'] = load_image_data_file(fp)
fp = os.path.join(scenarios[scenario]['filepath'], "indicatoren", "ENEFN2O.out")
elec_emission_factors['N2O'] = load_image_data_file(fp)
fp = os.path.join(scenarios[scenario]['filepath'], "indicatoren", "ENEFNOx.out")
elec_emission_factors['NOx'] = load_image_data_file(fp)
fp = os.path.join(scenarios[scenario]['filepath'], "indicatoren", "ENEFSO2.out")
elec_emission_factors['SO2'] = load_image_data_file(fp)
fp = os.path.join(scenarios[scenario]['filepath'], "indicatoren", "ENEFBC.out")
elec_emission_factors['BC'] = load_image_data_file(fp)
# We currently don't have a good way to deal with the fact that ecoinvent has many different VOCs listed.
# For the moment we just allow them to scale with the efficiency.
# Note that we don't import CO2 results as these are calculated by scaling using efficiency.
# This is more accurate as it considers that ecoinvent is more accurate regarding the energy content of coal.
image_emissions = {}
fuel2_number = np.where(image_variable_names['Fuel2'] == fuel2)[0][0]
sector_number = np.where(image_variable_names['Sector'] == sector)[0][0]
for key, value in elec_emission_factors.items():
image_emissions[key] = {}
for year in elec_emission_factors[key].years:
image_emissions[key][year] = {}
for region, vector in zip(REGIONS[:-1], value.data[:, sector_number, fuel2_number,
list(elec_emission_factors[key].years).index(year)]):
image_emissions[key][year][region] = vector
image_emissions[key] = pd.DataFrame.from_dict(image_emissions[key], orient='index')
# Note that Image reports emissions per unit of fuel input, so we have to make a couple of calculations
if key == 'BC':
image_emissions[key] = image_emissions[key] * 1e-3 # convert to kg/MJ of input energy
else:
image_emissions[key] = image_emissions[key] * 1e-6 # convert to kg/MJ of input energy
image_emissions[key].replace({0: np.nan},
inplace=True)  # we set all zero values to NaN so that the global average is calculated only from values that exist.
image_emissions[key]['World'] = image_emissions[key].mean(axis=1)
image_emissions[key].fillna(0, inplace=True) # set nan values back to zero.
return image_emissions
#
# Electricity markets:
#
# Define available technologies:
#
available_electricity_generating_technologies = {
'Solar PV': ['electricity production, photovoltaic, 3kWp slanted-roof installation, multi-Si, panel, mounted',
'electricity production, photovoltaic, 3kWp slanted-roof installation, single-Si, panel, mounted',
'electricity production, photovoltaic, 570kWp open ground installation, multi-Si'],
'CSP': ['Electricity production for a 50MW parabolic trough power plant', # Will be available in ecoinvent 3.4
'Electricity production at a 20MW solar tower power plant'], # Will be available in ecoinvent 3.4
'Wind onshore': ['electricity production, wind, <1MW turbine, onshore',
'electricity production, wind, 1-3MW turbine, onshore',
'electricity production, wind, >3MW turbine, onshore'],
'Wind offshore': ['electricity production, wind, 1-3MW turbine, offshore'],
'Hydro': ['electricity production, hydro, reservoir, alpine region',
'electricity production, hydro, reservoir, non-alpine region',
'electricity production, hydro, reservoir, tropical region',
'electricity production, hydro, run-of-river'],
'Other renewables': ['electricity production, deep geothermal'],
'Nuclear': ['electricity production, nuclear, boiling water reactor',
'electricity production, nuclear, pressure water reactor, heavy water moderated',
'electricity production, nuclear, pressure water reactor'],
'Coal ST': ['electricity production, hard coal',
'electricity production, lignite'],
'Coal CHP': ['heat and power co-generation, hard coal',
'heat and power co-generation, lignite'],
'IGCC': ['Electricity, at power plant/hard coal, IGCC, no CCS/2025', # From Carma project
'Electricity, at power plant/lignite, IGCC, no CCS/2025'], # From Carma project
'Oil ST': ['electricity production, oil'],
'Oil CHP': ['heat and power co-generation, oil'],
'Oil CC': ['electricity production, oil'], # Use copy of Oil ST here as this doesn't exist in ecoinvent
'Natural gas OC': ['electricity production, natural gas, conventional power plant'],
'Natural gas CC': ['electricity production, natural gas, combined cycle power plant'],
'Natural gas CHP': ['heat and power co-generation, natural gas, combined cycle power plant, 400MW electrical',
'heat and power co-generation, natural gas, conventional power plant, 100MW electrical'],
'Biomass CHP': ['heat and power co-generation, wood chips, 6667 kW, state-of-the-art 2014',
'heat and power co-generation, wood chips, 6667 kW',
'heat and power co-generation, biogas, gas engine'],
'Biomass CC': ['heat and power co-generation, wood chips, 6667 kW, state-of-the-art 2014',
# Use copy of Biomass CHP here as this not available in ecoinvent
'heat and power co-generation, wood chips, 6667 kW',
'heat and power co-generation, biogas, gas engine'],
'Biomass ST': ['heat and power co-generation, wood chips, 6667 kW, state-of-the-art 2014',
# Use copy of Biomass CHP here as this not available in ecoinvent
'heat and power co-generation, wood chips, 6667 kW',
'heat and power co-generation, biogas, gas engine'],
'Coal CCS': ['Electricity, at power plant/hard coal, pre, pipeline 200km, storage 1000m/2025',
'Electricity, at power plant/lignite, pre, pipeline 200km, storage 1000m/2025',
'Electricity, at power plant/hard coal, post, pipeline 200km, storage 1000m/2025',
'Electricity, at power plant/lignite, post, pipeline 200km, storage 1000m/2025',
'Electricity, at power plant/lignite, oxy, pipeline 200km, storage 1000m/2025',
'Electricity, at power plant/hard coal, oxy, pipeline 200km, storage 1000m/2025'],
'Coal CHP CCS': ['Electricity, at power plant/hard coal, pre, pipeline 200km, storage 1000m/2025',
# Carma project didn't include Coal CHP CCS
'Electricity, at power plant/lignite, pre, pipeline 200km, storage 1000m/2025',
'Electricity, at power plant/hard coal, post, pipeline 200km, storage 1000m/2025',
'Electricity, at power plant/lignite, post, pipeline 200km, storage 1000m/2025',
'Electricity, at power plant/lignite, oxy, pipeline 200km, storage 1000m/2025',
'Electricity, at power plant/hard coal, oxy, pipeline 200km, storage 1000m/2025'],
'Oil CCS': ['Electricity, at power plant/hard coal, pre, pipeline 200km, storage 1000m/2025',
# Carma project didn't include oil - we just use all coal and gas datasets as a proxy
'Electricity, at power plant/lignite, pre, pipeline 200km, storage 1000m/2025',
'Electricity, at power plant/hard coal, post, pipeline 200km, storage 1000m/2025',
'Electricity, at power plant/lignite, post, pipeline 200km, storage 1000m/2025',
'Electricity, at power plant/lignite, oxy, pipeline 200km, storage 1000m/2025',
'Electricity, at power plant/hard coal, oxy, pipeline 200km, storage 1000m/2025',
'Electricity, at power plant/natural gas, pre, pipeline 200km, storage 1000m/2025',
'Electricity, at power plant/natural gas, post, pipeline 200km, storage 1000m/2025'],
'Oil CHP CCS': ['Electricity, at power plant/hard coal, pre, pipeline 200km, storage 1000m/2025',
# Carma project didn't include oil - we just use all coal and gas datasets as a proxy
'Electricity, at power plant/lignite, pre, pipeline 200km, storage 1000m/2025',
'Electricity, at power plant/hard coal, post, pipeline 200km, storage 1000m/2025',
'Electricity, at power plant/lignite, post, pipeline 200km, storage 1000m/2025',
'Electricity, at power plant/lignite, oxy, pipeline 200km, storage 1000m/2025',
'Electricity, at power plant/hard coal, oxy, pipeline 200km, storage 1000m/2025',
'Electricity, at power plant/natural gas, pre, pipeline 200km, storage 1000m/2025',
'Electricity, at power plant/natural gas, post, pipeline 200km, storage 1000m/2025'],
'Natural gas CCS': ['Electricity, at power plant/natural gas, pre, pipeline 200km, storage 1000m/2025',
'Electricity, at power plant/natural gas, post, pipeline 200km, storage 1000m/2025'],
'Natural gas CHP CCS': ['Electricity, at power plant/natural gas, pre, pipeline 200km, storage 1000m/2025',
# Copy normal natural gas CCS datasets here
'Electricity, at power plant/natural gas, post, pipeline 200km, storage 1000m/2025'],
'Biomass CCS': ['Electricity, from CC plant, 100% SNG, truck 25km, post, pipeline 200km, storage 1000m/2025',
'Electricity, at wood burning power plant 20 MW, truck 25km, post, pipeline 200km, storage 1000m/2025',
'Electricity, at BIGCC power plant 450MW, pre, pipeline 200km, storage 1000m/2025'],
'Biomass CHP CCS': ['Electricity, from CC plant, 100% SNG, truck 25km, post, pipeline 200km, storage 1000m/2025',
# Copy normal wood CCS datasets here as CHP not available
'Electricity, at wood burning power plant 20 MW, truck 25km, post, pipeline 200km, storage 1000m/2025',
'Electricity, at BIGCC power plant 450MW, pre, pipeline 200km, storage 1000m/2025'],
}
#
# <h2 id="Overall-function-to-change-markets">Overall function to change markets<a class="anchor-link" href="#Overall-function-to-change-markets">¶</a></h2>
#
#
# <p>Function that returns all IMAGE locations that intersect with a given ecoinvent location:</p>
#
# In[35]:
# these locations aren't matched correctly by the constructive geometries library - we correct them here:
fix_names = {'CSG': 'CN-CSG',
'SGCC': 'CN-SGCC',
'RFC': 'US-RFC',
'SERC': 'US-SERC',
'TRE': 'US-TRE',
'ASCC': 'US-ASCC',
'HICC': 'US-HICC',
'FRCC': 'US-FRCC',
'SPP': 'US-SPP',
'MRO, US only': 'US-MRO',
'NPCC, US only': 'US-NPCC',
'WECC, US only': 'US-WECC',
'IAI Area, Africa': 'IAI Area 1, Africa',
'IAI Area, South America': 'IAI Area 3, South America',
'IAI Area, Asia, without China and GCC': 'IAI Area 4&5, without China',
'IAI Area, North America, without Quebec': 'IAI Area 2, without Quebec',
'IAI Area, Gulf Cooperation Council': 'IAI Area 8, Gulf'
}
# In[36]:
def ecoinvent_to_image_locations(loc):
if loc == 'RoW':
loc = 'GLO'
    if loc in fix_names:
new_loc_name = fix_names[loc]
return [r[1] for r in geomatcher.intersects(new_loc_name) if r[0] == 'IMAGE']
else:
return [r[1] for r in geomatcher.intersects(loc) if r[0] == 'IMAGE']
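# A hedged usage sketch (region codes below are illustrative, not notebook output):
#     ecoinvent_to_image_locations('CH')   # -> the IMAGE region(s) that cover Switzerland
#     ecoinvent_to_image_locations('RoW')  # -> treated as 'GLO', i.e. every IMAGE region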
# In[37]:
def update_electricity_markets(db, year, scenario):
# import the image market mix from the image result files:
image_electricity_market_df = get_image_markets(year, scenario)
# Remove all electricity producers from markets:
db = empty_low_voltage_markets(db)
db = empty_medium_voltage_markets(db)
db = empty_high_voltage_markets(db) # This function isn't working as expected - it needs to delete imports as well.
changes = {}
# update high voltage markets:
for ds in get_many(db, *electricity_market_filter_high_voltage):
changes[ds['code']] = {}
changes[ds['code']].update({('meta data', x): ds[x] for x in ['name', 'location']})
changes[ds['code']].update({('original exchanges', k): v for k, v in get_exchange_amounts(ds).items()})
        delete_electricity_inputs_from_market(ds)  # This function will delete the markets. Once Wurst is updated this can be deleted.
add_new_datasets_to_electricity_market(ds, db, image_electricity_market_df)
changes[ds['code']].update({('updated exchanges', k): v for k, v in get_exchange_amounts(ds).items()})
return changes
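# A hedged usage sketch (assumes `db` and the `scenarios` dict are defined in
# earlier cells; 'SSP2' is a placeholder key, not necessarily a real one):
#     changes = update_electricity_markets(db, 2030, 'SSP2')
#     pd.DataFrame.from_dict(changes, orient='index')  # compare original vs updated exchanges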
#
# <h2 id="Define-electricity-market-filters">Define electricity market filters<a class="anchor-link" href="#Define-electricity-market-filters">¶</a></h2>
#
# In[38]:
electricity_market_filter_high_voltage = [contains('name', 'market for electricity, high voltage'),
doesnt_contain_any('name',
['aluminium industry', 'internal use in coal mining'])]
electricity_market_filter_medium_voltage = [contains('name', 'market for electricity, medium voltage'),
doesnt_contain_any('name', ['aluminium industry',
'electricity, from municipal waste incineration'])]
electricity_market_filter_low_voltage = [contains('name', 'market for electricity, low voltage')]
#
# <h2 id="Modify-high-voltage-markets">Modify high voltage markets<a class="anchor-link" href="#Modify-high-voltage-markets">¶</a></h2>
#
# In[39]:
def delete_electricity_inputs_from_market(ds):
# This function reads through an electricity market dataset and deletes all electricity inputs that are not own consumption.
ds['exchanges'] = [exc for exc in get_many(ds['exchanges'], *[either(*[exclude(contains('unit', 'kilowatt hour')),
contains('name',
'market for electricity, high voltage'),
contains('name',
'market for electricity, medium voltage'),
contains('name',
'market for electricity, low voltage'),
contains('name',
'electricity voltage transformation')])])]
# In[40]:
def get_image_markets(year, scenario):
# This returns a pandas dataframe containing the electricity mix for a certain year for all image locations.
# This function is totally inefficient and should be rewritten to consider the year in question. Currently it calculates for all years and then filters out the year in question!
fp = os.path.join(scenarios[scenario]['filepath'], "T2RT", "ElecProdSpec.out")
elec_production = load_image_data_file(fp)
elec_prod = {}
elec_prod_dfs = {}
for i, region in enumerate(REGIONS):
elec_prod[region] = elec_production.data[i, :, :]
elec_prod_dfs[region] = pd.DataFrame(elec_production.data[i, :, :], columns=elec_production.years,
index=image_variable_names['Technology'].dropna().values).T.drop(
'EMPTY CATEGORY!!', axis=1)
for region in REGIONS[:-1]:
elec_prod_dfs['World'] += elec_prod_dfs[region]
df = pd.concat([pd.Series(elec_prod_dfs[region].loc[year], name=region) for region in REGIONS[:-1]], axis=1)
df['World'] = df.sum(axis=1)
empty_columns = find_empty_columns(df)
df = df.divide(df.sum(axis=0)).sort_values(by='World', ascending=False).T.drop(empty_columns, axis=1)
return df
def find_average_mix(df):
    # There may be several image regions matching the ecoinvent region; this returns the average mix across all of them.
return df.divide(df.sum().sum()).sum()
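# A tiny self-check for find_average_mix (assumes pandas is imported as pd in an
# earlier cell). Two hypothetical regions averaged into one mix whose shares sum to 1:
_toy_mix = pd.DataFrame({'Solar PV': [10.0, 0.0], 'Wind onshore': [10.0, 20.0]},
                        index=['R1', 'R2'])
assert abs(find_average_mix(_toy_mix).sum() - 1.0) < 1e-9  # Solar 0.25, Wind 0.75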
# In[41]:
def find_ecoinvent_electricity_datasets_in_same_ecoinvent_location(tech, location, db):
    # first try the ecoinvent location code:
    try:
        return [x for x in get_many(db, *[
            either(*[equals('name', name) for name in available_electricity_generating_technologies[tech]]),
            equals('location', location), equals('unit', 'kilowatt hour')])]
    # otherwise try the image location codes (for new datasets)
    except Exception:
        return [x for x in get_many(db, *[
            either(*[equals('name', name) for name in available_electricity_generating_technologies[tech]]),
            either(*[equals('location', loc) for loc in ecoinvent_to_image_locations(location)]),
            equals('unit', 'kilowatt hour')])]
# In[42]:
def find_other_ecoinvent_regions_in_image_region(loc):
if loc == 'RoW':
loc = 'GLO'
if loc in fix_names:
new_loc_name = fix_names[loc]
image_regions = [r for r in geomatcher.intersects(new_loc_name) if r[0] == 'IMAGE']
else:
image_regions = [r for r in geomatcher.intersects(loc) if r[0] == 'IMAGE']
    temp = []
    for image_region in image_regions:
        temp.extend([r for r in geomatcher.contained(image_region)])
    result = []
    for item in temp:
        if isinstance(item, tuple):
            result.append(item[1])
        else:
            result.append(item)
    return set(result)
# In[43]:
def find_ecoinvent_electricity_datasets_in_image_location(tech, location, db):
return [x for x in get_many(db, *[
either(*[equals('name', name) for name in available_electricity_generating_technologies[tech]]),
either(*[equals('location', loc) for loc in find_other_ecoinvent_regions_in_image_region(location)]),
equals('unit', 'kilowatt hour')
])]
# In[44]:
def find_ecoinvent_electricity_datasets_in_all_locations(tech, db):
return [x for x in get_many(db, *[
either(*[equals('name', name) for name in available_electricity_generating_technologies[tech]]),
equals('unit', 'kilowatt hour')])]
# In[45]:
def add_new_datasets_to_electricity_market(ds, db, df):
# This function adds new electricity datasets to a market based on image results. We pass not only a dataset to modify, but also a pandas dataframe containing the new electricity mix information, and the db from which we should find the datasets
# find out which image regions correspond to our dataset:
image_locations = ecoinvent_to_image_locations(ds['location'])
# here we find the mix of technologies in the new market and how much they contribute:
mix = find_average_mix(df.loc[image_locations]) # could be several image locations - we just take the average
# here we find the datasets that will make up the mix for each technology
datasets = {}
for i in mix.index:
if mix[i] != 0:
print('Next Technology: ',i)
# First try to find a dataset that is from that location (or image region for new datasets):
datasets[i] = find_ecoinvent_electricity_datasets_in_same_ecoinvent_location(i, ds['location'], db)
print('First round: ',i, [(ds['name'], ds['location']) for ds in datasets[i]])
# If this doesn't work, we try to take a dataset from another ecoinvent region within the same image region
if len(datasets[i]) == 0:
datasets[i] = find_ecoinvent_electricity_datasets_in_image_location(i, ds['location'], db)
print('Second round: ',i, [(ds['name'], ds['location']) for ds in datasets[i]])
            # If even this doesn't work, we try taking a global dataset
if len(datasets[i]) == 0:
datasets[i] = find_ecoinvent_electricity_datasets_in_same_ecoinvent_location(i, 'GLO', db)
print('Third round: ',i, [(ds['name'], ds['location']) for ds in datasets[i]])
# if no global dataset available, we just take the average of all datasets we have:
if len(datasets[i]) == 0:
datasets[i] = find_ecoinvent_electricity_datasets_in_all_locations(i, db)
print('Fourth round: ',i, [(ds['name'], ds['location']) for ds in datasets[i]])
# If we still can't find a dataset, we just take the global market group
if len(datasets[i]) == 0:
print('No match found for location: ', ds['location'], ' Technology: ', i,
'. Taking global market group for electricity')
datasets[i] = [x for x in get_many(db, *[equals('name', 'market group for electricity, high voltage'),
equals('location', 'GLO')])]
# Now we add the new exchanges:
for i in mix.index:
if mix[i] != 0:
total_amount = mix[i]
amount = total_amount / len(datasets[i])
for dataset in datasets[i]:
ds['exchanges'].append({
'amount': amount,
'unit': dataset['unit'],
'input': (dataset['database'], dataset['code']),
'type': 'technosphere',
'name': dataset['name'],
'location': dataset['location']
})
    # confirm that exchanges sum to 1!
    total = np.sum([exc['amount'] for exc in technosphere(ds, *[equals('unit', 'kilowatt hour'),
                                                                doesnt_contain_any('name', [
                                                                    'market for electricity, high voltage'])])])
    if round(total, 4) != 1.00:
        print(ds['location'], " New exchanges don't add to one! something is wrong!", total)
return
# In[ ]:
| 47.193935
| 249
| 0.644266
| 7,503
| 59,134
| 4.954685
| 0.100493
| 0.021789
| 0.028083
| 0.035884
| 0.649011
| 0.606052
| 0.55766
| 0.51228
| 0.483739
| 0.464129
| 0
| 0.026126
| 0.247218
| 59,134
| 1,252
| 250
| 47.231629
| 0.808739
| 0.21891
| 0
| 0.35514
| 0
| 0.014686
| 0.306347
| 0.00466
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049399
| false
| 0.006676
| 0.021362
| 0.00534
| 0.12283
| 0.024032
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab7b5b063411a5951884f02b5ca7d911a992e189
| 6,996
|
py
|
Python
|
FJTYFilt_estimator.py
|
openeventdata/FJTY-Filter
|
32937407c798c14118d6956ce44c6ce48aa69fae
|
[
"MIT"
] | null | null | null |
FJTYFilt_estimator.py
|
openeventdata/FJTY-Filter
|
32937407c798c14118d6956ce44c6ce48aa69fae
|
[
"MIT"
] | null | null | null |
FJTYFilt_estimator.py
|
openeventdata/FJTY-Filter
|
32937407c798c14118d6956ce44c6ce48aa69fae
|
[
"MIT"
] | null | null | null |
"""
FJTYFilt_estimator.py
Creates a vectorizer and estimates an SVM model from the files in FILE_NAMES; runs a TRAIN_PROP train/test split on these,
then saves these
TO RUN PROGRAM:
python3 FJTYFilt_estimator.py
PROGRAMMING NOTES:
1. There are no summary statistics across the experiments, as these are currently just eyeballed to make sure nothing is
badly out of line.
SYSTEM REQUIREMENTS
This program has been successfully run under Mac OS 10.10.5; it is standard Python 3.5
so it should also run in Unix or Windows.
PROVENANCE:
Programmer: Philip A. Schrodt
Parus Analytics
Charlottesville, VA, 22901 U.S.A.
http://eventdata.parusanalytics.com
Copyright (c) 2017 Philip A. Schrodt. All rights reserved.
This code is covered under the MIT license: http://opensource.org/licenses/MIT
Report bugs to: schrodt735@gmail.com
REVISION HISTORY:
16-Jan-17: Initial version
31-Jan-17: modified to save vectorizer and model, clean up output
02-Jan-17: cmd-line for file list; save estimates
05-Mar-19: modified from SVM_filter_estimate.py for FJ project
=========================================================================================================
"""
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import svm
from time import time
import datetime
import utilFJML
import pickle
import random
import os
N_EXPERIMENTS = 5
TRAIN_PROP = 0.33 # proportion of cases in the training file.
INPUT_FILELIST = "filt-estimator-filelist.txt"
FILE_PATH = "../FJML-Filter/FJTY_training_wordlists"
FILE_NAMES = [line.rstrip('\n') for line in open(INPUT_FILELIST, "r")]
LABELS = ["codeable", "sports", "culture/entertainment", "business/finance", "opinion", "crime", "accidents",
"natural disaster", "open", "no codeable content"]
TEST_RESULT_FILE_NAME = "SVM_test_results-"
VECTORZ_PFILE_NAME = "save-vectorizer-Mk2.p"
MODEL_PFILE_NAME = "save-lin_clf-Mk2.p"
N_MODE = 10 # maximum number of unique modes
random.seed()
# Evaluate the model
suffix = utilFJML.get_timed_suffix()
fout = open(TEST_RESULT_FILE_NAME + suffix + ".txt", 'w')
fout.write("SVM_FILTER_ESTIMATE.PY TRAIN/TEST RESULTS\nRun datetime: {:s}\n".format(datetime.datetime.now().strftime('%y-%m-%d %H:%M:%S')))
fout.write("Training cases proportion: {:0.3f}\nTraining files\n".format(TRAIN_PROP))
fout.write("FILE_PATH: " + FILE_PATH + "\n")
for stnm in FILE_NAMES:
fout.write(" " + stnm + '\n')
for kex in range(N_EXPERIMENTS):
fout.write("\n ============ Experiment {:d} ============\n".format(kex + 1))
Y = []
corpus = []
Ytest = []
testcase = []
for filename in FILE_NAMES:
reader = utilFJML.read_file(os.path.join(FILE_PATH, filename))
print("Reading", FILE_PATH + filename)
for krec, rec in enumerate(reader):
if random.random() < TRAIN_PROP:
Y.append(int(rec['mode'][0]))
corpus.append(rec['textInfo']['wordlist'])
else:
Ytest.append(int(rec['mode'][0]))
testcase.append(rec['textInfo']['wordlist'])
vectorizer = TfidfVectorizer(min_df=1)
tfidf2 = vectorizer.fit_transform(corpus)
X = tfidf2.toarray()
t0 = time()
lin_clf = svm.LinearSVC()
lin_clf.fit(X, Y)
print("Time to estimate: {:0.3f} sec".format(time() - t0))
fout.write("Time to estimate: {:0.3f} sec\n".format(time() - t0))
"""LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)"""
#dec = lin_clf.decision_function([[1,1]])
kcorr = 0
classmat = []
for ka in range(N_MODE):
classmat.append(N_MODE*[0])
for ka, xv in enumerate(X):
pred = lin_clf.predict([xv])
classmat[Y[ka]][pred[0]] +=1
if Y[ka] == pred[0]:
kcorr +=1
print('Training set')
fout.write('Training set\n')
for ka, kv in enumerate(classmat):
print(ka,end=' | ')
fout.write(str(ka) + ' | ')
tot = 0
for kb, num in enumerate(kv):
print("{:4d} ".format(num),end='')
fout.write("{:4d} ".format(num))
tot += num
if ka == kb:
main = num
if tot > 0:
print(' {:.2f}'.format(float(main*100)/tot))
fout.write(' {:.2f}\n'.format(float(main*100)/tot))
else:
print(' {:.2f}'.format(0.0))
fout.write(' {:.2f}\n'.format(0.0))
X_test = vectorizer.transform(testcase).toarray()
kt = 0
kcorr = 0
classmat = []
for ka in range(N_MODE):
classmat.append(N_MODE*[0])
t0 = time()
for ka, xv in enumerate(X_test):
kt += 1
pred = lin_clf.predict([xv])
classmat[Ytest[ka]][pred[0]] +=1
if Ytest[ka] == pred[0]:
kcorr +=1
print("Time to fit {:d} cases {:0.3f} sec".format(kt, time() - t0))
fout.write("\nTime to fit {:d} cases {:0.3f} sec\n".format(kt, time() - t0))
print('Test set')
fout.write('Test set\n')
for ka, kv in enumerate(classmat):
tot = 0
print("{:>22s} | ".format(LABELS[ka]),end='')
fout.write("{:>22s} | ".format(LABELS[ka]))
nnc = 0
for kb, num in enumerate(kv):
print("{:4d} ".format(num),end='')
fout.write("{:4d} ".format(num))
tot += num
if ka == kb:
main = num
if kb != 0:
nnc += num
if tot > 0:
print(' {:4d} ({:6.2f}%) {:6.2f}%'.format(tot,float(tot*100)/kt, float(main*100)/tot), end="")
fout.write(' {:4d} ({:6.2f}%) {:6.2f}%'.format(tot,float(tot*100)/kt, float(main*100)/tot))
if ka == 0:
print(' {:6.2f}%'.format(float(main*100)/tot))
fout.write(' {:6.2f}%\n'.format(float(main*100)/tot))
else:
print(' {:6.2f}%'.format(float(nnc*100)/tot))
fout.write(' {:6.2f}%\n'.format(float(nnc*100)/tot))
else:
print(' ---')
fout.write(' ---\n')
fout.close()
print('Saving model using all cases')
Y = []
corpus = []
for filename in FILE_NAMES:
reader = utilFJML.read_file(os.path.join(FILE_PATH, filename))
print("Reading", FILE_PATH + filename)
for krec, rec in enumerate(reader):
Y.append(int(rec['mode'][0]))
corpus.append(rec['textInfo']['wordlist'])
vectorizer = TfidfVectorizer(min_df=1)
tfidf2 = vectorizer.fit_transform(corpus)
pickle.dump(vectorizer, open(VECTORZ_PFILE_NAME, "wb"))
X = tfidf2.toarray()
lin_clf = svm.LinearSVC()
lin_clf.fit(X, Y)
pickle.dump(lin_clf, open(MODEL_PFILE_NAME, "wb"))
print("Finished")
| 33.156398
| 139
| 0.592767
| 955
| 6,996
| 4.258639
| 0.300524
| 0.042046
| 0.017703
| 0.022129
| 0.388001
| 0.357512
| 0.288173
| 0.279813
| 0.253012
| 0.211212
| 0
| 0.03244
| 0.237707
| 6,996
| 210
| 140
| 33.314286
| 0.730171
| 0.194969
| 0
| 0.442857
| 0
| 0
| 0.171223
| 0.023827
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0.128571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab7bd35e838333316b3f6cced216348f59d4567c
| 658
|
py
|
Python
|
026_comprehension/list.py
|
rafael-torraca/python-100-days-of-code
|
3a5b3e32c5a3fd66a4fd726d378e0d2f746a3f30
|
[
"MIT"
] | null | null | null |
026_comprehension/list.py
|
rafael-torraca/python-100-days-of-code
|
3a5b3e32c5a3fd66a4fd726d378e0d2f746a3f30
|
[
"MIT"
] | null | null | null |
026_comprehension/list.py
|
rafael-torraca/python-100-days-of-code
|
3a5b3e32c5a3fd66a4fd726d378e0d2f746a3f30
|
[
"MIT"
] | null | null | null |
# for loop
numbers = [1, 2, 3, 4, 5]
new_list = []
for n in numbers:
add_1 = n + 1
new_list.append(add_1)
print(new_list)
# List Comprehension
new_list_comprehension = [n + 1 for n in numbers]
print(new_list_comprehension)
# Range List Comprehension
range_list = [n * 2 for n in range(1, 5)]
print(range_list)
# Conditional List Comprehension
only_pairs = [n for n in range(1, 11) if n % 2 == 0]
print(only_pairs)
names = ["Alex", "Beth", "Caroline", "Dave", "Eleanor", "Freddie"]
short_names = [name for name in names if len(name) < 5]
print(short_names)
uppercase_names = [name.upper() for name in names if len(name) > 5]
print(uppercase_names)
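# Bonus example (not part of the original lesson): an if/else *expression*
# before the `for` transforms every element instead of filtering:
name_lengths = ["short" if len(name) < 5 else "long" for name in names]
print(name_lengths)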
| 25.307692
| 67
| 0.693009
| 112
| 658
| 3.919643
| 0.321429
| 0.079727
| 0.05467
| 0.059226
| 0.186788
| 0.132118
| 0.132118
| 0.132118
| 0.132118
| 0
| 0
| 0.034991
| 0.174772
| 658
| 26
| 68
| 25.307692
| 0.773481
| 0.12462
| 0
| 0
| 0
| 0
| 0.059441
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.352941
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab7d6681715869d89f5a128f73238e9596f37fcc
| 2,673
|
py
|
Python
|
tests/test_everywhere.py
|
b3b/herethere
|
0a9e667553bea4e7700b2b1af2827d08dded6ca5
|
[
"MIT"
] | null | null | null |
tests/test_everywhere.py
|
b3b/herethere
|
0a9e667553bea4e7700b2b1af2827d08dded6ca5
|
[
"MIT"
] | null | null | null |
tests/test_everywhere.py
|
b3b/herethere
|
0a9e667553bea4e7700b2b1af2827d08dded6ca5
|
[
"MIT"
] | null | null | null |
from io import StringIO
import os
from pathlib import Path
import pytest
from herethere.everywhere import ConnectionConfig, runcode
from herethere.everywhere import config
code_with_definition = """
def foo(a, b):
return a + b
print(foo(1, 2))
"""
@pytest.mark.parametrize(
"code, expected",
[
('print("1")', "1\n"),
('print("1")\nprint("2")', "1\n2\n"),
(code_with_definition, "3\n"),
],
)
def test_runcode_expected_result(code, expected):
assert runcode(code) == expected
def test_runcode_syntax_error():
assert "SyntaxError: invalid syntax" in runcode("syntax error here")
@pytest.mark.parametrize(
"code, expected",
[
('print("1")\nprint("2")', "1\n2\n"),
],
)
def test_runcode_expected_io(code, expected):
stdout = StringIO()
assert not runcode(code, stdout=stdout)
assert stdout.getvalue() == expected
def test_runcode_namespace_used():
assert "NameError:" in runcode("print(runcode_global_var)")
namespace = globals()
global runcode_global_var
runcode_global_var = 111
assert "NameError:" in runcode("print(runcode_global_var)")
assert runcode("print(runcode_global_var)", namespace=namespace) == "111\n"
assert (
runcode(
"runcode_global_var *= 3 ; print(runcode_global_var)", namespace=namespace
)
== "333\n"
)
assert runcode_global_var == 333
@pytest.mark.parametrize(
"path,env,expected",
(
(
"",
{
"THERE_HOST": "1",
"THERE_PORT": "2",
"THERE_USERNAME": "3",
"THERE_PASSWORD": "4",
},
ConnectionConfig("1", "2", "3", "4"),
),
(
"tests/connection.env",
{},
ConnectionConfig("localhost", "9022", "here", "there"),
),
),
)
def test_connection_config_loaded(path, env, expected, tmp_environ):
tmp_environ.update(env)
assert ConnectionConfig.load(path=path, prefix="there") == expected
def test_connection_not_found(tmp_environ):
with pytest.raises(config.ConnectionConfigError):
ConnectionConfig.load(path="no-such-config-here", prefix="there")
@pytest.mark.parametrize("prefix", ("", "test"))
def test_connection_config_saved(tmpdir, prefix):
path = Path(tmpdir) / "test-config-saved.env"
assert not os.path.exists(path)
with pytest.raises(config.ConnectionConfigError):
ConnectionConfig.load(path=path, prefix=prefix)
ConnectionConfig("localhost", "9022", "here", "there").save(path, prefix=prefix)
ConnectionConfig.load(path=path, prefix=prefix)
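# Hedged usage sketch, mirroring the behaviour the tests above assert:
#     runcode('print(1 + 1)')                             # -> "2\n"
#     runcode('print(missing_name)')                      # -> text containing "NameError:"
#     runcode('print(some_global)', namespace=globals())  # resolves names in the given namespace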
| 25.216981
| 86
| 0.626263
| 299
| 2,673
| 5.438127
| 0.257525
| 0.063961
| 0.078721
| 0.051661
| 0.390529
| 0.295203
| 0.202952
| 0.137761
| 0
| 0
| 0
| 0.020388
| 0.22933
| 2,673
| 105
| 87
| 25.457143
| 0.768932
| 0
| 0
| 0.222222
| 0
| 0
| 0.205013
| 0.061728
| 0
| 0
| 0
| 0
| 0.135802
| 1
| 0.08642
| false
| 0.012346
| 0.074074
| 0
| 0.17284
| 0.098765
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab7dc38ed8951fa7685f47dd886c5e04e6fb81b4
| 1,879
|
py
|
Python
|
src/predict.py
|
alphasea-dapp/alphasea-example-model
|
9d14072425a8e38cbef49de752fb4bd8bab0ad18
|
[
"CC0-1.0"
] | 8
|
2022-01-25T14:28:09.000Z
|
2022-03-31T04:35:27.000Z
|
src/predict.py
|
yuzi-ziyu/alphasea-example-model
|
9d14072425a8e38cbef49de752fb4bd8bab0ad18
|
[
"CC0-1.0"
] | null | null | null |
src/predict.py
|
yuzi-ziyu/alphasea-example-model
|
9d14072425a8e38cbef49de752fb4bd8bab0ad18
|
[
"CC0-1.0"
] | 8
|
2022-01-26T14:31:26.000Z
|
2022-03-23T16:11:06.000Z
|
import os
import re
import joblib
import numpy as np
import pandas as pd
import traceback
from .logger import create_logger
from .ml_utils import fetch_ohlcv, normalize_position
from .agent_api import submit_prediction
agent_base_url = os.getenv('ALPHASEA_AGENT_BASE_URL')
model_id = os.getenv('ALPHASEA_MODEL_ID')
model_path = os.getenv('ALPHASEA_MODEL_PATH')
log_level = os.getenv('ALPHASEA_LOG_LEVEL')
position_noise = float(os.getenv('ALPHASEA_POSITION_NOISE'))
if not re.match(r'^[a-z_][a-z0-9_]{3,30}$', model_id):
raise Exception('model_id must be ^[a-z_][a-z0-9_]{3,30}$')
def predict_job(dry_run=False):
logger = create_logger(log_level)
model = joblib.load(model_path)
# fetch data
interval_sec = 60 * 60
max_retry_count = 5
for _ in range(max_retry_count):
try:
df = fetch_ohlcv(symbols=model.symbols, logger=logger, interval_sec=interval_sec)
max_timestamp = df.index.get_level_values('timestamp').max()
df = df.loc[max_timestamp - pd.to_timedelta(model.max_data_sec, unit='S') <= df.index.get_level_values('timestamp')]
break
        except Exception as e:
            logger.error(e)
            logger.error(traceback.format_exc())
            logger.info('fetch_ohlcv error. retrying')
    else:
        # all retries failed: stop here instead of proceeding with undefined data
        raise RuntimeError('fetch_ohlcv failed {} times; giving up'.format(max_retry_count))
# predict
df['position'] = model.predict(df)
# add noise (for debug)
df['position'] += np.random.normal(0, position_noise, size=df.shape[0])
normalize_position(df)
# filter last timestamp
df = df.loc[df.index.get_level_values('timestamp') == max_timestamp]
# submit
if dry_run:
logger.info('dry run submit {}'.format(df))
else:
result = submit_prediction(
agent_base_url=agent_base_url,
model_id=model_id,
df=df,
prediction_license='CC0-1.0'
)
logger.info(result)
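# Hedged entry point (an assumption; the original service may schedule
# predict_job differently, e.g. via cron or a worker process):
if __name__ == '__main__':
    predict_job(dry_run=True)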
| 31.316667
| 128
| 0.668973
| 267
| 1,879
| 4.453184
| 0.374532
| 0.035324
| 0.067283
| 0.037847
| 0.174937
| 0.095879
| 0.070648
| 0
| 0
| 0
| 0
| 0.01355
| 0.214476
| 1,879
| 59
| 129
| 31.847458
| 0.792005
| 0.036722
| 0
| 0
| 0
| 0
| 0.143016
| 0.050998
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022222
| false
| 0
| 0.2
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab7dde0847107e7eb65decc348198fe2bd6560c4
| 545
|
py
|
Python
|
lesson8/lesson8.py
|
arsalanses/opencv-python
|
2ef56589559e44a7fea351cde513ef0320cb591e
|
[
"MIT"
] | 2
|
2019-01-05T12:40:32.000Z
|
2019-02-21T22:54:45.000Z
|
lesson8/lesson8.py
|
arsalanses/opencv-python
|
2ef56589559e44a7fea351cde513ef0320cb591e
|
[
"MIT"
] | null | null | null |
lesson8/lesson8.py
|
arsalanses/opencv-python
|
2ef56589559e44a7fea351cde513ef0320cb591e
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
img1 = cv2.imread('image.tif')
img2 = cv2.imread('imglogo.png')
rows, cols, channels = img2.shape
roi = img1[0:rows, 0:cols]  # region of the base image where the logo will be placed
# build a mask of the logo: near-white background -> 0, logo pixels -> 255
img2gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(img2gray, 230, 255, cv2.THRESH_BINARY_INV)
mask_inv = cv2.bitwise_not(mask)
# black out the logo area in the ROI, keep only the logo pixels from img2, then combine
img1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
img2_fg = cv2.bitwise_and(img2, img2, mask=mask)
dst = cv2.add(img1_bg, img2_fg)
img1[0:rows, 0:cols] = dst
cv2.imshow('final', img1)
cv2.waitKey(0)
cv2.destroyAllWindows()
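# Quick sanity check (an addition, not in the original lesson): the two masks are
# exact complements, so their saturated sum is pure white everywhere:
assert np.all(cv2.add(mask, mask_inv) == 255)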
| 20.185185
| 68
| 0.722936
| 92
| 545
| 4.152174
| 0.445652
| 0.078534
| 0.04712
| 0.052356
| 0.073298
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086498
| 0.130275
| 545
| 26
| 69
| 20.961538
| 0.719409
| 0
| 0
| 0
| 0
| 0
| 0.045872
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab7f5e28b56225a5e29228c37b55b000fd5d2d90
| 2,378
|
py
|
Python
|
firmware_flash/forms.py
|
fossabot/fermentrack
|
3070bc14791b1482ec661607005ebda961ca3a8f
|
[
"MIT"
] | 114
|
2017-03-19T22:51:45.000Z
|
2022-01-18T06:00:23.000Z
|
firmware_flash/forms.py
|
fossabot/fermentrack
|
3070bc14791b1482ec661607005ebda961ca3a8f
|
[
"MIT"
] | 392
|
2017-03-12T17:09:16.000Z
|
2022-03-31T22:08:45.000Z
|
firmware_flash/forms.py
|
fossabot/fermentrack
|
3070bc14791b1482ec661607005ebda961ca3a8f
|
[
"MIT"
] | 67
|
2017-03-19T18:11:54.000Z
|
2022-01-31T12:12:17.000Z
|
from django.contrib.auth.models import User
from django import forms
from constance import config
#from constance.admin import ConstanceForm
from django.conf import settings
from .models import DeviceFamily, Firmware, Board
###################################################################################################################
# Firmware Flash Forms
###################################################################################################################
class FirmwareFamilyForm(forms.Form):
DEVICE_FAMILY_CHOICES = (
)
device_family = forms.ChoiceField(label="Device Family",
widget=forms.Select(attrs={'class': 'form-control',
'data-toggle': 'select'}),
choices=DEVICE_FAMILY_CHOICES, required=True)
def __init__(self, *args, **kwargs):
super(FirmwareFamilyForm, self).__init__(*args, **kwargs)
for this_field in self.fields:
self.fields[this_field].widget.attrs['class'] = "form-control"
family_choices = [(fam.id, fam.name) for fam in DeviceFamily.objects.all()]
self.fields['device_family'].choices = family_choices
class BoardForm(forms.Form):
DEVICE_BOARD_CHOICES = (
)
board_type = forms.ChoiceField(label="Board Type",
widget=forms.Select(attrs={'class': 'form-control', 'data-toggle': 'select'}),
choices=DEVICE_BOARD_CHOICES, required=True)
def set_choices(self, family):
# There's probably a better way of doing this
board_choices = [(brd.id, brd.name) for brd in Board.objects.filter(family=family)]
self.fields['board_type'].choices = board_choices
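# Hedged usage sketch (hypothetical view code, not part of this module):
#     form = BoardForm()
#     form.set_choices(family=my_device_family)  # my_device_family: a DeviceFamily instance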
# class GuidedDeviceFlashForm(forms.Form):
# DEVICE_FAMILY_CHOICES = GuidedDeviceSelectForm.DEVICE_FAMILY_CHOICES
#
# device_family = forms.ChoiceField(label="Device Family",
# widget=forms.Select(attrs={'class': 'form-control',
# 'data-toggle': 'select'}),
# choices=DEVICE_FAMILY_CHOICES, required=True)
# should_flash_device = forms.BooleanField(widget=forms.HiddenInput, required=False, initial=False)
#
#
| 40.305085
| 115
| 0.547519
| 220
| 2,378
| 5.754545
| 0.318182
| 0.094787
| 0.090047
| 0.066351
| 0.328594
| 0.299368
| 0.299368
| 0.299368
| 0.299368
| 0.299368
| 0
| 0
| 0.261564
| 2,378
| 58
| 116
| 41
| 0.720957
| 0.272918
| 0
| 0
| 0
| 0
| 0.088156
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.185185
| 0
| 0.481481
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab8344d8efbd20dfefa546d6c2aa1f9f9ddae6c5
| 664
|
py
|
Python
|
sallyforth/inliner.py
|
russolsen/sallyforth
|
480f6df6a5e2678829bc86b73f89c88565e28696
|
[
"Apache-2.0"
] | 13
|
2020-04-14T16:48:10.000Z
|
2022-02-04T22:18:00.000Z
|
sallyforth/inliner.py
|
russolsen/sallyforth
|
480f6df6a5e2678829bc86b73f89c88565e28696
|
[
"Apache-2.0"
] | 1
|
2020-06-13T12:56:14.000Z
|
2020-06-28T19:52:46.000Z
|
sallyforth/inliner.py
|
russolsen/sallyforth
|
480f6df6a5e2678829bc86b73f89c88565e28696
|
[
"Apache-2.0"
] | 1
|
2021-09-11T09:36:29.000Z
|
2021-09-11T09:36:29.000Z
|
from wrappers import inner_f
def compile_f(contents, attributes, doc, name):
new_contents = []
for f in contents:
sub_contents = getattr(f, "contents", None)
if sub_contents:
new_contents.extend(sub_contents)
else:
new_contents.append(f)
new_func = inner_f(new_contents)
if attributes:
new_func.__dict__ = attributes.copy()
new_func.__doc__ = doc
new_func.name = name
return new_func
def compile_word_f(f, name=None):
contents = getattr(f, 'contents', None)
if contents and len(contents) > 1:
return compile_f(contents, f.__dict__, f.__doc__, name)
return f
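# Hedged note: compile_word_f only inlines composite words (a 'contents' list with
# more than one entry); primitives and single-word definitions pass through unchanged.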
| 28.869565
| 63
| 0.653614
| 90
| 664
| 4.444444
| 0.322222
| 0.0875
| 0.08
| 0.12
| 0.15
| 0.15
| 0
| 0
| 0
| 0
| 0
| 0.00202
| 0.254518
| 664
| 22
| 64
| 30.181818
| 0.806061
| 0
| 0
| 0
| 0
| 0
| 0.024096
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.05
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab84ef6014bf90d094474b93c555171e51108d19
| 540
|
py
|
Python
|
bin/newick_reformat.py
|
mooninrain/viral-mutation
|
328182ca2acfe5d30bc149e0c95d919123db24cf
|
[
"MIT"
] | 79
|
2020-07-10T05:11:27.000Z
|
2022-03-30T14:16:07.000Z
|
bin/newick_reformat.py
|
mooninrain/viral-mutation
|
328182ca2acfe5d30bc149e0c95d919123db24cf
|
[
"MIT"
] | 7
|
2020-07-09T01:34:26.000Z
|
2022-03-09T05:36:44.000Z
|
bin/newick_reformat.py
|
mooninrain/viral-mutation
|
328182ca2acfe5d30bc149e0c95d919123db24cf
|
[
"MIT"
] | 35
|
2020-09-29T19:23:56.000Z
|
2022-03-13T04:59:57.000Z
|
import sys
newick = ''
with open(sys.argv[1]) as f:
orig = f.read().rstrip()
orig = orig.replace('(', '(\n').replace(',', '\n,\n').replace(')', ')\n')
for line in orig.split('\n'):
fields = line.rstrip().split(':')
if len(fields) == 0:
continue
elif len(fields) == 1:
newick += fields[0]
else:
prefix, suffix = fields
newick += prefix[:30]
if suffix != '':
newick += ':'
newick += suffix
with open(sys.argv[2], 'w') as of:
of.write(newick + '\n')
| 21.6
| 73
| 0.492593
| 69
| 540
| 3.855072
| 0.463768
| 0.090226
| 0.082707
| 0.112782
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018229
| 0.288889
| 540
| 24
| 74
| 22.5
| 0.674479
| 0
| 0
| 0
| 0
| 0
| 0.038889
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.052632
| 0
| 0.052632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab861edeb3c8fda08cffdf8b9b6039df511fe550
| 1,830
|
py
|
Python
|
src/logging_with_motion_commander/logging_with_motion_commander.py
|
MarkAkbashev/crazyflie-dataset
|
819c476776ebc8b1c4b9a9139390ec3f1b0138e0
|
[
"MIT"
] | null | null | null |
src/logging_with_motion_commander/logging_with_motion_commander.py
|
MarkAkbashev/crazyflie-dataset
|
819c476776ebc8b1c4b9a9139390ec3f1b0138e0
|
[
"MIT"
] | null | null | null |
src/logging_with_motion_commander/logging_with_motion_commander.py
|
MarkAkbashev/crazyflie-dataset
|
819c476776ebc8b1c4b9a9139390ec3f1b0138e0
|
[
"MIT"
] | 1
|
2022-03-12T12:12:24.000Z
|
2022-03-12T12:12:24.000Z
|
# -*- coding: utf-8 -*-
#
# Based on bitcraze example project:
# https://github.com/bitcraze/crazyflie-lib-python/blob/master/examples/step-by-step/sbs_motion_commander.py
import logging
import sys
import time
from threading import Event
import cflib.crtp
from cflib.crazyflie import Crazyflie
from cflib.crazyflie.log import LogConfig
from cflib.crazyflie.syncCrazyflie import SyncCrazyflie
from cflib.positioning.motion_commander import MotionCommander
from cflib.utils import uri_helper
URI_interface = 'radio://0/80/2M/E7E7E7E7E7'  # alternative for local debugging: 'debug://0/1/E7E7E7E7E7'
URI = uri_helper.uri_from_env(default=URI_interface)
DEFAULT_HEIGHT = 0.5
logging.basicConfig(level=logging.ERROR)
position_estimate = [0, 0]
def move_linear_simple(scf):
with MotionCommander(scf, default_height=DEFAULT_HEIGHT) as mc:
time.sleep(1)
mc.forward(0.5)
time.sleep(1)
mc.turn_left(180)
time.sleep(1)
mc.forward(0.5)
time.sleep(1)
def take_off_simple(scf):
with MotionCommander(scf, default_height=DEFAULT_HEIGHT) as mc:
time.sleep(3)
mc.stop()
def log_pos_callback(timestamp, data, logconf):
print(data)
global position_estimate
position_estimate[0] = data['stateEstimate.x']
position_estimate[1] = data['stateEstimate.y']
if __name__ == '__main__':
cflib.crtp.init_drivers()
with SyncCrazyflie(URI, cf=Crazyflie(rw_cache='./cache')) as scf:
logconf = LogConfig(name='Position', period_in_ms=10)
logconf.add_variable('stateEstimate.x', 'float')
logconf.add_variable('stateEstimate.y', 'float')
scf.cf.log.add_config(logconf)
logconf.data_received_cb.add_callback(log_pos_callback)
logconf.start()
take_off_simple(scf)
move_linear_simple(scf)
logconf.stop()
| 27.313433
| 108
| 0.712022
| 246
| 1,830
| 5.097561
| 0.418699
| 0.035885
| 0.031898
| 0.028708
| 0.153908
| 0.153908
| 0.153908
| 0.153908
| 0.153908
| 0.153908
| 0
| 0.02452
| 0.17541
| 1,830
| 66
| 109
| 27.727273
| 0.806494
| 0.102732
| 0
| 0.177778
| 0
| 0
| 0.072872
| 0.015922
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.222222
| 0
| 0.288889
| 0.022222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab8801ccef53e30715caf216a3164da6aa09d611
| 2,745
|
py
|
Python
|
discordbot/src/commands/track.py
|
knabb215/discord-masz
|
a1b8434ca8e6e31cb61a8a6069338fdd34698ea2
|
[
"MIT"
] | null | null | null |
discordbot/src/commands/track.py
|
knabb215/discord-masz
|
a1b8434ca8e6e31cb61a8a6069338fdd34698ea2
|
[
"MIT"
] | null | null | null |
discordbot/src/commands/track.py
|
knabb215/discord-masz
|
a1b8434ca8e6e31cb61a8a6069338fdd34698ea2
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from discord.errors import NotFound
from discord import Embed
from discord_slash.utils.manage_commands import create_option, SlashCommandOptionType
from data import get_invites_by_guild_and_code
from .infrastructure import record_usage, CommandDefinition, registered_guild_and_admin_or_mod_only
async def _track(ctx, code):
await registered_guild_and_admin_or_mod_only(ctx)
record_usage(ctx)
if "discord" not in code:
full_code = f"https://discord.gg/{code}"
else:
full_code = code
invites = await get_invites_by_guild_and_code(ctx.guild.id, full_code)
if not invites:
return await ctx.send("Invite not found in database.")
try:
creator = await ctx.bot.fetch_user(invites[0]["InviteIssuerId"])
except NotFound:
creator = None
invitees = {}
count = 0 # only do this for the first 20 users
for invite in invites:
        if count >= 20:
break
if invite["JoinedUserId"] not in invitees:
count += 1
invitees[invite["JoinedUserId"]] = await ctx.bot.fetch_user(invite["JoinedUserId"])
embed = Embed()
if creator:
embed.set_author(name=f"{creator.name}#{creator.discriminator}", icon_url=creator.avatar_url, url=creator.avatar_url)
embed.description = f"`{full_code}` was created by {creator.mention} at `{invites[0]['InviteCreatedAt'].strftime('%d %b %Y %H:%M:%S')}`."
else:
embed.description = f"`{full_code}` was created by `{creator.id}` at `{invites[0]['InviteCreatedAt'].strftime('%d %b %Y %H:%M:%S')}`."
used_by = ""
for invite in invites:
if len(used_by) > 900:
used_by += "[...]"
break
if invitees.get(invite['JoinedUserId']):
used_by += f"- `{invitees[invite['JoinedUserId']].name}#{invitees[invite['JoinedUserId']].discriminator}` `{invite['JoinedUserId']}` - `{invite['JoinedAt'].strftime('%d %b %Y %H:%M:%S')}`\n"
else:
used_by += f"- `{invite['JoinedUserId']}` - `{invite['JoinedAt'].strftime('%d %b %Y %H:%M:%S')}`\n"
embed.add_field(name=f"Used by [{len(invites)}]", value=used_by, inline=False)
embed.set_footer(text=f"Invite: {full_code}")
embed.timestamp = datetime.now()
return await ctx.send(embed=embed)
track = CommandDefinition(
func=_track,
short_help="Track an invite, its creator and its users.",
long_help="Track an invite in your guild, its creator and its users.\nEither enter the invite code or the url in the format `https://discord.gg/<code>`.",
usage="track <code|url>",
options=[
create_option("code", "the invite code or link.", SlashCommandOptionType.STRING, False)
]
)
| 40.367647
| 202
| 0.655373
| 367
| 2,745
| 4.760218
| 0.318801
| 0.082427
| 0.022896
| 0.025186
| 0.28277
| 0.212936
| 0.185461
| 0.148827
| 0.148827
| 0.098454
| 0
| 0.005495
| 0.204372
| 2,745
| 67
| 203
| 40.970149
| 0.794414
| 0.01275
| 0
| 0.125
| 0
| 0.089286
| 0.340842
| 0.123338
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.107143
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab8c19a59433d9a86423eb4aa00bb2f9b0bfb25f
| 285
|
py
|
Python
|
temp/teste.py
|
Aquino21/Pereira
|
accddd90bf6178060a448803489da3358ee94ba2
|
[
"MIT"
] | 1
|
2021-09-04T18:50:49.000Z
|
2021-09-04T18:50:49.000Z
|
temp/teste.py
|
Aquino21/Pereira
|
accddd90bf6178060a448803489da3358ee94ba2
|
[
"MIT"
] | null | null | null |
temp/teste.py
|
Aquino21/Pereira
|
accddd90bf6178060a448803489da3358ee94ba2
|
[
"MIT"
] | 3
|
2020-10-05T01:53:57.000Z
|
2021-09-14T23:57:47.000Z
|
list = ['Tiago','Ivan','Henrique', 'Paulo']
nome = 'teste'
nome = input('Digite um nome: ') # Python input prompt
print(nome)
for i in range(len(list)):
if list[i] == nome:
print(nome,'is number',i + 1,'on the list')
break
else:
print(nome,'is not on the list')
| 28.5
| 54
| 0.6
| 45
| 285
| 3.8
| 0.6
| 0.157895
| 0.128655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004484
| 0.217544
| 285
| 10
| 55
| 28.5
| 0.762332
| 0.066667
| 0
| 0
| 0
| 0
| 0.30566
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.3
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab8c74faa97d91935186d109db588267fe9dc817
| 3,221
|
py
|
Python
|
game_ai.py
|
arafat-ar13/Kivy-App
|
535de0a4f440a6ed97d08bd0b7d6f26e41af8bdb
|
[
"MIT"
] | 3
|
2021-01-03T05:27:31.000Z
|
2022-01-18T02:59:36.000Z
|
game_ai.py
|
arafat-ar13/Kivy-App
|
535de0a4f440a6ed97d08bd0b7d6f26e41af8bdb
|
[
"MIT"
] | null | null | null |
game_ai.py
|
arafat-ar13/Kivy-App
|
535de0a4f440a6ed97d08bd0b7d6f26e41af8bdb
|
[
"MIT"
] | 3
|
2020-12-06T08:33:04.000Z
|
2021-10-01T11:08:15.000Z
|
import numpy as np
import math
import random
class Ai():
def __init__(self, resolution, all_pos: list, all_ids, options: list):
self.board_resolution = resolution
self.all_pos = all_pos
self.player_option, self.ai_option = options
self.available_tiles = self.all_pos[:]
self.ai_move = []
self.id_array = np.array(all_ids)
self.id_array = np.reshape(self.id_array, (3, 3))
self.player_moves = []
self.ai_moves = []
# All the possible matches
# Board:
# np.array([1 2 3]
# [4 5 6]
# [7 8 9])
# Sideway matches
self.a_match_1 = self.id_array[0, 0], self.id_array[1, 1], self.id_array[2, 2] # (1, 5, 9)
self.a_match_2 = self.id_array[0, 2], self.id_array[1, 1], self.id_array[2, 0] # (3, 5, 7)
# Straight matches
self.a_match_3 = self.id_array[0, 0], self.id_array[1, 0], self.id_array[2, 0] # (1, 4, 7)
self.a_match_4 = self.id_array[0, 1], self.id_array[1, 1], self.id_array[2, 1] # (2, 5, 8)
self.a_match_5 = self.id_array[0, 2], self.id_array[1, 2], self.id_array[2, 2] # (3, 6, 9)
# Side matches
self.a_match_6 = self.id_array[0, 0], self.id_array[0, 1], self.id_array[0, 2] # (1, 2, 3)
self.a_match_7 = self.id_array[1, 0], self.id_array[1, 1], self.id_array[1, 2] # (4, 5, 6)
self.a_match_8 = self.id_array[2, 0], self.id_array[2, 1], self.id_array[2, 2] # (7, 8, 9)
self.all_matches = [self.a_match_1, self.a_match_2, self.a_match_3,
self.a_match_4,
self.a_match_5,
self.a_match_6,
self.a_match_7,
self.a_match_8,
]
def calculate_move(self, user_pos: list):
self.available_tiles.remove(user_pos)
if len(self.available_tiles) > 1:
self.ai_move = random.choice(self.available_tiles)
self.available_tiles.remove(self.ai_move)
def decide_winner(self, butt_dict, player_butt_id, ai_butt_id):
self.player_moves.append(player_butt_id)
self.ai_moves.append(ai_butt_id)
# won_buttons keep of the buttons to illuminate (or change color)
won_buttons = tuple()
won = ""
# Looping through all the matches and checking if any of those matches are in Player/Ai moves
for matches in self.all_matches:
if set(matches).issubset(self.player_moves) or set(matches).issubset(self.ai_moves):
won_buttons = matches
won = "Player" if set(matches).issubset(self.player_moves) else "Ai"
# This changes the color of the buttons that won the match
for button in butt_dict.values():
if button[1] in won_buttons:
button[0].background_normal = ""
# Green when the player wins and red when the Ai wins
button[0].background_color = (0, 1, 0, 1) if won == "Player" else (1, 0, 0, 1)
return won
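# Hedged smoke test (assumptions: nine row-major button ids 1..9; the resolution,
# positions and marker options are placeholders, not the app's real values):
_demo_ai = Ai(resolution=300, all_pos=[(r, c) for r in range(3) for c in range(3)],
              all_ids=list(range(1, 10)), options=["X", "O"])
assert _demo_ai.a_match_1 == (1, 5, 9)  # main diagonal of the board above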
| 43.527027
| 120
| 0.555107
| 484
| 3,221
| 3.469008
| 0.18595
| 0.096486
| 0.176891
| 0.057177
| 0.329958
| 0.223347
| 0.19059
| 0.148898
| 0.104824
| 0
| 0
| 0.05146
| 0.330332
| 3,221
| 74
| 121
| 43.527027
| 0.726936
| 0.167029
| 0
| 0
| 0
| 0
| 0.005257
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.0625
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab8fb94a81717790890513a389148cc7842737d4
| 3,063
|
py
|
Python
|
contrib/python/podman/libs/__init__.py
|
mavit/libpod
|
3f0e2367c222fe362031f806f002fb8a62be6360
|
[
"Apache-2.0"
] | null | null | null |
contrib/python/podman/libs/__init__.py
|
mavit/libpod
|
3f0e2367c222fe362031f806f002fb8a62be6360
|
[
"Apache-2.0"
] | null | null | null |
contrib/python/podman/libs/__init__.py
|
mavit/libpod
|
3f0e2367c222fe362031f806f002fb8a62be6360
|
[
"Apache-2.0"
] | null | null | null |
"""Support files for podman API implementation."""
import datetime
import re
import threading
__all__ = [
'cached_property',
'datetime_parse',
'datetime_format',
]
class cached_property(object):
"""cached_property() - computed once per instance, cached as attribute.
Maybe this will make a future version of python.
"""
def __init__(self, func):
"""Construct context manager."""
self.func = func
self.__doc__ = func.__doc__
self.lock = threading.RLock()
def __get__(self, instance, cls=None):
"""Retrieve previous value, or call func()."""
if instance is None:
return self
attrname = self.func.__name__
try:
cache = instance.__dict__
except AttributeError: # objects with __slots__ have no __dict__
msg = ("No '__dict__' attribute on {}"
" instance to cache {} property.").format(
repr(type(instance).__name__), repr(attrname))
raise TypeError(msg) from None
with self.lock:
# check if another thread filled cache while we awaited lock
if attrname not in cache:
cache[attrname] = self.func(instance)
return cache[attrname]
def datetime_parse(string):
"""Convert timestamp to datetime.
Because date/time parsing in python is still pedantically stupid,
we rip the input string apart throwing out the stop characters etc;
then rebuild a string strptime() can parse. Igit!
- Python >3.7 will address colons in the UTC offset.
- There is no ETA on microseconds > 6 digits.
- And giving an offset and timezone name...
# match: 2018-05-08T14:12:53.797795191-07:00
# match: 2018-05-08T18:24:52.753227-07:00
# match: 2018-05-08 14:12:53.797795191 -0700 MST
# match: 2018-05-09T10:45:57.576002 (python isoformat())
Some people, when confronted with a problem, think “I know,
I'll use regular expressions.” Now they have two problems.
-- Jamie Zawinski
"""
ts = re.compile(r'^(\d+)-(\d+)-(\d+)'
r'[ T]?(\d+):(\d+):(\d+).(\d+)'
r' *([-+][\d:]{4,5})? *')
x = ts.match(string)
if x is None:
raise ValueError('Unable to parse {}'.format(string))
    # converting everything to int() is not worth the readability hit
igit_proof = '{}T{}.{}{}'.format(
'-'.join(x.group(1, 2, 3)),
':'.join(x.group(4, 5, 6)),
x.group(7)[0:6],
x.group(8).replace(':', '') if x.group(8) else '',
)
    fmt = '%Y-%m-%dT%H:%M:%S.%f'  # avoid shadowing the builtin format()
    if x.group(8):
        fmt += '%z'
    return datetime.datetime.strptime(igit_proof, fmt)
def datetime_format(dt):
"""Format datetime in consistent style."""
if isinstance(dt, str):
return datetime_parse(dt).isoformat()
elif isinstance(dt, datetime.datetime):
return dt.isoformat()
else:
raise ValueError('Unable to format {}. Type {} not supported.'.format(
dt, type(dt)))
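# Hedged examples, matching the formats documented in datetime_parse's docstring:
#     datetime_parse('2018-05-09T10:45:57.576002').isoformat()
#         -> '2018-05-09T10:45:57.576002'
#     datetime_format('2018-05-08T18:24:52.753227-07:00')
#         -> '2018-05-08T18:24:52.753227-07:00'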
| 31.57732
| 78
| 0.595168
| 393
| 3,063
| 4.501272
| 0.506361
| 0.02035
| 0.024873
| 0.014698
| 0.016959
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051477
| 0.27065
| 3,063
| 96
| 79
| 31.90625
| 0.740376
| 0.366961
| 0
| 0
| 0
| 0
| 0.145109
| 0.01413
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075472
| false
| 0
| 0.056604
| 0
| 0.245283
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab9374dbdafd3eac8aaf116c21970aab394c9fbe
| 7,421
|
py
|
Python
|
girder_histogram/__init__.py
|
abcsFrederick/histogram
|
88eda7366e5d7a38272dbc3c2f919d7c207c870a
|
[
"Apache-2.0"
] | null | null | null |
girder_histogram/__init__.py
|
abcsFrederick/histogram
|
88eda7366e5d7a38272dbc3c2f919d7c207c870a
|
[
"Apache-2.0"
] | 1
|
2019-09-11T17:50:04.000Z
|
2019-09-11T17:50:04.000Z
|
girder_histogram/__init__.py
|
abcsFrederick/histogram
|
88eda7366e5d7a38272dbc3c2f919d7c207c870a
|
[
"Apache-2.0"
] | 1
|
2019-06-18T17:55:45.000Z
|
2019-06-18T17:55:45.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Girder plugin framework and tests adapted from Kitware Inc. source and
# documentation by the Imaging and Visualization Group, Advanced Biomedical
# Computational Science, Frederick National Laboratory for Cancer Research.
#
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import datetime
import json
from bson.objectid import ObjectId
from girder import plugin
from girder import events, logger
from girder.settings import SettingDefault
from girder.exceptions import ValidationException
from girder.models.item import Item
from girder.models.notification import Notification
from girder.utility import setting_utilities
from girder_jobs.constants import JobStatus
from girder_jobs.models.job import Job
from .constants import PluginSettings
from .rest import HistogramResource
from .models.histogram import Histogram
from girder.utility.model_importer import ModelImporter
def _onRemoveItem(event):
"""
When a resource containing histograms is about to be deleted, we delete
all of the histograms that are attached to it.
"""
histogramModel = Histogram()
for histogram in Histogram().find({'itemId': ObjectId(event.info['_id'])}):
histogramModel.remove(histogram)
def _onRemoveFile(event):
"""
When a histogram file is deleted, we remove the parent histogram.
"""
histogramModel = Histogram()
for histogram in Histogram().find({'fileId': ObjectId(event.info['_id'])}):
histogramModel.remove(histogram, keepFile=True)
def _onUpload(event):
"""
Histogram creation can be requested on file upload by passing a reference
'histogram' that is a JSON object of the following form:
{
"histogram": {
"bins": 255,
"label": True,
"bitmask": False
}
}
bins, label, and bitmask arguments are optional
"""
file_ = event.info['file']
user = event.info['currentUser']
token = event.info['currentToken']
if 'itemId' not in file_:
return
try:
ref = json.loads(event.info.get('reference', ''))
except (TypeError, ValueError):
return
if not isinstance(ref, dict):
return
if ref.get('isHistogram'):
# jobId = ref.get('jobId')
fakeId = ref.get('fakeId')
if not fakeId:
msg = 'Histogram file %s uploaded without fakeId reference.'
logger.warning(msg % file_['_id'])
return
histograms = list(Histogram().find({'fakeId': fakeId}, limit=2))
if len(histograms) == 1:
histogram = histograms[0]
del histogram['expected']
histogram['fileId'] = file_['_id']
Histogram().save(histogram)
else:
msg = 'Failed to retrieve histogram for file %s using fakeId %s.'
logger.warning(msg % (file_['_id'], fakeId))
return
elif isinstance(ref.get('histogram'), dict):
item = Item().load(file_['itemId'], force=True)
Histogram().createHistogram(item, file_, user, token,
**ref['histogram'])
def _updateJob(event):
"""
Called when a job is saved, updated, or removed. If this is a histogram
job and it is ended, clean up after it.
"""
if event.name == 'jobs.job.update.after':
job = event.info['job']
else:
job = event.info
meta = job.get('meta', {})
if (meta.get('creator') != 'histogram' or
meta.get('task') != 'createHistogram'):
return
status = job['status']
if event.name == 'model.job.remove' and status not in (
JobStatus.ERROR, JobStatus.CANCELED, JobStatus.SUCCESS):
status = JobStatus.CANCELED
if status not in (JobStatus.ERROR, JobStatus.CANCELED, JobStatus.SUCCESS):
return
histograms = list(Histogram().find({'fakeId': meta.get('fakeId')}, limit=2))
if len(histograms) != 1:
msg = 'Failed to retrieve histogram using fakeId %s.'
logger.warning(msg % meta.get('fakeId'))
return
histogram = histograms[0]
if histogram.get('expected'):
# We can get a SUCCESS message before we get the upload message, so
# don't clear the expected status on success.
if status != JobStatus.SUCCESS:
del histogram['expected']
notify = histogram.get('notify')
msg = None
if notify:
del histogram['notify']
if status == JobStatus.SUCCESS:
msg = 'Histogram created'
elif status == JobStatus.CANCELED:
msg = 'Histogram creation canceled'
else: # ERROR
msg = 'FAILED: Histogram creation failed'
msg += ' for item %s' % histogram['itemId']
msg += ', file %s' % histogram['fileId']
if status == JobStatus.SUCCESS:
Histogram().save(histogram)
else:
Histogram().remove(histogram)
if msg and event.name != 'model.job.remove':
Job().updateJob(job, progressMessage=msg)
if notify:
Notification().createNotification(
type='histogram.finished_histogram',
data={
'histogram_id': histogram['_id'],
'item_id': histogram['itemId'],
'file_id': histogram['fileId'],
'fakeId': histogram['fakeId'],
'success': status == JobStatus.SUCCESS,
'status': status
},
user={'_id': job.get('userId')},
expires=datetime.datetime.utcnow() + datetime.timedelta(seconds=30)
)
@setting_utilities.validator({
PluginSettings.DEFAULT_BINS,
})
def validateNonnegativeInteger(doc):
val = doc['value']
try:
val = int(val)
if val < 0:
raise ValueError
except ValueError:
msg = '%s must be a non-negative integer.' % doc['key']
raise ValidationException(msg, 'value')
doc['value'] = val
# Default settings values
SettingDefault.defaults.update({
PluginSettings.DEFAULT_BINS: 256,
})
class HistogramPlugin(plugin.GirderPlugin):
DISPLAY_NAME = 'Histogram'
CLIENT_SOURCE_PATH = 'web_client'
def load(self, info):
ModelImporter.registerModel('histogram', Histogram, 'histogram')
info['apiRoot'].histogram = HistogramResource()
events.bind('model.item.remove', 'Histogram', _onRemoveItem)
events.bind('model.file.remove', 'Histogram', _onRemoveFile)
events.bind('data.process', 'Histogram', _onUpload)
events.bind('jobs.job.update.after', 'Histogram', _updateJob)
events.bind('model.job.save', 'Histogram', _updateJob)
events.bind('model.job.remove', 'Histogram', _updateJob)
| 34.67757
| 80
| 0.622558
| 823
| 7,421
| 5.566221
| 0.320778
| 0.021829
| 0.01921
| 0.015717
| 0.153678
| 0.1253
| 0.080332
| 0.025322
| 0.025322
| 0
| 0
| 0.003573
| 0.245789
| 7,421
| 213
| 81
| 34.840376
| 0.814901
| 0.211966
| 0
| 0.2
| 0
| 0
| 0.160194
| 0.012585
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042857
| false
| 0
| 0.121429
| 0
| 0.242857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab938679789678606b7ac691201fd8e915036076
| 5,325
|
py
|
Python
|
app_run.py
|
michaellengyel/sheep_environment
|
6a272155b0d03597efc63097da2a96297b4345b3
|
[
"Apache-2.0"
] | null | null | null |
app_run.py
|
michaellengyel/sheep_environment
|
6a272155b0d03597efc63097da2a96297b4345b3
|
[
"Apache-2.0"
] | null | null | null |
app_run.py
|
michaellengyel/sheep_environment
|
6a272155b0d03597efc63097da2a96297b4345b3
|
[
"Apache-2.0"
] | null | null | null |
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
import tensorflow as tf
from tqdm import tqdm
from collections import deque
from gym.bug_env.environment import Environment
import numpy as np
import random
import time
import os
LOAD_MODEL = "benchmarked_models/2x256____34.40max___26.53avg___18.00min__1621204339.model"
DISCOUNT = 0.99
REPLAY_MEMORY_SIZE = 50_000 # How many last steps to keep for model training
MIN_REPLAY_MEMORY_SIZE = 1_000 # Minimum number of steps in a memory to start training
MINIBATCH_SIZE = 64 # How many steps (samples) to use for training
UPDATE_TARGET_EVERY = 5 # Terminal states (end of episodes)
MODEL_NAME = '2x256'
MIN_REWARD = 0 # For model save
MEMORY_FRACTION = 0.20
# Environment settings
EPISODES = 20_000
# Stats settings
AGGREGATE_STATS_EVERY = 50 # episodes
SHOW_PREVIEW = True
class DQNAgent:
def __init__(self, env):
# Main model (gets trained every step)
self.model = self.create_model(env)
# Target model (this is what we .predict against every step)
self.target_model = self.create_model(env)
self.target_model.set_weights(self.model.get_weights())
self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)
self.target_update_counter = 0
self.env = env
    def create_model(self, env):
        if LOAD_MODEL is not None:
            print(f"Loading {LOAD_MODEL}")
            model = load_model(LOAD_MODEL)
            print(f"Model {LOAD_MODEL} loaded!")
            return model
        else:
            # Returning None here would only fail later in set_weights();
            # fail fast instead.
            raise RuntimeError("No model to load! Point LOAD_MODEL at a saved model.")
def update_replay_memory(self, transition):
self.replay_memory.append(transition)
def get_qs(self, state, step):
return self.model.predict(np.array(state).reshape(-1, *state.shape)/255)[0]
def train(self, terminal_state, step):
if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
return
minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE)
current_states = np.array([transition[0] for transition in minibatch])/255
current_qs_list = self.model.predict(current_states)
new_current_states = np.array([transition[3] for transition in minibatch])/255
future_qs_list = self.target_model.predict(new_current_states)
# Feature sets (images from game)
X = []
# Labels (action we decide to take)
y = []
        for index, (current_state, action, reward, new_current_state, done) in enumerate(minibatch):
if not done:
max_future_q = np.max(future_qs_list[index])
new_q = reward + DISCOUNT * max_future_q
else:
new_q = reward
current_qs = current_qs_list[index]
current_qs[action] = new_q
X.append(current_state)
y.append(current_qs)
self.model.fit(np.array(X) / 255, np.array(y), batch_size=MINIBATCH_SIZE, verbose=0, shuffle=False)
# Updating to determine if we want to update target_model yet
if terminal_state:
self.target_update_counter += 1
if self.target_update_counter > UPDATE_TARGET_EVERY:
self.target_model.set_weights(self.model.get_weights())
self.target_update_counter = 0
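    # The loop above applies the standard DQN target: for a non-terminal
    # transition the label is y = r + DISCOUNT * max_a' Q_target(s', a'),
    # and for a terminal transition it is just r. A single-transition sketch
    # (illustrative names, assuming an image-shaped state scaled by 255):
    #
    #   q_next = self.target_model.predict(np.array(next_state).reshape(-1, *next_state.shape) / 255)[0]
    #   y = reward + DISCOUNT * np.max(q_next)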
def main():
# Exploration settings
    epsilon = 0  # fixed at 0 here: this inference app always acts greedily
    EPSILON_DECAY = 0.99975  # defined but never applied in this script
    MIN_EPSILON = 0.001  # defined but never applied in this script
print("Running Inference App...")
# For stats
ep_rewards = [-200]
# For more repetitive results
#random.seed(1)
#np.random.seed(1)
#tf.set_random_seed(1)
# Create models folder
if not os.path.isdir("models"):
os.mkdir("models")
# Create Environment
env = Environment(map_img_path="gym/bug_env/res/map_small_edge.jpg",
fov=15,
food_spawn_threshold=255,
percent_for_game_over=100,
steps_for_game_over=100,
wait_key=300,
render=True)
agent = DQNAgent(env)
for episode in tqdm(range(1, EPISODES+1), ascii=True, unit="episode"):
episode_reward = 0
step = 1
current_state = env.reset()
done = False
while not done:
if np.random.random() > epsilon:
action = np.argmax(agent.get_qs(current_state, step))
else:
action = np.random.randint(0, env.get_action_space_size())
new_state, reward, done = env.step(action)
episode_reward += reward
            if SHOW_PREVIEW:  # 'episode % 1' is always 0, so this rendered every episode anyway
env.render_map()
env.render_sub_map()
#agent.update_replay_memory((current_state, action, reward, new_state, done))
#agent.train(done, step)
current_state = new_state
step += 1
print("reward: {}".format(reward),
"total reward: {}".format(env.agent_reward),
"game_over: {}".format(done),
"total_gen_reward: {}".format(env.total_generated_rewards)
)
if __name__ == '__main__':
main()
| 29.91573
| 107
| 0.634554
| 687
| 5,325
| 4.684134
| 0.317322
| 0.03729
| 0.019888
| 0.028589
| 0.110006
| 0.029832
| 0.029832
| 0.029832
| 0.029832
| 0.029832
| 0
| 0.03013
| 0.276995
| 5,325
| 177
| 108
| 30.084746
| 0.805714
| 0.140469
| 0
| 0.063636
| 0
| 0
| 0.064807
| 0.024165
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054545
| false
| 0
| 0.109091
| 0.009091
| 0.2
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab94442244cf942959905073e49c32e22155b473
| 1,701
|
py
|
Python
|
languages/python/sort_algorithms/qsort.py
|
sergev/vak-opensource
|
e1912b83dabdbfab2baee5e7a9a40c3077349381
|
[
"Apache-2.0"
] | 34
|
2016-10-29T19:50:34.000Z
|
2022-02-12T21:27:43.000Z
|
languages/python/sort_algorithms/qsort.py
|
sergev/vak-opensource
|
e1912b83dabdbfab2baee5e7a9a40c3077349381
|
[
"Apache-2.0"
] | null | null | null |
languages/python/sort_algorithms/qsort.py
|
sergev/vak-opensource
|
e1912b83dabdbfab2baee5e7a9a40c3077349381
|
[
"Apache-2.0"
] | 19
|
2017-06-19T23:04:00.000Z
|
2021-11-13T15:00:41.000Z
|
# -*- coding: utf-8 -*-
'''
Быстрая сортировка
'''
from typing import MutableSequence, Callable
def _qsort(data: MutableSequence,
start: int,
end: int,
process_func: Callable[[MutableSequence, int, int], int]) -> None:
if start < end:
pivot_index = process_func(data, start, end)
_qsort(data, start, pivot_index - 1, process_func)
_qsort(data, pivot_index + 1, end, process_func)
def _process_end(data: MutableSequence, start: int, end: int) -> int:
i = start - 1
j = end
pivot = data[end]
while i < j:
i += 1
j -= 1
while data[i] < pivot:
i += 1
while data[j] > pivot and j > start:
j -= 1
if i < j:
data[i], data[j] = data[j], data[i]
data[i], data[end] = data[end], data[i]
return i
def _process_middle(data: MutableSequence, start: int, end: int) -> int:
i = start - 1
j = end + 1
pivot_index = (start + end) // 2
pivot = data[pivot_index]
while i < j:
if i < pivot_index:
i += 1
while data[i] < pivot and i < pivot_index:
i += 1
if j > pivot_index:
j -= 1
while data[j] > pivot and j > pivot_index:
j -= 1
if i < j:
data[i], data[j] = data[j], data[i]
if i == pivot_index:
pivot_index = j
elif j == pivot_index:
pivot_index = i
return i
def qsort_end(data: MutableSequence):
_qsort(data, 0, len(data) - 1, _process_end)
def qsort_middle(data: MutableSequence):
_qsort(data, 0, len(data) - 1, _process_middle)
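# Minimal usage sketch: both entry points sort the sequence in place.
if __name__ == '__main__':
    sample = [5, 2, 9, 1, 5, 6]
    qsort_end(sample)
    assert sample == [1, 2, 5, 5, 6, 9]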
| 22.381579
| 77
| 0.519694
| 226
| 1,701
| 3.769912
| 0.146018
| 0.152582
| 0.046948
| 0.09507
| 0.453052
| 0.360329
| 0.321596
| 0.274648
| 0.274648
| 0.171362
| 0
| 0.017447
| 0.359788
| 1,701
| 75
| 78
| 22.68
| 0.764922
| 0.024103
| 0
| 0.367347
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102041
| false
| 0
| 0.020408
| 0
| 0.163265
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab998ab0fd8ce78db72dba56f4ec48316662f98c
| 3,834
|
py
|
Python
|
app.py
|
sandeshk06/Latency_analysis
|
24c3d39eb53b1438ac3916963b6853f95d7e7a8a
|
[
"MIT"
] | null | null | null |
app.py
|
sandeshk06/Latency_analysis
|
24c3d39eb53b1438ac3916963b6853f95d7e7a8a
|
[
"MIT"
] | null | null | null |
app.py
|
sandeshk06/Latency_analysis
|
24c3d39eb53b1438ac3916963b6853f95d7e7a8a
|
[
"MIT"
] | 1
|
2022-03-25T23:54:15.000Z
|
2022-03-25T23:54:15.000Z
|
from flask import Flask,render_template,request
from flask_wtf.csrf import CSRFProtect
from check_domain_result import compute
from check_traceroute import get_traceroute_result
from check_mtr import get_mtr_result
from check_ping import get_ping_result
from geo_latency_info import *
import folium
import os
import logging
from waitress import serve
logging.basicConfig(filename="/var/log/latency_app.log",level = logging.INFO,format = '%(levelname)s %(asctime)s %(message)s',datefmt = '%Y-%m-%d %H:%M:%S',filemode = 'a')
logger = logging.getLogger()
secret_key=os.urandom(12)
app = Flask(__name__)
app.config['SECRET_KEY']=secret_key
csrf = CSRFProtect(app)
@app.route('/',methods=['GET'])
def home():
return render_template('home.html')
@app.route('/latency',methods=['GET','POST'])
def latency():
if request.method=='POST':
name=request.form['name']
if name == '':
return render_template('latency.html')
else:
result=compute(name)
result=[ int(res*1000) for res in result ]
return render_template('show_latency_result.html',url=name,result=result)
return render_template('latency.html')
@app.route('/traceroute',methods=['GET','POST'])
def traceroute():
if request.method=='POST':
name=request.form['name']
if name == '':
return render_template('traceroute.html')
else:
traceroute_result=get_traceroute_result(name)
mtr_result=get_mtr_result(name)
ping_result=get_ping_result(name)
return render_template('show_traceroute_result.html',url=name,traceroute_result=traceroute_result,mtr_result=mtr_result,ping_result=ping_result)
return render_template('traceroute.html')
@app.route('/geo_trace',methods=['GET','POST'])
def geo_trace():
if request.method=='POST':
name=request.form['name']
if name == '':
return render_template('geo_trace.html')
else:
#draw trace on map
#remove old map
#os.remove('templates/show_geo_trace.html')
#logging.info("removed templates/show_geo_trace.html")
            m=folium.Map()
            coordinate_list=[]
            geo_obj=GeoTrace(name)
            SOURCE_LOCATION,TRACES=geo_obj.get_geo_traceroute_data()
            coordinate_list.append(list(SOURCE_LOCATION))
            if TRACES:
                for hop,coordinate in sorted(TRACES.items()):
                    coordinate_list.append(coordinate)
            coordinates =[coordinate_list]
logger.info(coordinates)
m = folium.Map(location=SOURCE_LOCATION, zoom_start=4)
folium.TileLayer('stamentoner').add_to(m)
folium.TileLayer('stamenterrain').add_to(m)
folium.TileLayer('openstreetmap').add_to(m)
folium.map.LayerControl().add_to(m)
COORDINATE_LIST=coordinates[0]
#add marker
            for count, (lat, lon) in enumerate(COORDINATE_LIST, start=1):
                folium.Marker(location=[lat,lon],popup=('Route {}'.format(count)),icon = folium.Icon(color='green',icon='plus')).add_to(m)
my_PolyLine=folium.PolyLine(locations=coordinates,weight=5,color='red')
m.add_child(my_PolyLine)
f_name=name.split('.')[0]
file_name='show_geo_trace_'+str(f_name)+'.html'
saved_file_name=os.path.join('templates/',file_name)
m.save(saved_file_name)
logging.info("saved new result to geo_trace.html")
return render_template(file_name)
return render_template('geo_trace.html')
if __name__=='__main__':
serve(app,port=5000,threads=100)
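# Minimal sketch of exercising the latency endpoint by hand (illustrative
# only: CSRFProtect is enabled above, so a bare POST like this would be
# rejected without a CSRF token from the rendered form):
#
#   import requests
#   requests.post('http://localhost:5000/latency', data={'name': 'example.com'})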
| 30.188976
| 171
| 0.637454
| 476
| 3,834
| 4.911765
| 0.285714
| 0.065868
| 0.085543
| 0.051326
| 0.180496
| 0.102652
| 0.102652
| 0.082121
| 0.082121
| 0.082121
| 0
| 0.007516
| 0.236568
| 3,834
| 126
| 172
| 30.428571
| 0.791254
| 0.035472
| 0
| 0.216867
| 0
| 0
| 0.118874
| 0.020309
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048193
| false
| 0
| 0.13253
| 0.012048
| 0.301205
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab9ab7577f2c53a3e653bf436c3fb0f234794685
| 2,111
|
py
|
Python
|
third_party/pixels/pixels_aiy_v1.py
|
wangpy/archspee
|
97855f903106fba567ffda8cdc25b061cd8bdf5e
|
[
"MIT"
] | 8
|
2019-01-22T13:03:40.000Z
|
2021-12-30T22:11:12.000Z
|
third_party/pixels/pixels_aiy_v1.py
|
wangpy/archspee
|
97855f903106fba567ffda8cdc25b061cd8bdf5e
|
[
"MIT"
] | null | null | null |
third_party/pixels/pixels_aiy_v1.py
|
wangpy/archspee
|
97855f903106fba567ffda8cdc25b061cd8bdf5e
|
[
"MIT"
] | null | null | null |
import time
import queue
import threading
import RPi.GPIO as GPIO
_GPIO_PIN = 25
class Pixels:
def __init__(self):
self.is_light_on = 1
self.count_down = 0
self.light_on_count = 0
self.light_off_count = 0
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(_GPIO_PIN, GPIO.OUT)
self.queue = queue.Queue()
self.off()
self.thread = threading.Thread(target=self._run)
self.thread.daemon = True
self.thread.start()
def wakeup(self, direction=0):
self.queue.put((1, 1))
def listen(self):
self.queue.put((5, 5))
def think(self):
self.queue.put((3, 3))
def speak(self):
self.queue.put((10, 0))
def off(self):
self.queue.put((0, 10))
def _set_light_on(self, on):
pos = GPIO.HIGH if on else GPIO.LOW
GPIO.output(_GPIO_PIN, pos)
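    # The worker thread below consumes (on_count, off_count) tuples from the
    # queue and blinks the LED with that duty cycle in 0.1 s ticks: (10, 0)
    # reads as solid on, (0, 10) as solid off, and (1, 1) as a fast blink.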
def _run(self):
while True:
while not self.queue.empty():
(self.light_on_count, self.light_off_count) = self.queue.get()
self.is_light_on = 0
self.count_down = 0
while self.queue.empty():
if self.count_down == 0:
self.is_light_on = not self.is_light_on
if self.is_light_on:
self.count_down = self.light_on_count
else:
self.count_down = self.light_off_count
if self.count_down == 0:
continue
self._set_light_on(self.is_light_on)
time.sleep(0.1)
self.count_down -= 1
pixels = Pixels()
if __name__ == '__main__':
while True:
try:
pixels.wakeup()
time.sleep(3)
pixels.listen()
time.sleep(3)
pixels.think()
time.sleep(3)
pixels.speak()
time.sleep(3)
pixels.off()
time.sleep(3)
except KeyboardInterrupt:
break
pixels.off()
time.sleep(1)
| 23.988636
| 78
| 0.516817
| 263
| 2,111
| 3.931559
| 0.235741
| 0.074468
| 0.088008
| 0.075435
| 0.094778
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024465
| 0.380388
| 2,111
| 87
| 79
| 24.264368
| 0.766055
| 0
| 0
| 0.191176
| 0
| 0
| 0.00379
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.058824
| 0
| 0.191176
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab9c4d7e47d9e31946148c6e5a43e198f83c9fd5
| 40,231
|
py
|
Python
|
edexOsgi/com.raytheon.uf.common.aviation/utility/common_static/base/aviation/python/TAMPGenerator.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | null | null | null |
edexOsgi/com.raytheon.uf.common.aviation/utility/common_static/base/aviation/python/TAMPGenerator.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | null | null | null |
edexOsgi/com.raytheon.uf.common.aviation/utility/common_static/base/aviation/python/TAMPGenerator.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | 1
|
2021-10-30T00:03:05.000Z
|
2021-10-30T00:03:05.000Z
|
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
#
# Name:
# TAMPGenerator.py
# GFS1-NHD:A10032.0000-SCRIPT;10
#
# Status:
# DELIVERED
#
# History:
# Revision 10 (DELIVERED)
# Created: 07-AUG-2009 10:22:17 OBERFIEL
# Use of CB is now constrained LTE a configurable height used
# by TAFGen
# when creating guidance TAFs.
#
# Revision 9 (DELIVERED)
# Created: 17-JUL-2009 16:40:07 OBERFIEL
# Viewers now use a resource to determine the Routine button
# setting.
#
# Revision 8 (REVIEW)
# Created: 05-MAY-2009 14:54:52 OBERFIEL
# Additional filter for cloud layers
#
# Revision 7 (DELIVERED)
# Created: 01-MAY-2009 13:57:47 OBERFIEL
# Added exception handling when VRB is encountered during
# wind averaging.
#
# Revision 6 (DELIVERED)
# Created: 01-AUG-2008 15:44:45 OBERFIEL
# Synch'd up with changes in OB8.3.1
#
# Revision 5 (DELIVERED)
# Created: 19-JUN-2008 14:20:42 OBERFIEL
# Allowed variable length of TAF -- not just 24h.
#
# Revision 4 (DELIVERED)
# Created: 18-APR-2008 14:19:09 OBERFIEL
# Numerous enhancements more robust error checking added.
#
# Revision 3 (DELIVERED)
# Created: 18-MAR-2008 14:40:28 OBERFIEL
# Fixed numerous formatting errors when TAFs are combined.
#
# Revision 2 (DELIVERED)
# Created: 14-MAR-2008 14:56:22 OBERFIEL
# Fixed some wind and cloud bugs
#
# Revision 1 (DELIVERED)
# Created: 20-NOV-2007 16:36:36 OBERFIEL
# New module to create TAF combined with GFSLAMP guidance.
#
# Change Document History:
# 1:
# Change Document: GFS1-NHD_SPR_7417
# Action Date: 06-OCT-2009 09:42:01
# Relationship Type: In Response to
# Status: CLOSED
# Title: AvnFPS: TUG code does not handle transition from warm to cold seasons
#
#
##
# This is a base file that is not intended to be overridden.
##
import copy, math, re, time
from itertools import groupby
import Avn, AvnLib, TafGen
#
# LAMP categories for ceiling and visibility
#
_LAMPCeilings=[0,200,500,1000,2000,3100,6600,12100]
_LAMPVisibilities=[0.,.5,1.0,2.0,3.0,5.1,6.1]
#
# Upper and lower limits within LAMP categories, allowed for insertion into TAF
#
_LAMPNewCeilings=[(),(0,1),(2,4),(5,9),(10,19),(20,30),(35,60),(70,120),(250,250)]
_LAMPNewVisibilities=[(),(0.1,0.25),(0.25,0.75),(1.0,1.5),(2.0,2.0),(3.0,5.0),(6.0,6.0),(12.0,12.0)]
re_TS = re.compile(r'TS\s?')
#
# Custom exception class
class NoEntries(Exception):
"Used when empty list is found"
class OcnlTimeLimit(Exception):
"Used when conditional group reaches its maximum hours in length"
def _findBestCategory(probabilities,thresholds):
    cat=len(probabilities)
    for cat,(probability,threshold) in enumerate(zip(probabilities,thresholds)):
        if probability >= threshold:
            break
    return cat
def _findCategory(probability,thresholds):
cat=len(thresholds)
for cat,threshold in enumerate(thresholds):
if probability >= threshold:
break
return cat
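# e.g. with descending thresholds, _findCategory(60, [70, 50, 30]) == 1:
# the index of the first category whose threshold the probability meets.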
def _inCategory(cat,thresholds,probabilities,delta):
try:
t=thresholds[cat]
except IndexError:
t=thresholds[-1]
return abs(t-probabilities[cat]) < t*delta
def _getCldHt(cldlyr):
try:
if cldlyr.endswith('CB'):
return int(cldlyr[-5:-2])*100
else:
return int(cldlyr[-3:])*100
except ValueError:
return 25000
def _nearesthr(t=0):
"""Top of the hour nearest within 30 minutes"""
hr=3600
d=t%hr
if d<1800:
hr=0
return t+hr-d
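# e.g. _nearesthr(1000) == 0 and _nearesthr(3000) == 3600: times round to
# the nearer top of the hour, with exactly 1800 s rounding up.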
def _checkOcnlGrp(TAFLines):
"""Check the TEMPO/PROB30 group for elimination"""
#
# Check for last two strings in list
try:
if (TAFLines[-1].startswith('TEMPO') or TAFLines[-1].startswith('PROB30')):
pass
else:
return
except IndexError:
return
#
    # Parse the last two lines into separate tokens
ocnltokens = TAFLines[-1].split(' ')[2:]
prvlngtokens = TAFLines[-2].split(' ')[1:]
#
# If differences are found return, otherwise remove occasional group.
for token in ocnltokens:
if token not in prvlngtokens:
break
else:
TAFLines.pop(-1)
return
def _prjAverage(prjs,tafDuration=24):
"""Average elements together"""
#
# Initialization
TAFStrings = []
ocnlgrp = []
spos = n = 0
#
for n,prj in enumerate(prjs):
if prj.tocnl and prj.tocnl['type']:
#
# Make sure its of the same type and continuous in time
try:
if ocnlgrp[2] != prj.tocnl['type'] or \
ocnlgrp[0] + ocnlgrp[1] != n:
raise IndexError
if ocnlgrp[1:] == [4,'TEMPO'] or ocnlgrp[1:] == [6,'PROB']:
raise OcnlTimeLimit
ocnlgrp[1] += 1
#
# If occasional group is discontinuous in time or exceeds four
# hours in length
#
except (IndexError, OcnlTimeLimit):
if ocnlgrp:
# start position of occasional group and its duration
ospos,duration = ocnlgrp[:2]
TAFStrings.append(' '.join(_buildString([proj.tprev for proj in prjs[spos:n]],
prjs[spos].lampdata,tafDuration)))
TAFStrings.append(' '.join(_buildString([prjs[ospos+x].tocnl
for x in xrange(duration)])))
_checkOcnlGrp(TAFStrings)
spos = n
ocnlgrp = [n,1,prj.tocnl['type']]
#
prjs[spos].tprev['type']='FM'
if spos == n:
_tafstring = ' '.join(_buildString([prjs[spos].tprev],prjs[spos].lampdata,tafDuration))
else:
_tafstring = ' '.join(_buildString([prj.tprev for prj in prjs[spos:n]],
prjs[spos].lampdata,tafDuration))
TAFStrings.append(_tafstring)
try:
ospos,duration = ocnlgrp[:2]
_tafstring = ' '.join(_buildString([prjs[ospos+n].tocnl for n in xrange(duration)]))
TAFStrings.append(_tafstring)
_checkOcnlGrp(TAFStrings)
except (ValueError,TypeError):
pass
return TAFStrings
def _avgNSW(alist):
"""Just return NSW if present"""
if alist:
return 'NSW'
raise NoEntries
def _avgLLWS(alist):
"""Just return the first one, if present"""
if alist:
return alist[0]['str']
raise NoEntries
def _mostFrequent(alist):
"""Returns most frequent string found in list"""
strings = [element.get('str','') for element in alist]
return _freqList(strings)[-1]
def _freqList(alist):
"""Count unique items in list"""
if alist:
freqs = [(len(list(g)),k) for k, g in groupby(sorted(alist))]
return [b for a, b in sorted(freqs)]
raise NoEntries
def _avgWind(winds):
"""Average winds"""
n = len(winds)
if n == 0:
raise NoEntries
elif n == 1:
return winds[0]['str']
wspd = [x['ff'] for x in winds]
wdir = [x['dd'] for x in winds]
gsts = []
for x in winds:
try:
gsts.append(x['gg'])
except KeyError:
continue
ff = int(float(sum(wspd))/len(wspd)+0.5)
try:
gg = sum(gsts)/len(gsts)
    except ZeroDivisionError:
        gg = 0
dd = 'VRB'
if ff > 3:
try:
u = sum([math.cos(math.radians(270 - dir))*spd \
for dir,spd in zip(wdir,wspd)])
v = sum([math.sin(math.radians(270 - dir))*spd \
for dir,spd in zip(wdir,wspd)])
dd = math.degrees(math.atan2(v,u))
if dd >= -90:
dd = 270-dd
else:
dd = -90-dd
dd = '%03d' % (10*((dd+5)//10))
if dd == '000':
dd = '360'
except TypeError:
pass
elif ff < 3:
dd = '000'
ff = 0
if gg - ff > 5:
return '%s%02dG%02dKT' % (dd,ff,gg)
else:
return '%s%02dKT' % (dd,ff)
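# e.g. averaging 350 deg @ 10KT with 010 deg @ 10KT yields '36010KT': the
# directions are averaged as vectors, not arithmetically (which would give
# a nonsensical 180).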
def _avgSky(clouds):
"""Returns most frequently occurring layers"""
#
cldlayers=[]
newstring=[]
#
allrpts=Avn.flatten([y.split() for y in [x['str'] for x in clouds]])
toofew = len(allrpts)/4
cldlayers=[rpt for num, rpt in [(len(list(g)),k) for k, g in groupby(sorted(allrpts))] if num > toofew]
if len(cldlayers) == 0:
mostfreqlyrs=_freqList(allrpts)
else:
mostfreqlyrs=_freqList(cldlayers)
if mostfreqlyrs[-1] == 'SKC':
return 'SKC'
#
# Remove all occurrences of SKC
while True:
        try:
            mostfreqlyrs.remove('SKC')
        except ValueError:
            break
#
# Remove layers at the same height
uniqclddict=dict([(_getCldHt(x),x) for x in mostfreqlyrs])
#
# Prepare to order the most frequent layers
lyrs = dict([(x,0) for x in ['OVC','BKN','SCT','FEW','VV']])
sortedlayers=sorted([_getCldHt(x) for x in mostfreqlyrs[-4:]])
lastheight = 0
for height in sortedlayers:
try:
strng = uniqclddict[height]
except KeyError:
continue
if height <= lastheight:
continue
lastheight = height
coverage=strng[:3]
if strng.startswith('V'):
coverage='VV'
#
# Don't allow VV, SCT or FEW if a BKN is present
if lyrs['BKN']:
if coverage in ['FEW','SCT','VV']:
continue
#
# Don't allow FEW or VV above a SCT layer
if lyrs['SCT']:
if coverage in ['FEW','VV']:
continue
try:
lyrs[coverage] += 1
newstring.append(strng)
except KeyError:
continue
#
# First overcast or VV stops the loop.
if lyrs['OVC'] or lyrs['VV']:
break
#
# Or two BKN layers
if lyrs['BKN'] == 2:
break
#
# Three cloud layers results in breakout
if lyrs['FEW'] + lyrs['SCT'] + lyrs['BKN'] > 2:
break
return ' '.join(newstring)
def _buildString(elements,lampdata=None,tafDuration=24):
"""Examine each element in TAF and come up with a string"""
#
# The presence of lampdata indicates a FM group, bleh.
rec = {}
rec['time'] = { 'from':elements[0]['time']['from'] }
rec['type'] = elements[0]['type']
if not lampdata:
rec['time']['to']=elements[-1]['time']['to']
for _function,_element in [(_avgWind,'wind'),(_mostFrequent,'vsby'),
(_mostFrequent,'pcp'),(_mostFrequent,'obv'),
(_avgSky,'sky'),(_avgNSW,'nsw'),
(_mostFrequent,'vcnty'),(_avgLLWS,'llws')]:
try:
rec[_element]={'str':_function([v[_element] for v in elements if v.has_key(_element)])}
except NoEntries:
pass
#
# Sanity checks
try:
if rec['vsby']['str'] == 'P6SM':
del rec['obv']
except KeyError:
pass
#
#
try:
if rec['nsw']:
if rec['type'] in ['FM','PROB']:
del rec['nsw']
elif rec['pcp']:
del rec['nsw']
except KeyError:
pass
try:
if re_TS.match(rec['pcp']['str']):
for lyr in rec['sky']['str'].split():
if lyr.endswith('CB'):
break
else:
rec['sky']['str'] += 'CB'
except KeyError:
pass
if lampdata and lampdata.has_key('ident'):
return AvnLib.formatRecForMAIN(rec,lampdata['ident'],lampdata['amd'],
tafDuration=tafDuration)
else:
if rec['type'] == 'FM':
return AvnLib.formatRecForFM(rec)
elif len(rec.keys()) > 2:
return AvnLib.formatRecForOCNL(rec)
def _addPrj(grpnum,pos,currentFltCat,nextFltCat,prevFltCat):
"""Almost always insures that that two or more of the same flight category exists"""
#
# Beginning hour of TAF is always added, regardless of any impending flight category
# change
#
if grpnum == 0 and pos == 0:
return True
elif pos:
return currentFltCat in [nextFltCat,prevFltCat]
else:
return currentFltCat == nextFltCat
def _summarizeTAFPrjs(TAFPrjs,TAFData,tafDuration=24):
"""Attempt to group based on previous TAF"""
#
# Initialization
TAFText = []
#
# Save Station Identifier and whether its an amendment
try:
ident=TAFPrjs[0].lampdata['ident']
amd=TAFPrjs[0].lampdata['amd']
except (KeyError, IndexError):
return
#
# Start with breakpoints in the official TAF
for grpnum,grp in enumerate(TAFData):
try:
shr = grp['prev']['time']['from']
ehr = grp['prev']['time']['to']
except KeyError:
continue
#
# Identify those projections and the flight category
# they correspond to.
#
prjs = [(x.flightCategory(),x) for x in TAFPrjs if shr <= x.vtime < ehr]
#
for n,cat in enumerate(prjs):
if n == 0:
numPrjs = len(prjs)-1
prjs2avg = []
crntPrj = cat[0]
nextPrj = prjs[min(n+1,numPrjs)][0]
prevPrj = prjs[max(0,n-1)][0]
if _addPrj(grpnum,n,crntPrj,nextPrj,prevPrj):
#
prjs2avg.append(cat[1])
#
# If there's a change in flight category ahead,
# average the projections gathered so far
#
if crntPrj != nextPrj:
if TAFText == []:
prjs2avg[0].lampdata['ident']=ident
prjs2avg[0].lampdata['amd']=amd
TAFText.extend(_prjAverage(prjs2avg,tafDuration))
prjs2avg = []
#
if prjs and prjs2avg:
if TAFText == []:
prjs2avg[0].lampdata['ident']=ident
prjs2avg[0].lampdata['amd']=amd
TAFText.extend(_prjAverage(prjs2avg,tafDuration))
return TAFText
class TUGPrj:
def __init__(self,**kwds):
self.__dict__.update(kwds)
try:
self.tprev['time']['from']=self.vtime
self.tprev['time']['to']=self.vtime+3600.0
except KeyError:
pass
try:
self.tocnl['time']['from']=self.vtime
self.tocnl['time']['to']=self.vtime+3600.0
except KeyError:
pass
self.wet = self._isWet()
self.pcpn_changed = self.changed = False
def checkSky(self,tafGrpInstructions,dthresholds={'up':.1,'down':.1},
wthresholds={'up':.1,'down':.1}):
"""Make changes to ceiling when guidance strongly differs"""
maxcbhgt=tafGrpInstructions.get('cbhight',50)
#
# For prevailing and occasional groups, adjust if necessary
try:
for group in [self.tprev,self.tocnl]:
if self._isGroupWet(group):
self._checkCeiling(group['sky'],wthresholds,maxcbhgt,True)
else:
self._checkCeiling(group['sky'],dthresholds,maxcbhgt,False)
#
# Determine if the sky condition is duplicated.
new_OcnlSky = []
for layer in self.tocnl['sky']['str'].split():
if not (layer.endswith('CB') or layer.startswith('VV')) and \
layer in self.tprev['sky']['str']:
continue
new_OcnlSky.append(layer)
if len(new_OcnlSky) == 0:
del self.tocnl['sky']
except KeyError:
pass
def _checkCeiling(self,taf,deltas,maxcbhgt,wet=False):
"""Adjust ceilings if necessary"""
#
if wet:
lamp=self.lampdata['csky']
lampBestCat=self.lampdata['ccig_bestCat']
probabilities=self.lampdata['ccprob']
thresholds=self.ccigthr
else:
lamp=self.lampdata['sky']
lampBestCat=self.lampdata['cig_bestCat']
probabilities=self.lampdata['cprob']
thresholds=self.cigthr
tcat = Avn.category(taf['cig'],_LAMPCeilings)
if tcat == lampBestCat:
return
#
# If LAMP and TAF both do not have a ceiling, return early
if lamp['cig'] == taf['cig'] == 99999:
return
#
# Adjust thresholds, determine if we can hit taf's category.
if tcat > lampBestCat and _inCategory(lampBestCat,thresholds,probabilities,deltas['up']):
return
if tcat < lampBestCat and _inCategory(tcat,thresholds,probabilities,deltas['down']):
return
#
# Otherwise, the guidance 'strongly' disagrees with TAF
self.cig_changed = self.changed = True
newsky = []
newceiling = []
#
# Preserve CB in sky condition, cb_skyamt serves as a flag as well
cb_skyamt = None
for lyr in taf['str'].split():
if lyr.endswith('CB'):
cb_skyamt = lyr[:3]
#
# Find layers at or below LAMP ceiling category
if lampBestCat < tcat:
# They have to be FEW or SCT layers
for layer in [x for x in taf['str'].split() if Avn.category(_getCldHt(x),_LAMPCeilings) <= lampBestCat]:
# SCT layers that match LAMP category, change to BKN
if layer[:3] == 'SCT' and Avn.category(_getCldHt(layer),_LAMPCeilings) == lampBestCat:
newceiling.append('BKN%03d' % int(_getCldHt(layer)*0.01))
else:
newsky.append(layer)
#
# If no ceiling found in LAMP category add one
if not newceiling:
maxCeiling = _LAMPNewCeilings[lampBestCat][1]
if lamp['str'] != 'SKC':
newceiling.append(lamp['str'][:3]+'%03d'%maxCeiling)
else:
newceiling.append(lamp['str'])
cb_skyamt = None
newsky = []
#
newsky.extend(newceiling)
else:
# Remove ceilings below lamp category, leave FEW and SCT alone
newsky = [x for x in taf['str'].split()
if x[:3] in ['FEW','SCT'] and \
Avn.category(_getCldHt(x),_LAMPCeilings) < lampBestCat]
newceiling = [x for x in taf['str'].split()
if Avn.category(_getCldHt(x),_LAMPCeilings) == lampBestCat]
#
if not newceiling:
if lamp['str']=='SKC':
newsky=['SKC']
else:
newsky.extend([lamp['str'][:3]+'%03d'%(_LAMPNewCeilings[lampBestCat][0])])
else:
newsky.extend(newceiling)
if cb_skyamt:
#
# If there's already a CB present, break
for i, lyr in enumerate(newsky):
if lyr.endswith('CB'):
break
else:
#
# If there's a cloud amount that matches the original TAF CB amount and its
# below a configurable max CB height
#
for i, lyr in enumerate(newsky):
try:
if cb_skyamt == lyr[:3] and int(lyr[3:6]) <= maxcbhgt:
newsky[i]+='CB'
break
except (ValueError,IndexError):
pass
else:
#
# Otherwise, use the first acceptable layer found below a configurable
# max CB height
#
for i, lyr in enumerate(newsky):
try:
if lyr[:3] in ['SCT','BKN','OVC'] and int(lyr[3:6]) <= maxcbhgt:
newsky[i]+='CB'
break
except (ValueError,IndexError):
pass
taf['str'],taf['cig'] = ' '.join(newsky), _getCldHt(newsky[-1])
def checkVsby(self,dthresholds={'up':.1,'down':.1},wthresholds={'up':.1,'down':.1}):
"""Make changes to visibility when guidance disagrees"""
# For prevailing and occasional groups, adjust if necessary
try:
for group in [self.tprev,self.tocnl]:
if self._isGroupWet(group):
self._checkVisibility(group,wthresholds,True)
else:
self._checkVisibility(group,dthresholds,False)
except KeyError:
pass
def _adjustSNDZIntensity(self,pcpn_str,intensity=None):
"""Based on visibility, the intensity of snow or drizzle may need to be adjusted"""
newPcpnStr = []
for pcp in pcpn_str.split():
result = re.compile('(?P<Pint>[+-])?[A-Z]{,6}(DZ|SN)').match(pcp)
#
# If SN and/or drizzle present
if result:
oldintensity = None
try:
oldintensity = result.group('Pint')
except AttributeError:
pass
if intensity == oldintensity:
return pcpn_str
elif intensity and not oldintensity:
newPcpnStr.append('%c%s' % (intensity,pcp))
elif oldintensity and not intensity:
newPcpnStr.append(pcp[1:])
else:
newPcpnStr.append('%c%s' % (intensity,pcp[1:]))
else:
newPcpnStr.append(pcp)
return ' '.join(newPcpnStr)
def _checkVisibility(self,taf,deltas,wet=False):
"""Adjust ceilings if necessary"""
#
if wet:
lamp=self.lampdata['cvsby']
lampBestCat=self.lampdata['cvis_bestCat']
probabilities=self.lampdata['cvprob']
thresholds=self.cvisthr
else:
lamp=self.lampdata['vsby']
lampBestCat=self.lampdata['vis_bestCat']
probabilities=self.lampdata['vprob']
thresholds=self.visthr
tcat = Avn.category(taf['vsby']['value'],_LAMPVisibilities)
if tcat == lampBestCat:
try:
if taf['obv']['str'] in ['BR','FG']:
if taf['vsby']['value'] <= 0.5:
taf['obv']['str'] = 'FG'
else:
taf['obv']['str'] = 'BR'
except KeyError:
pass
return
#
# Determine if we can hit taf's category by seeing how much its off
if tcat > lampBestCat and _inCategory(lampBestCat,thresholds,probabilities,deltas['up']):
return
if tcat < lampBestCat and _inCategory(tcat,thresholds,probabilities,deltas['down']):
return
#
# Check precip/obvis in the VFR/VLIFR cases, all other cases, TAF obvis will be accepted.
if lampBestCat < tcat:
taf['vsby'] = AvnLib.fixTafVsby(_LAMPNewVisibilities[lampBestCat][1])
#
# If LAMP forecasting VLIFR and TAF obvis is BR, change that
if lampBestCat == 1:
try:
if taf['obv'] and taf['obv']['str'] == 'BR':
taf['obv']['str'] = 'FG'
except KeyError:
pass
#
# Tedious for precipitation
try:
if lampBestCat == 1:
taf['pcp']['str'] = self._adjustSNDZIntensity(taf['pcp']['str'],'+')
else:
taf['pcp']['str'] = self._adjustSNDZIntensity(taf['pcp']['str'],'-')
except KeyError:
pass
if not taf.has_key('pcp') and not taf.has_key('obv'):
taf['obv'] = copy.copy(self.lampdata['obv'])
if taf['obv']['str'] == 'FG' and lampBestCat > 1:
taf['obv']['str'] = 'BR'
else:
#
# If there's obstruction to vision or precipitation, and LAMP indicates VFR
# better to accept forecaster's value in this case.
#
if lampBestCat > 5 and ('obv' in taf.keys() or self._isGroupWet(taf)):
return
#
# Otherwise, adjust according.
taf['vsby'] = AvnLib.fixTafVsby(_LAMPNewVisibilities[lampBestCat][0])
#
# Change occurrence of FG to BR
try:
if lampBestCat > 2 and taf['obv'] and taf['obv']['str'] == 'FG':
taf['obv']['str'] = 'BR'
except KeyError:
pass
#
# Tedious for precipitation
try:
if lampBestCat == 2:
taf['pcp']['str'] = self._adjustSNDZIntensity(taf['pcp']['str'],'+')
else:
taf['pcp']['str'] = self._adjustSNDZIntensity(taf['pcp']['str'],'-')
except KeyError:
pass
if lampBestCat < 7 and not taf.has_key('pcp') and not taf.has_key('obv'):
taf['obv'] = copy.copy(self.lampdata['obv'])
if taf['obv']['str'] == 'FG' and lampBestCat > 1:
taf['obv']['str'] = 'BR'
def checkWind(self):
"""Simply copies LAMP winds into TAF"""
#
        # Provided LAMP winds aren't missing!
if not self.lampdata['wind']['str'].startswith('?'):
self.tprev['wind']=copy.copy(self.lampdata['wind'])
def _genOcnlPcp(self,otype):
"""Add precipitation to occasional group"""
if hasattr(self,'tocnl') and self.tocnl.has_key('pcp'):
return
if not hasattr(self,'tocnl'):
self.tocnl = { 'time': { 'from':self.vtime,'to':self.vtime+3600.0 }}
else:
self.tocnl['time'] = { 'from':self.vtime,'to':self.vtime+3600.0 }
self.tocnl['type']=otype
self.tocnl['pcp'] = self.lampdata['pcp']
self.tocnl['vsby'] = self.lampdata['cvsby']
self.tocnl['sky'] = self.lampdata['csky']
try:
self.tocnl['obv'] = self.lampdata['obv']
except KeyError:
pass
def _genPrevailingPcp(self):
"""Add precipitation to prevailing group"""
self.tprev['pcp'] = self.lampdata['pcp']
self.tprev['vsby'] = self.lampdata['cvsby']
try:
if not self.tprev.has_key('obv'):
self.tprev['obv'] = self.lampdata['obv']
except KeyError:
pass
def checkPrecip(self,bbound=-.1,tbound=.1):
"""Compare guidance and official TAF to see if they agree w.r.t precipitation"""
#
# Probability 'score' combines 6-h POP and relative probability over
# climatology 0.17 ~= 1/6, bleh.
#
score = self.lampdata.get('pop6hr',0)*.17+self.lampdata['pcp']['pop']
#
# A dry TAF
if not self.wet:
# Look at the 'score' value to determine if precip is warranted
if score <= 30.0:
return
elif 30 < score <= 50.0:
if self.lampprj > 9:
self._genOcnlPcp('PROB')
elif 50 < score <= 70.0:
self._genOcnlPcp('TEMPO')
else:
self._genPrevailingPcp()
return
#
# TAF is wet, but LAMP indicates dry
elif self.lampdata['pcp']['pcat'] == 0:
#
# if prevailing group of TAF is wet...
if self._isGroupWet(self.tprev):
#
# Use the freezing precipitation that LAMP suggests
if 'FZ' in self.lampdata['pcp']['str'] and \
not 'FZ' in self.tprev['pcp']['str']:
self.tprev['pcp']=self.lampdata['pcp']
#
# but if probablity is low, demote or remove
if score < 40.0:
if self._tsCheck(self.tprev):
self.tprev['pcp']['str'] = 'TS'
else:
del self.tprev['pcp']
#
# Add the appropriate group
                    if 40.0 >= score > 30.0:
self._genOcnlPcp('TEMPO')
elif score <= 30.0 and self.lampprj > 9:
self._genOcnlPcp('PROB')
#
# If in TEMPO or PROB30 don't remove unless really low.
else:
if score <= 20.0:
#
                    # PROB30 is used only for precipitation and/or thunderstorms,
# so if its too low for PROB30, remove it entirely.
#
if self.tocnl['type'] == 'PROB':
self.tocnl = {}
else:
del self.tocnl['pcp']
#
# Both TAF and LAMP indicate precipitation
elif not self._isGroupWet(self.tprev):
#
# Promote PROB30 precip group as appropriate
if self.lampprj <= 9 and self.tocnl['type'] == 'PROB':
if score <= 70:
self.tocnl['type'] = 'TEMPO'
else:
self.tprev['pcp'] = copy.copy(self.tocnl['pcp'])
self.tocnl = {}
def _tsCheck(self,g):
"""See if TS is present"""
return 'pcp' in g and re_TS.match(g['pcp']['str'])
def _rmvTS(self,g):
"""Remove TS from weather string"""
new_wx=re_TS.sub('',g['str'])
if len(new_wx) < 2:
return ''
return new_wx
def checkTstms(self,bbound=-.1,tbound=.1):
"""Check for thunderstorms"""
#
if not 'tcat' in self.lampdata['pcp']:
return
#
# If LAMP suggests no thunderstorms, remove them if pot is low enough
if self.lampdata['pcp']['tcat'] == 0:
for group,threshold in [(self.tprev,0.9),(self.tocnl,0.5)]:
try:
score = self.lampdata['pcp']['pot']/self.poptthr
if self._tsCheck(group) and score <= threshold:
new_wx = self._rmvTS(group)
if new_wx:
group['pcp']['str']=new_wx
else:
del group
self.pcpn_changed=True
except KeyError:
pass
#
# Otherwise add them if threshold high enough
else:
for group,threshold in [(self.tprev,2.0),(self.tocnl,1.25)]:
try:
score = self.lampdata['pcp']['pot']/self.poptthr
if not self._tsCheck(group) and score >= threshold:
try:
plist = group['pcp'].get('str').split()
plist.insert(0,'TS')
except KeyError:
plist =['TS']
self.pcpn_changed=True
group['pcp']['str'] = TafGen.fixPcp(plist)
break
except KeyError:
pass
def _isGroupWet(self,g):
try:
return len(self._rmvTS(g['pcp'])) > 1
except KeyError:
return False
def _isWet(self):
return self._isGroupWet(self.tprev) or self._isGroupWet(self.tocnl)
def checkSyntax(self):
"""Checks for inconsistency in forecast"""
#
# Check the occasional group for duplication in the prevailing
items =self.tocnl.keys()
for item in items:
if item in ['time','type']: continue
try:
if self.tprev[item]['str'] != self.tocnl[item]['str']:
break
except KeyError:
break
else:
for item in items:
if item in ['time','type']: continue
del self.tocnl[item]
    def printOfficialTAFPrj(self,tafDuration):
"""Print hourly TAF groups"""
taf=[]
try:
try:
taf=AvnLib.formatRecForMAIN(self.tprev,self.lampdata['ident'],
self.lampdata['amd'],
tafDuration=tafDuration)
except KeyError:
taf=AvnLib.formatRecForFM(self.tprev)
if len(self.tocnl.keys()) > 2:
taf.extend(AvnLib.formatRecForOCNL(self.tocnl))
except KeyError:
pass
return ' '.join(taf)
def flightCategory(self):
return AvnLib.flightCategory(self.tprev)
def _rmvBestCategoryBounces(key,startIdx,LAMPData):
index = 1
for i in xrange(startIdx,len(LAMPData)):
try:
p1,p2,p3 = LAMPData[startIdx+index-1][key],LAMPData[startIdx+index][key], \
LAMPData[startIdx+index+1][key]
if p1 == p3 and p1 != p2:
LAMPData[startIdx+index][key] = p1
index=index+1
except IndexError:
pass
def TAMPGenerator(LAMP,TAFData,thresholdsDict,amdmt=' ',cvOnly=True,longFmt=True,
tafDuration=24):
"""Combine latest TAF with LAMP guidance"""
LAF = None
#
# Find first LAMP projection for forecast
LAMPData = LAMP.data['group']
now = AvnLib.getValidTime('taf',amdmt)
#
startIdx=0
for startIdx,prj in enumerate(LAMPData):
if now <= prj['time']['from']:
break
else:
raise Avn.AvnError('No guidance available')
#
if len(thresholdsDict) == 0:
raise Avn.AvnError('No thresholds available')
#
# Create lists of proper thresholds and valid times for the LAMP data we're going to
# use
#
thresholds = [thresholdsDict[x] for x in xrange(startIdx+1,len(thresholdsDict)+1)]
ValidTimes = [prj['time']['from'] for prj in LAMPData[startIdx:]]
#
# Remove 1hr bounces
index = 1
for i in xrange(startIdx,len(LAMPData)):
try:
p1,p2,p3 = LAMPData[startIdx+index-1]['pcp']['pcat'],LAMPData[startIdx+index]['pcp']['pcat'], \
LAMPData[startIdx+index+1]['pcp']['pcat']
if p1 == p3 and p1 != p2:
LAMPData[startIdx+index]['pcp']['pcat'] = p1
index=index+1
except IndexError:
pass
_rmvBestCategoryBounces('ccig_bestCat',startIdx,LAMPData)
_rmvBestCategoryBounces('cvis_bestCat',startIdx,LAMPData)
_rmvBestCategoryBounces('cig_bestCat',startIdx,LAMPData)
_rmvBestCategoryBounces('vis_bestCat',startIdx,LAMPData)
#
# Generate LAMP TAF based on 'smoothed' data and append to
# original TAF when creating the next regular issued TAF
#
if amdmt[0] == ' ':
tafGen = TafGen.TafGen('gfslamp',LAMP.data,amdmt,now)
tafCfgDict = tafGen.grpTaf.copy()
LAF = tafGen.formNewDic(False)
pos = len(TAFData)
#
# LAMP may not be able to add anything here.
try:
TAFData.extend([copy.deepcopy(group) for group in LAF \
if group['prev']['time']['to'] >= TAFData[-1]['prev']['time']['to']])
if TAFData[pos]['prev']['time']['from'] < TAFData[pos-1]['prev']['time']['to']:
TAFData[pos]['prev']['time']['from'] = TAFData[pos-1]['prev']['time']['to']
except (KeyError, IndexError):
pass
else:
tafCfgDict = TafGen.Config(LAMP.data['ident']['str'],'gfslamp').grpTaf().copy()
#
# Map the TAF to each LAMP forecast hour
TAFIndex=[]
o = len(TAFData)
for i,grp in enumerate(TAFData):
for vtime in ValidTimes:
o = len(TAFData)
if grp.has_key('ocnl'):
shr = _nearesthr(grp['ocnl']['time']['from'])
if shr <= vtime < grp['ocnl']['time']['to']:
o = i
shr = _nearesthr(grp['prev']['time']['from'])
if shr <= vtime < grp['prev']['time']['to']:
TAFIndex.append((i,o))
if vtime >= grp['prev']['time']['to']:
break
#
# Fill out the rest of sequence
for x in xrange(len(ValidTimes)-len(TAFIndex)):
TAFIndex.append((o,o))
TAFData.append({'prev':{},'ocnl':{}})
#
# First projection object needs additional information to be formatted correctly.
LAMPData[startIdx]['amd']=amdmt
LAMPData[startIdx]['ident']=LAMP.data['ident']['str']
#
# Construct the objects
TAFPrjs = [TUGPrj(lampprj=p,vtime=v,lampdata=l,tprev=tp,tocnl=to,visthr=th['vis'],cvisthr=th['cvis'],
cigthr=th['cig'],ccigthr=th['ccig'],popothr=th.get('popt',[0])[0],
poptthr=th.get('pott',[0])[0])
for p,v,l,tp,to,th in zip(xrange(1,len(LAMPData[startIdx:])+1),ValidTimes,LAMPData[startIdx:],
[copy.deepcopy(TAFData[n[0]]['prev']) for n in TAFIndex],
[copy.deepcopy(TAFData[n[1]]['ocnl']) for n in TAFIndex],
thresholds)]
newTAF=[]
for prj in TAFPrjs:
if not cvOnly:
prj.checkPrecip()
prj.checkTstms()
prj.checkSky(tafCfgDict)
prj.checkVsby()
prj.checkWind()
prj.checkSyntax()
if longFmt:
            newTAF.append(prj.printOfficialTAFPrj(tafDuration).rstrip())
if not longFmt:
newTAF = _summarizeTAFPrjs(TAFPrjs,TAFData,tafDuration)
newTAF = AvnLib.indentTaf(newTAF)
if amdmt.startswith('A'):
newTAF.insert(0,'TAF AMD')
else:
newTAF.insert(0,'TAF')
return newTAF
| 35.166958
| 116
| 0.504313
| 4,297
| 40,231
| 4.682104
| 0.188271
| 0.020279
| 0.01521
| 0.002436
| 0.206273
| 0.185795
| 0.151002
| 0.145733
| 0.132909
| 0.116209
| 0
| 0.025348
| 0.377321
| 40,231
| 1,143
| 117
| 35.197725
| 0.777773
| 0.193458
| 0
| 0.390244
| 0
| 0
| 0.050736
| 0.000965
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04878
| false
| 0.03794
| 0.004065
| 0.00271
| 0.126016
| 0.00271
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab9caf6fe468bbb3e4d5427518d3f44ab50ebfdb
| 6,154
|
py
|
Python
|
Documentation/utilities/opal_stats.py
|
OPAL-Project/OPAL-Docker
|
c88ccd199efd63358589ceea914ce8de519221a7
|
[
"MIT"
] | 2
|
2020-03-04T15:38:35.000Z
|
2021-06-26T12:12:03.000Z
|
Documentation/utilities/opal_stats.py
|
OPAL-Project/OPAL-Docker
|
c88ccd199efd63358589ceea914ce8de519221a7
|
[
"MIT"
] | 20
|
2018-06-14T10:28:02.000Z
|
2020-02-19T11:28:29.000Z
|
Documentation/utilities/opal_stats.py
|
OPAL-Project/OPAL-Docker
|
c88ccd199efd63358589ceea914ce8de519221a7
|
[
"MIT"
] | 1
|
2018-08-21T21:57:28.000Z
|
2018-08-21T21:57:28.000Z
|
from __future__ import division, print_function
import time
import os
import configargparse
import bz2
import csv
import json
import sys
from datetime import datetime
from os import listdir
from os.path import isfile, join
import multiprocessing as mp
def process_file(records_reader, file):
core_fields = {"file": file, "lines": 0, "calls": 0, "texts": 0, "NotWellFormedTooLong": 0,
"NotWellFormedTooShort": 0, "NotWellFormedWrongCallType": 0, "NotWellFormedWrongNumberFormat": 0,
"NotWellFormedDate": 0, "NotWellFormedCallDuration": 0}
    stats = dict(core_fields)
for row in records_reader:
# We compute the statistics for the row
stats["lines"] = stats["lines"] + 1
fields_vals = row[0].split(';')
if len(fields_vals) < 9:
stats["NotWellFormedTooShort"] = stats["NotWellFormedTooShort"] + 1
continue
if len(fields_vals) > 9:
stats["NotWellFormedTooLong"] = stats["NotWellFormedTooLong"] + 1
continue
try:
call_type = int(fields_vals[0])
if not call_type in [1, 2]:
stats["NotWellFormedWrongCallType"] = stats["NotWellFormedWrongCallType"] + 1
continue
except ValueError:
stats["NotWellFormedWrongCallType"] = stats["NotWellFormedWrongCallType"] + 1
continue
try:
int(fields_vals[1])
except ValueError:
stats["NotWellFormedWrongNumberFormat"] = stats["NotWellFormedWrongNumberFormat"] + 1
continue
try:
int(fields_vals[6])
except ValueError:
stats["NotWellFormedWrongNumberFormat"] = stats["NotWellFormedWrongNumberFormat"] + 1
continue
try:
datetime.strptime(fields_vals[2], '%Y-%m-%d %H:%M:%S')
except ValueError:
stats["NotWellFormedDate"] = stats["NotWellFormedDate"] + 1
continue
try:
call_duration = int(fields_vals[8])
except ValueError:
stats["NotWellFormedCallDuration"] = stats["NotWellFormedCallDuration"] + 1
continue
        if call_duration == 1:
stats["texts"] = stats["texts"] + 1
else:
stats["calls"] = stats["calls"] + 1
return stats
# STATS to be extracted :
# 1) Total number of call and text per hour
# 2) Total number of unique numbers per hour
# 3) Check the pseudonymization (that the same number appear in all files)
# We process a file by first extracting the compressed data into a csv
def process_day(writing_queue, zip_files, path):
for i in range(len(zip_files)):
zip_file = zip_files[i]
        file_name = zip_file[:-4]  # assuming the file path ends with .bz2
        zipfile = bz2.BZ2File(path + '/' + zip_file)  # open the compressed file
        data = zipfile.read()  # get the decompressed data
        with open(path + '/' + file_name, 'wb') as out_file:  # write an uncompressed file
            out_file.write(data)
csv_path = os.path.join(path, file_name)
# csv_path = path + '/' + zip_files[i]
with open(csv_path, 'r') as csvfile:
records_reader = csv.reader(csvfile, delimiter=',')
next(records_reader, None)
stats_final = process_file(records_reader, zip_files[i])
writing_queue.put(stats_final)
os.remove(csv_path)
return True
def write_stats_to_csv(writing_queue, save_path):
"""Write user in writing_queue to csv."""
while True:
# wait for result to appear in the queue
stats = writing_queue.get()
# if got signal 'kill' exit the loop
if stats == 'kill':
break
csv_path = os.path.join(save_path, 'stats.csv')
        with open(csv_path, 'a') as csv_file:  # avoid shadowing the csv module
            json.dump(stats, csv_file)
            csv_file.write('\n')
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
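# e.g. list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]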
#####################################
# main program #
#####################################
parser = configargparse.ArgumentParser(
description='Generate statistics for OPAL raw dataset.')
parser.add_argument('--num_threads', type=int, required=True,
help='Number of threads to be used to create data.')
parser.add_argument('--data_path', required=True,
help='Data path where generated csv have to be saved.')
args = parser.parse_args()
fileDirectory = args.data_path
if __name__ == "__main__":
# Prevent attempt to start a new process before the current process has finished its bootstrapping phase in Windows.
if os.name == 'nt':
mp.freeze_support()
print("Starting...")
start_time = time.time()
# We check if a stats file already exists, if it exists we cancel the operation
csv_path = os.path.join(args.data_path, 'stats.csv')
if os.path.exists(csv_path):
print("The stats file already exists. I am not overwriting it. Cancelling operations.")
sys.exit()
# set up parallel processing
manager = mp.Manager()
writing_queue = manager.Queue()
jobs = []
    # one additional process is reserved for the csv writer, which shouldn't take up much CPU power
pool = mp.Pool(processes=args.num_threads + 1)
pool.apply_async(write_stats_to_csv, (writing_queue, args.data_path))
if os.path.exists(fileDirectory):
filesName = [f for f in listdir(fileDirectory) if isfile(join(fileDirectory, f))]
        file_chunks = chunks(filesName, args.num_threads - 1)  # don't shadow the chunks() helper
        chunksList = list(file_chunks)
for n in range(len(chunksList)):
print(chunksList[n])
jobs.append(pool.apply_async(
process_day, (writing_queue, chunksList[n], fileDirectory)))
# clean up parallel processing (close pool, wait for processes to
# finish, kill writing_queue, wait for queue to be killed)
pool.close()
for job in jobs:
job.get()
writing_queue.put('kill')
pool.join()
elapsed_time = time.time() - start_time
print("The threads are done and it took: %f" % (elapsed_time))
| 37.072289
| 120
| 0.619922
| 743
| 6,154
| 5.010767
| 0.306864
| 0.032232
| 0.016116
| 0.010475
| 0.138061
| 0.124362
| 0.052646
| 0.052646
| 0.052646
| 0
| 0
| 0.009743
| 0.266168
| 6,154
| 165
| 121
| 37.29697
| 0.814659
| 0.159571
| 0
| 0.174603
| 0
| 0
| 0.182768
| 0.082413
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031746
| false
| 0
| 0.095238
| 0
| 0.142857
| 0.039683
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ab9dd6f296e330b5dfe57858dfcb987488e38617
| 1,677
|
py
|
Python
|
api/tests/test_category.py
|
KamilJakubczak/budget-api
|
b1c602b38183b46d09b267a3b848d3dcf5d293c6
|
[
"MIT"
] | null | null | null |
api/tests/test_category.py
|
KamilJakubczak/budget-api
|
b1c602b38183b46d09b267a3b848d3dcf5d293c6
|
[
"MIT"
] | 3
|
2020-08-25T18:19:42.000Z
|
2022-02-13T19:39:19.000Z
|
api/tests/test_category.py
|
KamilJakubczak/budget-api
|
b1c602b38183b46d09b267a3b848d3dcf5d293c6
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.test import APIClient
from api.models import Category
category_URL = reverse('api:category-list')
class PublicTestCase(TestCase):
"""
Test for publicy avaialable category API
"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""
Tests if login is required for retriving categorys
"""
res = self.client.get(category_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class ModelTests(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
'testuser',
'supertest'
)
self.client = APIClient()
    def test_retrieve_recursive_category_name(self):
category1 = Category.objects.create(name='category1',
user=self.user)
category2 = Category.objects.create(name='category2',
user=self.user,
parent_category=category1)
category3 = Category.objects.create(name='category3',
user=self.user,
parent_category=category2)
        expected1 = 'category1'
        self.assertEqual(str(category1), expected1)
        expected2 = 'category1 - category2'
        self.assertEqual(str(category2), expected2)
        expected3 = 'category1 - category2 - category3'
        self.assertEqual(str(category3), expected3)
| 27.048387
| 71
| 0.618366
| 166
| 1,677
| 6.054217
| 0.355422
| 0.059701
| 0.062687
| 0.074627
| 0.103483
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021997
| 0.29517
| 1,677
| 61
| 72
| 27.491803
| 0.828257
| 0.054264
| 0
| 0.171429
| 0
| 0
| 0.080155
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 1
| 0.114286
| false
| 0
| 0.171429
| 0
| 0.342857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aba0d165ccc8910e46a4f5ec4ae38db5c9961875
| 26,044
|
py
|
Python
|
python/imprinting_analysis/imprinting_analysis.py
|
jonassibbesen/hamster-project-scripts
|
2d470dd028be77c9d866d67d16adc0c17d5ba819
|
[
"MIT"
] | 3
|
2021-03-25T08:26:18.000Z
|
2022-01-05T08:45:42.000Z
|
python/imprinting_analysis/imprinting_analysis.py
|
jonassibbesen/hamster-project-scripts
|
2d470dd028be77c9d866d67d16adc0c17d5ba819
|
[
"MIT"
] | null | null | null |
python/imprinting_analysis/imprinting_analysis.py
|
jonassibbesen/hamster-project-scripts
|
2d470dd028be77c9d866d67d16adc0c17d5ba819
|
[
"MIT"
] | 1
|
2021-05-14T21:28:42.000Z
|
2021-05-14T21:28:42.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 13 12:12:25 2021
@author: Jordan
"""
import sys
import os
import numpy as np
import pandas as pd
import seaborn as sns
import tempfile
import gc
import re
import collections
import gzip
import bisect
import pickle
import itertools
import math
sns.set_style('whitegrid')
# make 2 maps:
# - from transcript ID to row numbers of corresponding haplotype specific transcripts
# - from cluster ID to corresponding transcript IDs
def row_dicts(tab):
tx_rows = {}
cluster_txs = {}
for i in range(tab.shape[0]):
tx_id = tab.Name.values[i].split("_")[0]
clust_id = tab.ClusterID.values[i]
if tx_id not in tx_rows:
tx_rows[tx_id] = []
tx_rows[tx_id].append(i)
if clust_id not in cluster_txs:
cluster_txs[clust_id] = set()
cluster_txs[clust_id].add(tx_id)
for clust_id in cluster_txs:
cluster_txs[clust_id] = sorted(cluster_txs[clust_id])
return tx_rows, cluster_txs
def gene_to_row_dict(tx_rows):
gene_to_tx_rows = {}
for tx_id in tx_id_to_gene:
gene = tx_id_to_gene[tx_id]
if gene not in gene_to_tx_rows:
gene_to_tx_rows[gene] = []
if tx_id in tx_rows:
gene_to_tx_rows[gene].extend(tx_rows[tx_id])
return gene_to_tx_rows
def parse_attr(attr):
attrs = {}
for t in attr.split(";"):
tokens = t.strip().replace("\"", "").split()
if len(tokens) == 0:
continue
tag, val = tokens
attrs[tag] = val
return attrs
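# e.g. parse_attr('gene_id "ENSG1"; transcript_id "ENST1";')
#      == {'gene_id': 'ENSG1', 'transcript_id': 'ENST1'}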
def get_haplotypes(chrom, start, end, sample, genotypes):
chrom_start = bisect.bisect_left(genotypes.CHROM.values, chrom)
chrom_end = bisect.bisect_right(genotypes.CHROM.values, chrom)
region_start = bisect.bisect_left(genotypes.POS.values, start, chrom_start, chrom_end)
region_end = bisect.bisect_right(genotypes.POS.values, end, chrom_start, chrom_end)
blocks = []
for i in range(region_start, region_end):
genotype = genotypes[sample].values[i]
phased = "|" in genotype
if len(blocks) == 0 or not phased:
blocks.append({})
al1, al2 = re.split("[\\|\\\\]", genotype)
formatted_alleles = []
for al in (al1, al2):
fal = ""
if al.isdigit():
j = int(al)
if j == 0:
fal = genotypes.REF.values[i]
else:
fal = genotypes.ALT.values[i].split(",")[j - 1]
formatted_alleles.append(fal)
blocks[-1][genotypes.POS.values[i]] = tuple(formatted_alleles)
return blocks
if __name__ == "__main__":
assert(len(sys.argv) == 9)
# gencode annotations
gtf = sys.argv[1]
# list of genes we're interested in
focal_genes = sys.argv[2]
# structured string in format SAMPLE1:rpvg_table1,SAMPLE2:rpvg_table2
tab_string = sys.argv[3]
# structured string in format SAMPLE1:sorted_gibbs_table1,SAMPLE2:sorted_gibbs_table2
gibbs_string = sys.argv[4]
    # file containing the list of hst-to-variant files
hst_variant_list = sys.argv[5]
# file containing list of VCFs (probably reduced to these samples)
vcf_list = sys.argv[6]
# variants for the focal genes in one table
variant_table = sys.argv[7]
# directory for output
out_dir = sys.argv[8]
tabs = []
samples = []
for tab_sample in tab_string.split(","):
assert(":" in tab_sample)
samp, tab = tab_sample.split(":")
tabs.append(tab)
samples.append(samp)
gibbs_tabs = []
gibbs_samples = []
for tab_sample in gibbs_string.split(","):
assert(":" in tab_sample)
samp, tab = tab_sample.split(":")
gibbs_tabs.append(tab)
gibbs_samples.append(samp)
assert(samples == gibbs_samples)
assert(os.path.isdir(out_dir))
assert(os.path.exists(gtf))
assert(os.path.exists(focal_genes))
assert(os.path.exists(vcf_list))
for tab in tabs:
assert(os.path.exists(tab))
for tab in gibbs_tabs:
assert(os.path.exists(tab))
vcfs = []
with open(vcf_list) as f:
for line in f:
if type(line) == bytes:
line = line.decode("utf-8")
vcf = line.strip()
assert(os.path.exists(vcf))
vcfs.append(vcf)
    # make a lookup table for the file name by chromosome
hst_variant_files = {}
with open(hst_variant_list) as f:
for line in f:
if type(line) == bytes:
line = line.decode("utf-8")
fname = line.strip()
with open(fname) as hst_f:
#skip the header
next(hst_f)
hst_line = next(hst_f)
if type(hst_line) == bytes:
hst_line = hst_line.decode("utf-8")
hst_variant_files[hst_line.split()[0]] = fname
tmpdir = tempfile.TemporaryDirectory()
tmppref = tmpdir.name
###############
focal_genes_set = set()
for line in open(focal_genes):
if type(line) == bytes:
line = line.decode("utf-8")
focal_genes_set.add(line.strip().split()[0])
###############
# load the GTF
gencode = pd.read_csv(gtf, sep = "\t", header = None, skiprows = list(range(5)))
gencode.columns = ["chr", "src", "type", "start", "end", "score", "strand", "frame", "attr"]
gencode['chr'] = gencode['chr'].apply(str)
###############
print("loading gene annotations...", file = sys.stderr)
# parse the GTF into useful indexes
gene_coords = {}
tx_models = {}
tx_id_to_name = {}
tx_id_to_gene = {}
exonic_regions = {}
for i in range(gencode.shape[0]):
attrs = parse_attr(gencode.attr.values[i])
gene = attrs["gene_id"]
if gene not in tx_models:
tx_models[gene] = {}
chrom = gencode.chr.values[i]
if chrom.startswith("chr"):
chrom = chrom[3:]
if gene in tx_models:
if gencode.type.values[i] == "gene":
gene_coords[gene] = (chrom, gencode.start.values[i], gencode.end.values[i])
elif gencode.type.values[i] == "exon":
tx_id = attrs["transcript_id"]
if tx_id not in tx_models[gene]:
tx_models[gene][tx_id] = []
tx_models[gene][tx_id].append((chrom, gencode.start.values[i], gencode.end.values[i]))
###############
tx_id_to_gene[tx_id] = gene
###############
if "transcript_id" in attrs and "transcript_name" in attrs:
tx_id_to_name[attrs["transcript_id"]] = attrs["transcript_name"]
###############
if gencode.type.values[i] == "exon":
if chrom not in exonic_regions:
exonic_regions[chrom] = []
exonic_regions[chrom].append([gencode.start.values[i], gencode.end.values[i]])
###############
# reverse the transcript gene table
gene_to_tx_ids = {}
for tx_id in tx_id_to_gene:
gene = tx_id_to_gene[tx_id]
if gene not in gene_to_tx_ids:
gene_to_tx_ids[gene] = []
gene_to_tx_ids[gene].append(tx_id)
###############
all_genes = sorted(gene_to_tx_ids)
###############
# collapse the exonic regions that overlap
for chrom in exonic_regions:
i, j = 0, 0
intervals = exonic_regions[chrom]
intervals.sort()
while j < len(intervals):
if intervals[j][0] <= intervals[i][1]:
intervals[i][1] = max(intervals[i][1], intervals[j][1])
else:
i += 1
intervals[i] = intervals[j]
j += 1
while len(intervals) > i + 1:
intervals.pop()
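    # Worked example of the merge above (hypothetical intervals):
    #   [[1, 5], [4, 8], [10, 12]] -> [[1, 8], [10, 12]]
    # because [1, 5] and [4, 8] overlap after sorting.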
###############
# this is a big table and we don't need it any more, clear it out
del gencode
gc.collect()
###############
print("computing credible intervals...", file = sys.stderr)
sample_tx_cred_intervals = {}
for samp, tab in zip(gibbs_samples, gibbs_tabs):
tx_cred_intervals = []
sample_tx_cred_intervals[samp] = tx_cred_intervals
def record_cred_interval(hst_exprs, credibility):
if len(hst_exprs) == 0:
return
for hst1, hst2 in sorted(set(tuple(sorted(pair)) for pair in itertools.combinations(hst_exprs, 2))):
ratios = []
hst1_expr = hst_exprs[hst1]
hst2_expr = hst_exprs[hst2]
assert(len(hst1_expr) == len(hst2_expr))
for i in range(len(hst1_expr)):
if hst1_expr[i] == 0.0 or hst2_expr[i] == 0.0:
# log ratio undefined if either is 0
continue
ratios.append(math.log(hst1_expr[i] / hst2_expr[i], 2.0))
if len(ratios) == 0:
continue
# find the credible interval
ratios.sort()
i1 = min(int(round(len(ratios) * (1.0 - credibility) / 2.0)), len(ratios) - 1)
i2 = min(int(round(len(ratios) * (1.0 - (1.0 - credibility) / 2.0))), len(ratios) - 1)
r1 = ratios[i1]
r2 = ratios[i2]
tx_cred_intervals.append((hst1, hst2, r1, r2))
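        # Worked example of the index arithmetic above: with 1000 Gibbs samples
        # and credibility = 0.9, i1 = round(1000 * 0.05) = 50 and
        # i2 = round(1000 * 0.95) = 950, so (r1, r2) brackets the central 90%
        # of the sorted log-ratios.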
# take either gzip or unzipped file
f = None
if tab.endswith(".gz"):
f = gzip.open(tab)
else:
f = open(tab)
        # the credibility level I'm using
credibility = .9
curr_tx = None
hst_gibbs_exprs = None
txs_seen = set()
for line in f:
if type(line) == bytes:
line = line.decode("utf-8")
if line.startswith("Name"):
# skip the header
continue
tokens = line.split()
hst = tokens[0]
tx = hst.split("_")[0]
if tx != curr_tx:
                # we're on to a new transcript, make sure we haven't seen it before
assert(tx not in txs_seen)
txs_seen.add(tx)
if curr_tx is not None:
# record the ratios of the HSTs for the previous transcript
record_cred_interval(hst_gibbs_exprs, credibility)
# fresh data structures for this transcript
curr_tx = tx
hst_gibbs_exprs = {}
# record the row of expression values
hst_gibbs_exprs[hst] = [float(tokens[i]) for i in range(2, len(tokens))]
if curr_tx is not None:
# the final transcript
record_cred_interval(hst_gibbs_exprs, credibility)
sample_tx_cred_intervals_output = os.path.join(out_dir, "sample_tx_cred_intervals.pkl")
with open(sample_tx_cred_intervals_output, "wb") as f:
pickle.dump(sample_tx_cred_intervals, f)
###############
print("loading genotypes...", file = sys.stderr)
genotypes = pd.read_csv(variant_table, sep = "\t")
genotypes['CHROM'] = genotypes['CHROM'].apply(str)
genotypes.sort_values(["CHROM", "POS"], inplace = True)
    genotypes = genotypes.loc[~genotypes.duplicated(), :]
#################
print("loading HST variants...", file = sys.stderr)
hst_variants = {}
for hst_file in hst_variant_files.values():
hst_table = pd.read_csv(hst_file, sep = "\t", header = 0)
hst_table['Chrom'] = hst_table['Chrom'].apply(str)
for i in range(hst_table.shape[0]):
if type(hst_table.HSTs.values[i]) == float:
# this seems to happen when the list of HSTs is empty
continue
hsts = hst_table.HSTs.values[i].split(",")
for hst in hsts:
tx = hst.split("_")[0]
gene = tx_id_to_gene[tx]
                if gene not in focal_genes_set:
                    continue
                if hst not in hst_variants:
                    hst_variants[hst] = []
var = (hst_table.Pos.values[i], hst_table.Allele.values[i])
hst_variants[hst].append(var)
del hst_table
gc.collect()
#################
sample_higher_haplo_expr = {}
sample_lower_haplo_expr = {}
sample_informative_expr = {}
sample_haplo_1_is_higher = {}
sample_haplo_hsts = {}
for i in range(len(tabs)):
sample = samples[i]
tab = tabs[i]
print("computing haplotype expression for sample {}...".format(sample), file = sys.stderr)
sample_expr = pd.read_csv(tab, sep = "\t")
sample_tx_rows, sample_cluster_txs = row_dicts(sample_expr)
higher_haplo_expr = {}
lower_haplo_expr = {}
informative_expr = {}
haplo_1_is_higher = {}
haplo_hsts = {}
sample_higher_haplo_expr[sample] = higher_haplo_expr
sample_lower_haplo_expr[sample] = lower_haplo_expr
sample_informative_expr[sample] = informative_expr
sample_haplo_1_is_higher[sample] = haplo_1_is_higher
sample_haplo_hsts[sample] = haplo_hsts
for gene in focal_genes_set:
chrom, start, end = gene_coords[gene]
blocks = get_haplotypes(chrom, start, end, sample, genotypes)
            if len(blocks) != 1:  # also skip genes with no genotyped variants (zero blocks)
                print("sample {} has {} phase blocks on gene {}, skipping".format(sample, len(blocks), gene), file = sys.stderr)
                continue
block = blocks[0]
            if gene not in higher_haplo_expr:
higher_haplo_expr[gene] = {}
lower_haplo_expr[gene] = {}
informative_expr[gene] = {}
gene_higher_haplo_expr = higher_haplo_expr[gene]
gene_lower_haplo_expr = lower_haplo_expr[gene]
gene_informative_expr = informative_expr[gene]
haplo_1_expr = {}
haplo_2_expr = {}
for tx_id in gene_to_tx_ids[gene]:
haplo_1_expr[tx_id] = 0.0
haplo_2_expr[tx_id] = 0.0
total_informative_expr = 0.0
haplo_hsts[tx_id] = [None, None]
                for i in sample_tx_rows.get(tx_id, []):  # tolerate transcripts absent from the expression table
ex = sample_expr.TPM.values[i]
hst = sample_expr.Name.values[i]
match_1 = True
match_2 = True
for pos, allele in hst_variants[hst]:
hap_1, hap_2 = block[pos]
match_1 = match_1 and allele == hap_1
match_2 = match_2 and allele == hap_2
if match_1 and not match_2:
haplo_hsts[tx_id][0] = hst
haplo_1_expr[tx_id] += ex
elif match_2 and not match_1:
haplo_hsts[tx_id][1] = hst
haplo_2_expr[tx_id] += ex
if not (match_1 and match_2):
total_informative_expr += ex
                if tx_id not in gene_informative_expr:
gene_informative_expr[tx_id] = []
gene_informative_expr[tx_id].append(total_informative_expr)
if sum(haplo_1_expr.values()) > sum(haplo_2_expr.values()):
higher = haplo_1_expr
lower = haplo_2_expr
haplo_1_is_higher[gene] = True
else:
lower = haplo_1_expr
higher = haplo_2_expr
haplo_1_is_higher[gene] = False
for tx_id in higher:
                if tx_id not in gene_higher_haplo_expr:
gene_higher_haplo_expr[tx_id] = []
gene_lower_haplo_expr[tx_id] = []
gene_higher_haplo_expr[tx_id].append(higher[tx_id])
gene_lower_haplo_expr[tx_id].append(lower[tx_id])
#################
higher_haplo_output = os.path.join(out_dir, "sample_higher_haplo_expr.pkl")
with open(higher_haplo_output, "wb") as f:
pickle.dump(sample_higher_haplo_expr, f)
lower_haplo_output = os.path.join(out_dir, "sample_lower_haplo_expr.pkl")
with open(lower_haplo_output, "wb") as f:
pickle.dump(sample_lower_haplo_expr, f)
informative_output = os.path.join(out_dir, "sample_informative_expr.pkl")
with open(informative_output, "wb") as f:
pickle.dump(sample_informative_expr, f)
which_haplo_output = os.path.join(out_dir, "sample_haplo_1_is_higher.pkl")
with open(which_haplo_output, "wb") as f:
pickle.dump(sample_haplo_1_is_higher, f)
haplo_hsts_output = os.path.join(out_dir, "sample_haplo_hsts.pkl")
with open(haplo_hsts_output, "wb") as f:
pickle.dump(sample_haplo_hsts, f)
###############
print("identifying heterozygous variants...", file = sys.stderr)
inf = 2**62
het_positions = {}
for vcf in vcfs:
with gzip.open(vcf) as f:
samps = None
for line in f:
if type(line) == bytes:
line = line.decode("utf-8")
if line.startswith("##"):
continue
if line.startswith("#"):
samps = line.rstrip().split("\t")[9:]
for sample in samps:
if sample not in het_positions:
het_positions[sample] = set()
else:
tokens = line.rstrip().split("\t")
assert(len(tokens) == len(samps) + 9)
chrom_exonic_regions = exonic_regions[tokens[0]]
chrom = tokens[0]
pos = int(tokens[1])
idx = bisect.bisect(chrom_exonic_regions, [pos, inf])
if idx == 0:
# before the first exon
continue
elif chrom_exonic_regions[idx - 1][1] < pos:
# in between exons
continue
for i in range(9, len(tokens)):
genotype = tokens[i]
samp = samps[i - 9]
if "|" in genotype or "\\" in genotype:
al1, al2 = re.split("[\\|\\\\]", genotype)
if al1 != al2:
het_positions[samp].add((chrom, pos))
gc.collect()
###############
all_gene_intervals = sorted((interval[0], interval[1], interval[2], gene) for gene, interval in gene_coords.items())
sample_het_balance = {}
for i in range(len(tabs)):
tab = tabs[i]
sample = samples[i]
if sample not in sample_het_balance:
sample_het_balance[sample] = {}
het_balance = sample_het_balance[sample]
print("computing balance for sample {}".format(sample), file = sys.stderr)
buffer = collections.deque()
prev_chrom = None
tokens = None
pos = None
filesize = None
hst_file = None
gene_num = 0
sample_expr = pd.read_csv(tab, sep = "\t")
sample_tx_rows, sample_cluster_txs = row_dicts(sample_expr)
for chrom, start, end, gene in all_gene_intervals:
gene_num += 1
if gene_num % 2500 == 0:
print("processing gene {}".format(gene_num), file = sys.stderr)
gene_hst_variants = {}
if prev_chrom != chrom:
# we've switched chromosomes to a new file
                if chrom not in hst_variant_files:
continue
hst_table = hst_variant_files[chrom]
#print("starting chrom {}".format(chrom), file = sys.stderr)
hst_file = open(hst_table)
filesize = os.fstat(hst_file.fileno()).st_size
# skip the header
hst_file.readline()
buffer.clear()
tell = hst_file.tell()
prev_pos = -1
tokens = hst_file.readline().strip().split()
var_chrom = tokens[0]
pos = int(tokens[1])
buffer.append((pos, tell))
# advance through rows that are strictly before this gene
while pos < start:
tell = hst_file.tell()
if tell == filesize:
break
prev_pos = pos
tokens = hst_file.readline().strip().split()
pos = int(tokens[1])
if pos != prev_pos:
buffer.append((pos, tell))
# remove any part of the buffer before this gene
while len(buffer) > 0:
buf_pos = buffer[0][0]
if buf_pos < start:
buffer.popleft()
else:
break
if len(buffer) > 0:
# everything before the start has been removed, except the current row
buf_pos, tell = buffer[0]
if buf_pos < pos:
# this occurred strictly before the current row, so we need to seek
# backwards
# reset the part of the buffer to the right of where we're seeking to
while len(buffer) > 1:
buffer.pop()
hst_file.seek(tell)
tokens = hst_file.readline().strip().split()
pos = int(tokens[1])
hst_vars = {}
# iterate over rows in the gene
while pos <= end:
if len(tokens) >= 5:
allele = tokens[3]
pos = int(tokens[1])
hsts = tokens[4].split(",")
for hst in hsts:
if hst not in hst_vars:
hst_vars[hst] = []
hst_vars[hst].append((pos, allele))
tell = hst_file.tell()
if tell == filesize:
# we hit the end of the file
break
prev_pos = pos
tokens = hst_file.readline().strip().split()
pos = int(tokens[1])
if pos != prev_pos:
# this is the first row we've seen with this position, remember
# it in the buffer
buffer.append((pos, tell))
prev_chrom = chrom
if gene not in het_balance:
het_balance[gene] = []
var_expr = {}
if gene not in gene_to_tx_ids:
continue
for tx_id in gene_to_tx_ids[gene]:
#print("looking at expression for tx " + tx_id, file = sys.stderr)
if tx_id not in sample_tx_rows:
continue
for i in sample_tx_rows[tx_id]:
ex = sample_expr.TPM.values[i]
if ex == 0.0:
continue
hst = sample_expr.Name.values[i]
#print("\thst " + hst + " has positive expression " + str(ex), file = sys.stderr)
if hst not in hst_vars:
# must not overlap any variants
continue
for var in hst_vars[hst]:
if var not in var_expr:
var_expr[var] = 0.0
var_expr[var] += ex
alleles = {}
for pos, allele in var_expr:
if pos not in alleles:
alleles[pos] = []
alleles[pos].append(allele)
for pos in alleles:
if (chrom, pos) not in het_positions[sample]:
continue
#print("looking at expression for pos " + chrom + " " + str(pos), file = sys.stderr)
total_expr = sum(var_expr[(pos, allele)] for allele in alleles[pos])
highest_expr = max(var_expr[(pos, allele)] for allele in alleles[pos])
#print("highest expr " + str(highest_expr) + ", total " + str(total_expr), file = sys.stderr)
het_balance[gene].append((highest_expr, total_expr))
del sample_expr
del sample_tx_rows
del sample_cluster_txs
gc.collect()
#################
balance_output = os.path.join(out_dir, "sample_het_balance.pkl")
with open(balance_output, "wb") as f:
pickle.dump(sample_het_balance, f)
tx_models_output = os.path.join(out_dir, "tx_models.pkl")
with open(tx_models_output, "wb") as f:
pickle.dump(tx_models, f)
tx_id_to_name_output = os.path.join(out_dir, "tx_id_to_name.pkl")
with open(tx_id_to_name_output, "wb") as f:
pickle.dump(tx_id_to_name, f)
| 35.05249
| 128
| 0.507986
| 3,077
| 26,044
| 4.080598
| 0.123497
| 0.017203
| 0.014495
| 0.007885
| 0.323829
| 0.24777
| 0.217267
| 0.159366
| 0.109908
| 0.087528
| 0
| 0.013072
| 0.386116
| 26,044
| 742
| 129
| 35.09973
| 0.772267
| 0.086123
| 0
| 0.208171
| 0
| 0.019455
| 0.033411
| 0.007714
| 0
| 0
| 0
| 0
| 0.027237
| 1
| 0.009728
| false
| 0
| 0.027237
| 0
| 0.046693
| 0.01751
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aba5a20bb67be074c4810bf3da7c2a271409d71b
| 2,146
|
py
|
Python
|
encdecpy/base64.py
|
IronVenom/encdecpy
|
cca838765e55af846484eebb71f1f49645d147c6
|
[
"MIT"
] | 9
|
2019-04-16T18:50:48.000Z
|
2022-03-15T11:57:02.000Z
|
encdecpy/base64.py
|
IronVenom/encdecpy
|
cca838765e55af846484eebb71f1f49645d147c6
|
[
"MIT"
] | null | null | null |
encdecpy/base64.py
|
IronVenom/encdecpy
|
cca838765e55af846484eebb71f1f49645d147c6
|
[
"MIT"
] | 1
|
2021-08-13T16:00:28.000Z
|
2021-08-13T16:00:28.000Z
|
# Dictionaries for base64 encoding and decoding.
_ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
encode_dict = dict(enumerate(_ALPHABET))
decode_dict = {char: value for value, char in encode_dict.items()}
class base64:
    @staticmethod
    def encode(string):
        # Concatenate the 8-bit binary representation of every character.
        binstream = ''
        for i in string:
            binstream += f"{ord(i):08b}"
        # Pad the stream up to a multiple of 24 bits. Note that the zero bits
        # are *prepended*, so the output only matches standard base64 when
        # len(string) is a multiple of 3 (no padding needed).
        extra = 0
        if len(binstream) % 3 != 0:
            if len(binstream) % 3 == 2:
                binstream = '0' * 16 + binstream
                extra = 1
            else:
                binstream = '0' * 8 + binstream
                extra = 2
        # Slice the stream into 6-bit groups and map each to its symbol.
        encoded_string = ''
        for i in range(len(binstream) // 6):
            encoded_string += encode_dict[int(binstream[6 * i:6 * (i + 1)], 2)]
        return encoded_string + '=' * extra
    @staticmethod
    def decode(string):
        # Count and strip the trailing '=' padding characters.
        extra = 0
        if string.endswith('=='):
            extra = 2
        elif string.endswith('='):
            extra = 1
        newstring = string[:-extra] if extra else string
        # Rebuild the bit stream from 6-bit groups.
        decode_stream = ''
        for i in newstring:
            decode_stream += f"{decode_dict[i]:06b}"
        # Mirror the encoder: re-prepend the zero bits it added, then drop the
        # corresponding number of leading NUL characters from the output.
        skip = 0
        if extra == 1:
            decode_stream = '0' * 16 + decode_stream
            skip = 4
        elif extra == 2:
            decode_stream = '0' * 8 + decode_stream
            skip = 2
        decode_l = []
        for i in range(len(decode_stream) // 8):
            decode_l.append(chr(int(decode_stream[i * 8:8 * (i + 1)], 2)))
        return ''.join(decode_l[skip:])
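# Usage sketch. For inputs whose length is a multiple of 3 the output matches
# standard base64; otherwise the prepended zero-bit padding makes this a
# custom, self-consistent scheme (decode(encode(s)) == s, but it does not
# round-trip through Python's standard `base64` module):
#   >>> base64.encode("abc")
#   'YWJj'
#   >>> base64.decode(base64.encode("hi"))
#   'hi'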
| 34.612903
| 166
| 0.567102
| 412
| 2,146
| 2.859223
| 0.291262
| 0.142615
| 0.04584
| 0.061121
| 0.456706
| 0.429542
| 0.429542
| 0.414261
| 0.414261
| 0.414261
| 0
| 0.10483
| 0.150979
| 2,146
| 62
| 167
| 34.612903
| 0.541712
| 0.021435
| 0
| 0.358491
| 0
| 0
| 0.096713
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037736
| false
| 0
| 0
| 0
| 0.132075
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aba69264e1ac48abb0bcfe70f1af9670b4edc6a2
| 590
|
py
|
Python
|
views/windows/basewindow.py
|
RamonWill/portfolio-management-project
|
ac8ce313f8d62f09810fc1da19d6b252f193871b
|
[
"MIT"
] | 14
|
2020-01-01T04:59:06.000Z
|
2022-02-08T06:48:21.000Z
|
views/windows/basewindow.py
|
linhvien/portfolio-management-project
|
ac8ce313f8d62f09810fc1da19d6b252f193871b
|
[
"MIT"
] | null | null | null |
views/windows/basewindow.py
|
linhvien/portfolio-management-project
|
ac8ce313f8d62f09810fc1da19d6b252f193871b
|
[
"MIT"
] | 8
|
2020-10-15T06:52:37.000Z
|
2021-10-04T06:44:36.000Z
|
import tkinter as tk
class BaseWindow(tk.Toplevel):
def __init__(self):
super().__init__()
self.base_frame = tk.Frame(self)
self.base_frame.pack(fill="both", expand="true")
self.base_frame.pack_propagate(0)
self.frame_styles = {
"relief": "groove",
"bd": 3,
"bg": "#94b4d1",
"fg": "#073bb3",
"font": ("Arial", 9, "bold"),
}
self.text_styles = {
"font": ("Verdana", 10),
"background": "#3F6BAA",
"foreground": "#E1FFFF",
}
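# Usage sketch (hypothetical subclass; assumes a Tk root window already
# exists, since Toplevel needs one). Note that the "fg" and "font" entries in
# frame_styles suit tk.LabelFrame rather than a plain tk.Frame:
#
#   class SettingsWindow(BaseWindow):
#       def __init__(self):
#           super().__init__()
#           tk.Label(self.base_frame, text="Settings", **self.text_styles).pack()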
| 24.583333
| 56
| 0.476271
| 58
| 590
| 4.603448
| 0.689655
| 0.089888
| 0.146067
| 0.127341
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042105
| 0.355932
| 590
| 23
| 57
| 25.652174
| 0.660526
| 0
| 0
| 0
| 0
| 0
| 0.166102
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.052632
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
aba82608b365c3add79167593a64438d0fe2fb8a
| 963
|
py
|
Python
|
sync/auth.py
|
tstodden/google-sheets-sync
|
d3b73b9a2b76da1fabc6d0b1db43fc6b71fd0d23
|
[
"MIT"
] | null | null | null |
sync/auth.py
|
tstodden/google-sheets-sync
|
d3b73b9a2b76da1fabc6d0b1db43fc6b71fd0d23
|
[
"MIT"
] | null | null | null |
sync/auth.py
|
tstodden/google-sheets-sync
|
d3b73b9a2b76da1fabc6d0b1db43fc6b71fd0d23
|
[
"MIT"
] | null | null | null |
import os
from typing import NamedTuple
from google.oauth2.service_account import Credentials as OAuthCredentials
from .constants import OAUTH_CONFIG_PATH, OAUTH_SCOPES
class PostgresCredentials:
def __init__(self):
self.host = os.environ.get("SYNC_DB_HOST")
self.dbname = os.environ.get("SYNC_DB_NAME")
self.user = os.environ.get("SYNC_DB_USER")
self.password = os.environ.get("SYNC_DB_PASSWORD")
class Credentials(NamedTuple):
postgres: PostgresCredentials
oauth: OAuthCredentials
class CredentialsController:
def get(self) -> Credentials:
credentials = Credentials(
postgres=PostgresCredentials(), oauth=self._get_creds_from_google()
)
return credentials
def _get_creds_from_google(self) -> OAuthCredentials:
credentials = OAuthCredentials.from_service_account_file(
OAUTH_CONFIG_PATH, scopes=OAUTH_SCOPES
)
return credentials
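# Usage sketch (assumes the SYNC_DB_* environment variables are set and a
# valid service-account file exists at OAUTH_CONFIG_PATH; otherwise the
# Google client call raises):
#   controller = CredentialsController()
#   creds = controller.get()
#   print(creds.postgres.host, creds.oauth.service_account_email)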
| 28.323529
| 79
| 0.718588
| 106
| 963
| 6.245283
| 0.339623
| 0.054381
| 0.072508
| 0.096677
| 0.108761
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001307
| 0.205607
| 963
| 33
| 80
| 29.181818
| 0.864052
| 0
| 0
| 0.083333
| 0
| 0
| 0.053998
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0.041667
| 0.166667
| 0
| 0.583333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
abab9d30016bdfbdb016a1d0c85da2a580756ebd
| 5,970
|
py
|
Python
|
accounts.py
|
SurajOliver/tradense
|
9f14b6114be1b5753f0252f081e6efb7c26abf19
|
[
"MIT"
] | null | null | null |
accounts.py
|
SurajOliver/tradense
|
9f14b6114be1b5753f0252f081e6efb7c26abf19
|
[
"MIT"
] | null | null | null |
accounts.py
|
SurajOliver/tradense
|
9f14b6114be1b5753f0252f081e6efb7c26abf19
|
[
"MIT"
] | null | null | null |
import datetime
import numpy as np
import pandas as pd
from google.oauth2 import service_account
from googleapiclient import discovery
SPREADSHEET_ID = "1otVI0JgfuBDJw8jlW_l8vHXyfo5ufJiXOqshDixazZA" # ALL-IN-ONE-LOG-2021
class Spreadsheet:
def __init__(self, spreadsheetId):
self.spreadsheetId = spreadsheetId
self.sheet = self.get_all_in_one_log()
def get_all_in_one_log(self):
SERVICE_ACCOUNT_FILE = "credentials.json"
SCOPES = ["https://www.googleapis.com/auth/spreadsheets"]
creds = service_account.Credentials.from_service_account_file(
SERVICE_ACCOUNT_FILE, scopes=SCOPES
)
service = discovery.build("sheets", "v4", credentials=creds)
sheet = service.spreadsheets()
return sheet
def getSheet(self):
return self.sheet
def list_last_five_trans():
result = (
sheet.values().get(spreadsheetId=SPREADSHEET_ID, range="Trans!B6:G").execute()
)
values = result.get("values", [])
df = pd.DataFrame(
values, columns=["Date", "Description", "Dummy", "Amount", "From A/c", "To A/c"]
)
df["Description"] = df["Description"].str.slice(0, 20)
print(df.tail(5))
return
def list_last_30_trans():
result = (
sheet.values().get(spreadsheetId=SPREADSHEET_ID, range="Trans!B6:G").execute()
)
values = result.get("values", [])
df = pd.DataFrame(
values, columns=["Date", "Description", "Dummy", "Amount", "From A/c", "To A/c"]
)
df["Description"] = df["Description"].str.slice(0, 20)
print(df.tail(30))
return
def check_balances():
res2 = (
sheet.values()
.get(spreadsheetId=SPREADSHEET_ID, range="Dashboard!B9:B19")
.execute()
)
acc = res2.get("values", [])
result = (
sheet.values()
.get(spreadsheetId=SPREADSHEET_ID, range="Dashboard!P9:P19")
.execute()
)
val = result.get("values", [])
balances = np.array(val)
balances = balances.flatten()
balances[balances == "#N/A"] = "0"
balances = list(map(float, balances))
(
C,
K,
Zer,
Zer_Comm,
Cams,
Samrudhi,
Citi,
K_Fixed,
Union,
Z_Hold,
Citi_Fixed,
) = balances
print(f"Cash Balance~~~~~~~~~~~~~~~~~~~~~:{C:.2f}")
print(
f"Saving A/c Balance~~~~~~~~~~~~~~~:{(K+Citi):.2f} with (Kotak-{K:.2f} and Citi-{Citi:.2f})"
)
print(
f"In FD (CB,Kotak,Union, Samruddhi):{(K_Fixed+Union+Citi_Fixed):.2f} with (K-{K_Fixed:.2f}, Citi-{Citi_Fixed:.2f})"
)
print(f"Unutilized in Shares~~~~~~~~~~~~~:{Zer:.2f}")
print(f"In CAMS MF~~~~~~~~~~~~~~~~~~~~~~~:{Cams:.2f}")
print(f"In shares~~~~~~~~~~~~~~~~~~~~~~~~:{Z_Hold:.2f}")
return
def check_expenses():
result = (
sheet.values()
.get(spreadsheetId=SPREADSHEET_ID, range="Dashboard!C46:C46")
.execute()
)
values = result.get("values", [])
print("Expenses for the year: " + values[0][0])
return
class Account:
def __init__(self, desc, amount, from_ac, to_ac):
self.desc = desc
self.amount = amount
self.from_ac = from_ac
self.to_ac = to_ac
self.catg = "Adjustment"
if self.from_ac == "C" or self.from_ac == "K":
self.catg = "Expense"
self.today = datetime.datetime.now()
self.period = datetime.date.strftime(self.today, "%Y-%m")
self.formatted_dt = datetime.date.strftime(self.today, "%m/%d/%Y")
self.new_trans = [
[
self.formatted_dt,
self.desc,
"",
self.amount,
self.from_ac,
self.to_ac,
"",
self.period,
self.catg,
]
]
def get_trans(self):
return self.new_trans
def add_new_record():
print("Adding new records, Enter description, amount, from a/c and to a/c")
desc = input("description is: ")
amount = input("trans amount: ")
from_ac = input(" from account: ")
to_ac = input(" to account: ")
account = Account(desc, amount, from_ac, to_ac)
print(" Transaction to be entered is: ", account.get_trans())
conf = 0
while conf != 1 and conf != 9:
conf = int(input(" Enter 1 to confirm, 9 to erase: "))
if conf == 9:
print("Exiting adding new record, please re-enter your choice: ")
return
request = sheet.values().append(
spreadsheetId=SPREADSHEET_ID,
range="Trans!B6:J",
valueInputOption="USER_ENTERED",
insertDataOption="INSERT_ROWS",
body={"values": account.get_trans()},
)
response = request.execute()
print("Added new record: ")
print(response)
return
class Choice:
switcher = {
1: add_new_record,
4: list_last_30_trans,
5: list_last_five_trans,
6: check_balances,
7: check_expenses,
}
def __init__(self, SpreadSheet):
self._choice = 0
self.exit = False
        self.spreadsheet = SpreadSheet  # store the argument; the original assigned the Spreadsheet class by mistake
def is_exit(self):
return self.exit
def get_choice(self):
print("~~~~~~ MAIN MENU ~~~~~~~")
print("1:ADD, 4:LIST-30, 5:LIST-5, 6:CHECK-BALANCE, 7:.CHECK-EXPENSES 9: Quit")
self._choice = int(input("Enter your choice : "))
if self._choice == 9:
self.exit = True
return self._choice
def switch_choice(self):
func = self.switcher.get(self._choice, lambda: "Invalid choice")
func()
if __name__ == "__main__":
AccountSheet = Spreadsheet(SPREADSHEET_ID)
sheet = AccountSheet.getSheet()
# sheet = get_all_in_one_log()
# list_last_five_trans()
choice = Choice(AccountSheet)
    while not choice.is_exit():
choice.get_choice()
choice.switch_choice()
print("Exiting out.. kind regards!")
| 27.638889
| 123
| 0.574372
| 708
| 5,970
| 4.679379
| 0.264124
| 0.031391
| 0.047087
| 0.056142
| 0.245095
| 0.198008
| 0.174464
| 0.174464
| 0.158165
| 0.121944
| 0
| 0.017074
| 0.274037
| 5,970
| 215
| 124
| 27.767442
| 0.747347
| 0.011893
| 0
| 0.175141
| 0
| 0.016949
| 0.2095
| 0.049194
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079096
| false
| 0
| 0.028249
| 0.016949
| 0.19209
| 0.096045
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
abac1a3c955f1251fb90f89726bf46a055cf6faa
| 1,513
|
py
|
Python
|
bot.py
|
googlewaitme/picture-bot
|
ecdbb7b925f11c073e654a403aca4c91d7cbde6d
|
[
"MIT"
] | null | null | null |
bot.py
|
googlewaitme/picture-bot
|
ecdbb7b925f11c073e654a403aca4c91d7cbde6d
|
[
"MIT"
] | null | null | null |
bot.py
|
googlewaitme/picture-bot
|
ecdbb7b925f11c073e654a403aca4c91d7cbde6d
|
[
"MIT"
] | null | null | null |
import telebot
import settings
import helpers
from models import User
bot = telebot.TeleBot(settings.token, parse_mode=None)
users = dict()
@bot.message_handler(commands=['start', 'help'])
def send_help(message):
    if message.chat.id not in users:
users[message.chat.id] = User()
bot.reply_to(message, settings.help_message)
@bot.message_handler(commands=['list_themes'])
def send_themes_list(message):
bot.reply_to(message, settings.themes_list)
@bot.message_handler(commands=['list_picture_formats'])
def send_generators_list(message):
bot.reply_to(message, settings.generators_list)
@bot.message_handler(func=helpers.check_new_image_format)
def set_new_image_format(message):
    if message.chat.id not in users:
        users[message.chat.id] = User()
    users[message.chat.id].set_image_generator(message.text)
    bot.reply_to(message, 'New image format set!')
@bot.message_handler(func=helpers.check_new_color_theme)
def set_color_theme(message):
    if message.chat.id not in users:
        users[message.chat.id] = User()
    users[message.chat.id].set_color_theme(message.text)
    bot.reply_to(message, 'New theme set!')
@bot.message_handler(func=lambda message: True)
def send_image(message):
print(message.chat.id)
    if message.chat.id not in users:
bot.reply_to(message, settings.help_message)
else:
        helpers.generate_image(text=message.text, user=users[message.chat.id], filename='photo.jpg')
        with open('photo.jpg', 'rb') as photo:  # close the file handle after sending
            bot.send_photo(message.chat.id, photo)
bot.polling()
| 28.54717
| 94
| 0.777925
| 230
| 1,513
| 4.930435
| 0.265217
| 0.116402
| 0.137566
| 0.095238
| 0.512346
| 0.441799
| 0.392416
| 0.179894
| 0.179894
| 0.179894
| 0
| 0
| 0.09121
| 1,513
| 52
| 95
| 29.096154
| 0.824727
| 0
| 0
| 0.230769
| 0
| 0
| 0.078652
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.102564
| 0
| 0.25641
| 0.025641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
abb2466001716bd862791946936f478b4e0f91ce
| 787
|
py
|
Python
|
tests/test_measuring_from_threads.py
|
tqsd/EQSN_python
|
823a315b1c2f5658cded19d8c97e579ce7285c42
|
[
"MIT"
] | 3
|
2020-05-03T15:09:41.000Z
|
2021-12-17T11:26:34.000Z
|
tests/test_measuring_from_threads.py
|
tqsd/EQSN_python
|
823a315b1c2f5658cded19d8c97e579ce7285c42
|
[
"MIT"
] | 5
|
2020-03-13T10:03:39.000Z
|
2020-07-09T12:56:04.000Z
|
tests/test_measuring_from_threads.py
|
tqsd/EQSN_python
|
823a315b1c2f5658cded19d8c97e579ce7285c42
|
[
"MIT"
] | 1
|
2020-05-03T15:06:24.000Z
|
2020-05-03T15:06:24.000Z
|
import threading
import random
import time
from eqsn import EQSN
def test_measure_from_threads():
q_sim = EQSN()
    def measure_or_hadamard(_id):
        # Despite the name, this worker only applies Hadamard gates (no
        # measurement); it exercises concurrent gate operations.
n = random.randrange(10, 20, 1)
for _ in range(n):
time.sleep(0.1)
q_sim.H_gate(_id)
nr_threads = 10
ids = [str(x) for x in range(nr_threads)]
for _id in ids:
q_sim.new_qubit(_id)
id1 = ids[0]
for c in ids:
if c != id1:
q_sim.cnot_gate(id1, c)
thread_list = []
for _id in ids:
t = threading.Thread(target=measure_or_hadamard, args=(_id,))
t.start()
thread_list.append(t)
for t in thread_list:
t.join()
q_sim.stop_all()
if __name__ == "__main__":
test_measure_from_threads()
exit(0)
| 19.195122
| 69
| 0.584498
| 120
| 787
| 3.508333
| 0.416667
| 0.047506
| 0.071259
| 0.104513
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025735
| 0.308767
| 787
| 40
| 70
| 19.675
| 0.748162
| 0
| 0
| 0.066667
| 0
| 0
| 0.010165
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
abb2d80690576788c51726bbb153b052e20309fe
| 1,464
|
py
|
Python
|
_tests/test_permute_columns.py
|
allisonCYWu/utility_functions
|
d2c6246c96b9cd5e8c01292dd38ab0d572971698
|
[
"Apache-2.0"
] | null | null | null |
_tests/test_permute_columns.py
|
allisonCYWu/utility_functions
|
d2c6246c96b9cd5e8c01292dd38ab0d572971698
|
[
"Apache-2.0"
] | 4
|
2019-11-18T13:24:08.000Z
|
2020-02-05T19:49:59.000Z
|
_tests/test_permute_columns.py
|
allisonCYWu/utility_functions
|
d2c6246c96b9cd5e8c01292dd38ab0d572971698
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from unittest import TestCase
from utility_functions.stats_functions import permute_columns
from utility_functions.databricks_uf import has_column
from connect2Databricks.spark_init import spark_init
if 'spark' not in locals():
spark, sqlContext, setting = spark_init()
sc = spark.sparkContext
class TestPermuteColumns(TestCase):
def test_permute_columns(self):
data = spark.createDataFrame([(1, 'a', 'a'),
(2, 'b', 'b'),
(3, 'c', 'c'),
(4, 'd', 'd'),
(5, 'e', 'e')],
['id', 'col1', 'col2'])
permuted_data = permute_columns(data,
columns_to_permute = ['col1', 'col2'],
column_to_order = 'id',
ind_permute = False)
permuted_data.show()
self.assertTrue(has_column(permuted_data, 'rand_id'))
self.assertTrue(has_column(permuted_data, 'rand_col1'))
self.assertTrue(has_column(permuted_data, 'rand_col2'))
self.assertEqual(permuted_data.select('rand_col1').collect(), permuted_data.select('rand_col2').collect())
self.assertNotEqual(permuted_data.select('col1').collect(), permuted_data.select('rand_col1').collect())
if __name__ == '__main__':
unittest.main()
| 40.666667
| 114
| 0.555328
| 147
| 1,464
| 5.238095
| 0.394558
| 0.14026
| 0.093506
| 0.08961
| 0.280519
| 0.280519
| 0.151948
| 0
| 0
| 0
| 0
| 0.016194
| 0.325137
| 1,464
| 35
| 115
| 41.828571
| 0.763158
| 0
| 0
| 0
| 0
| 0
| 0.067623
| 0
| 0
| 0
| 0
| 0
| 0.178571
| 1
| 0.035714
| false
| 0
| 0.178571
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
abb442950c4d7f407a450c76baca3bbbde03c64f
| 24,857
|
py
|
Python
|
cogs/fun.py
|
ZetDude/KalevBot
|
fcf8c1502d3d9c85917ca151f9fb2cf4f3713086
|
[
"MIT"
] | 3
|
2017-10-28T21:07:58.000Z
|
2018-05-05T12:14:05.000Z
|
cogs/fun.py
|
ZetDude/KalevBot
|
fcf8c1502d3d9c85917ca151f9fb2cf4f3713086
|
[
"MIT"
] | 4
|
2017-09-08T17:44:31.000Z
|
2017-10-09T15:10:23.000Z
|
cogs/fun.py
|
ZetDude/KalevBot
|
fcf8c1502d3d9c85917ca151f9fb2cf4f3713086
|
[
"MIT"
] | 4
|
2017-09-03T15:37:47.000Z
|
2017-11-15T20:15:59.000Z
|
"""Fun commands that don't do anything really productive
night, thank, shipname, shipcount, ship, hug, pecan, fortune"""
# -*- coding: utf-8 -*-
import pickle
import random
import sqlite3 as lite
import subprocess
import discord
from discord.ext import commands
from lib import shipname_module as improved_shipname, customconverter as cconv, obot
def search(values, search_for):
"Finds all the values in dict `values` where `search_for` is somewhere in the key"
found_values = [] # Initialize an empty list that will be the final list.
for k in values: # Iterate through every key in the given dictionary.
value_string = str(values[k]) # The corresponding value for the key we are currently on.
if str(search_for) in str(k): # If the string we are looking for is in the key.
found_values.append([k, value_string])
# Append the value and the key to the final list.
return found_values # Return the final list.
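# Worked example (hypothetical ship data): search({"1:2": 3, "2:4": 1}, 2)
# returns [["1:2", "3"], ["2:4", "1"]], since "2" occurs in both keys.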
def remove_duplicates(values):
"Return the list `values` with duplicates removed"
# I'm going to be honest, I just found this on StackOverflow so I have no idea how it works.
seen = set()
seen_add = seen.add
values = [x for x in values if not (x in seen or seen_add(x))]
return values
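# Worked example: remove_duplicates([3, 1, 3, 2, 1]) returns [3, 1, 2] --
# order-preserving, unlike a plain set() round-trip.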
class FunCog():
"fun fun fun fun fun fun"
def __init__(self, bot):
self.bot = bot
type(self).__name__ = "Fun"
@commands.command(name='night', aliases=['n', 'goodnight', 'nacht', 'öö', 'ööd', 'oyasumi',
'\u304a\u3084\u3059\u307f'],
help=(r"Wish someone a good night using a super cute kaomoji! ^_^"),
brief="Wish someone a good night.")
async def night(self, ctx, *, target_user=None):
"""Wish a good night to `target_user`, with a kaomoji emoticon in front.
`target_user` is anything pertaining to the target user or member that
lib.customconverter.HybridConverter can detect.
`target_user` defaults to None and can be left blank.
`target_user` can also be the argument "-list", in which case the bot returns all the
kaomoji emoticons associated with this command.
"""
# Define the list of kaomoji emoticons the bot will be using. Because of discord formatting
# special characters are escaped with a \.
kaomoji = [r"お(^o^)や(^O^)す(^。^)みぃ(^-^)ノ゙",
r" .。.:\*・゚☆Goodヾ(\*´Д`(\*゚ω゚\* )Night☆.。.:\*・゚",
r" – =͟͟͞ (¦3[▓▓])",
r" 。・:\*:・゚★,。・=^∇^\*=,。・:\*:・゚☆",
r"☆~\*.(UωU\*)おやすみぃ…\*~☆",
r"|・ω・`)おやすみぃ♪", ]
selected_kaomoji = random.choice(kaomoji)
if target_user is None: # If the user does not supply a target user...
await ctx.send(f"{selected_kaomoji} Good night!") # Return a generic response.
elif target_user == "-list": # -list flag...
await ctx.send("\n".join(kaomoji)) # Join together all the kaomoji and send them.
else: # If the target user is actually given.
try:
target_user = await cconv.HybridConverter().convert(ctx, target_user)
await ctx.send(f"{selected_kaomoji} Good night, {target_user.name}!")
except commands.BadArgument: # HybridConverter fails...
# Fall back to just using the inputted string with no conversion.
await ctx.send(f"{selected_kaomoji} Good night, {target_user}!")
@commands.command(name='thank', aliases=['thanks', 'arigato', 'arigatou', 'arigatoo',
'merci', 'arigatō', 'danke', 'aitah', 'aitäh',
'\u3042\u308a\u304c\u3068\u3046'],
help=(r"Thank someone using a super cute kaomoji! ^_^"),
brief="Thank someone.")
async def thank(self, ctx, *, target_user=None):
"""Thank `target_user`, with a kaomoji emoticon in front.
`target_user` is anything pertaining to the target user or member that
lib.customconverter.HybridConverter can detect.
`target_user` defaults to None and can be left blank.
`target_user` can also be the argument "-list", in which case the bot returns all the
kaomoji emoticons associated with this command.
"""
# The list of kaomoji emoticons the bot will be using. Because of discord formatting special
# characters are escaped with a \.
kaomoji = [r"♪(・ω・)ノ",
r"(\*ゝω・)ノ",
r"゚・:,。★\(^-^ )♪ありがとう♪( ^-^)/★,。・:・゚",
r"(★^O^★)",
r"☆\*:.。. o(≧▽≦)o .。.:\*☆",
r"(ノ^_^)ノ",
r"(ノ゚▽゚)ノ",
r"(ノ´ヮ´)ノ\*:・゚✧",
r"(\*^3^)/\~☆",
r"<(\_ \_\*)> アリガトォ",
r"ありがとぅございますっっヽ(●´∀\`)人(´∀\`●)ノ",
r"ありがとうございましたm(\*-ω-)m",
r"+。:.゚ヽ(\*´∀)ノ゚.:。+゚ァリガトゥ"
]
selected_kaomoji = random.choice(kaomoji)
if target_user is None: # If the user does not supply a target user.
await ctx.send(f"{selected_kaomoji} Thank you!") # Return a generic response.
elif target_user == "-list": # -list flag
await ctx.send("\n".join(kaomoji)) # Join together all the kaomoji and send them.
else: # If the target user is actually given.
try:
target_user = await cconv.HybridConverter().convert(ctx, target_user)
if target_user == ctx.bot.user: # If the user's target is the bot itself...
# "u2764" is the black heart unicode character
await ctx.send(f"You're welcome, {ctx.author.name}! \\\u2764")
elif target_user == ctx.author: # If the user attempts to thank themself... sass.
await ctx.send(f"Why would I need to thank you, {ctx.author.name}?")
else: # If no special cases were found...
await ctx.send(f"{selected_kaomoji} Thank you, {target_user.name}!")
except commands.BadArgument: # HybridConverter fails...
# Fall back to just using the inputted string with no conversion
await ctx.send(f"{selected_kaomoji} Thank you, {target_user}!")
@commands.command(name='shipname', aliases=['name'],
help="Create the shipname of two people.")
async def shipname(self, ctx, name1, name2):
"""Uses pecan's shipname module to create the shipname of two names.
`name1` is the first name.
        `name2` is the second name.
"""
# Request a shipname from pecan's shipname module™ using names from arguments.
names_shipname = improved_shipname.shipname(name1, name2) # I don't know how it works.
await ctx.send(f"{ctx.author.name}, I shall call it \"**{names_shipname}**\"!")
@commands.command(name='shipcount', aliases=['count'],
help="Get amount of ships created between people",
usage="[users...] OR -top")
async def shipcount(self, ctx, *ships_in):
"""Show all the people someone has been shipped with when given one person, or the amount
of ships between certain people when given multiple.
`ships_in` is the people/person to get info of.
`ships_in` can also be the argument "-top", in which case only the top 10 most shipped pairs
will be shown."""
shipfile = obot.SHIPFILE # File where all shipping information is stored.
ships = [] # This list will contain the user(s) we want to get information about.
for i in ships_in: # Convert all the given member to actual users.
if i == "-top": # skip the -top flag.
continue
ships.append(await cconv.HybridConverter().convert(ctx, i))
ships = remove_duplicates(ships)
# Format the IDs into a format: 'id1:id2:id3...'.
# This format is needed as this is how ship information is stored in the shipfile.
ships_format = ':'.join([str(x.id) for x in ships])
try:
# Open the shipfile and unpickle it. The returning format is a dictionary.
# -> {'id1:id2:id3...': count}
with open(shipfile, "rb") as opened_file:
lines = pickle.load(opened_file)
except FileNotFoundError:
await ctx.send(f"I couldn't find the shipping file ({shipfile})")
return
except pickle.UnpicklingError:
await ctx.send("Shipping data file is corrupt, cannot fetch data.")
return
if not ships: # No arguments... default to author.
ships = [ctx.author]
if len(ships) == 1: # Find all the ships that user is contained in.
return_message = ""
if "-top" in ships_in: # -top flag is given...
# The data dict is turned into a list, and is sorted by the count, then reversed
# so that the biggest are in the beginning, and then only the first 10 are fetched.
mentions = list(reversed(sorted(list(lines.items()), key=lambda a: a[1])))[:10]
else: # no flag is given...
# All the lines that contain the target are fetched
mentions = search(lines, ships[0].id)
mentions = reversed(sorted(mentions, key=lambda a: a[1]))
for k, j in mentions: # Iterate through all fetched lines.
usern = []
# take the 'id1:id2:id3...' format and split it into the IDs it is composed from.
for i in k.split(":"):
try:
# Convert the ID which is stored into an user.
found_user = ctx.bot.get_user(int(i))
if found_user is None: # No server shared with target user.
# NOTE: The function get_user_info() works regardless of the target
# sharing servers with the bot, however, it is terribly slow.
found_user = await ctx.bot.get_user_info(i)
usern.append(found_user.name)
except discord.NotFound: # User doesn't exist on discord...?
usern.append(i) # Fall back to just showing the ID
times_message = "time" if j == 1 else "times"
return_message += f"{' x '.join(usern)}: shipped {j} {times_message}\n"
# example -> "User1 x User2: shipped 3 times"
if not return_message: # no results found...
return_message = (f"{ships[0].name}, you haven't been shipped with anybody yet, "
f"but I still love you!")
await ctx.send(f"```\n{return_message}\n```")
return
else: # The user gives multple users as arguments...
# Find how many times those specific users have been shipped before.
occ = lines.get(ships_format, 0)
times_message = "time" if j == 1 else "times"
await ctx.send(f"{ctx.author}, they have been shipped {occ} {times_message} before")
@commands.command(name='ship', aliases=['otp'],
help="Ship someone with someone else.",
brief="Ship someone with someone else. uwu")
async def ship(self, ctx, *ships: cconv.HybridConverter):
shipfile = obot.SHIPFILE # File where all the shipping information is stored.
        if ctx.message.author in ships:  # User attempts to ship themself
await ctx.send((f"{ctx.message.author.name}, "
"I don't think you can ship yourself with someone"))
return
ships = remove_duplicates(ships)
if len(ships) < 2:
await ctx.send(f"{ctx.message.author.name}, mention at least two people in the message")
return
ships_names = [x.name for x in ships]
# Format the IDs into a format: 'id1:id2:id3...'.
# This format is needed as this is how ship information is stored in the shipfile.
# The list is sorted by ID for consistency between runs.
ships_format = ":".join(sorted([str(x.id) for x in ships], key=int))
try:
with open(shipfile, "rb") as opened_file:
# Open the shipfile and unpickle it. The returning format is a dictionary.
# -> {'id1:id2:id3...': count}
lines = pickle.loads(opened_file.read())
except FileNotFoundError:
lines = {}
with open(shipfile, 'w'):
await ctx.send("Created new ship file")
except pickle.UnpicklingError:
await ctx.send("Ship file is corrupt, cannot fetch data.")
return
occ = lines.get(ships_format, 0) # Times the target users have already been shipped.
times_message = "time" + ("" if occ == 1 else "s")
lines[ships_format] = occ + 1 # Increase count by one
with open(shipfile, 'wb') as opened_file: # Write the new data
pickle.dump(lines, opened_file)
shipname = ""
if len(ships) == 2: # If there are two names, we can make a shipname
# Request a shipname from pecan's shipname module™
final = improved_shipname.shipname(*ships_names)
shipname = "I shall call it \"**" + final + "**\""
await ctx.send((f"{ctx.message.author.name} totally ships {' and '.join(ships_names)}"
f"\nThey have been shipped {occ} {times_message} before"
f"\n{shipname}"))
@commands.command(name='hug', aliases=['\U0001f917'],
help="Give someone a hug!")
async def hug(self, ctx, *target_users):
"""Hug target user, and count how many times you have hugged people in total
TODO: Make hugs server-based
`target_users` are the users to hug (or just 1 user).
`target_users` can also be the argument "-top <num>", in which case the top <num> people
with the highest amount of hugs given will be returned.
"""
target_users = list(target_users)
con = lite.connect("important/data.db") # Database where hug data is stored
if target_users[0] == "-top": # If the first argument given is the flag -top...
try: # The second argument is how many people to fetch.
fetch_amount = int(target_users[1])
                if fetch_amount < 0:
                    await ctx.send(f"That's less than zero, {ctx.author}.")
                    return  # bail out; a negative amount would slice the results incorrectly
except ValueError:
await ctx.send(f"That's not an integer, {ctx.author}.")
return
except IndexError: # If an amount isn't given, default to 5
fetch_amount = 5
with con:
try:
# Order all entries by amount, descending, then get the first `fetch_amount`
cur = con.cursor()
cur.execute("SELECT * FROM Hug ORDER BY Hugs DESC")
rows = cur.fetchall()[:fetch_amount]
combine = f"```\nTOP {fetch_amount} HUGGERS:\n---------\n"
for row in rows:
# Convert the ID to an user.
target_user = ctx.bot.get_user(row[0])
if target_user is None: # No server shared with target.
try:
# NOTE: The function get_user_info() works regardless of the target
# sharing servers with the bot, however, it is terribly slow.
target_user = await ctx.bot.get_user_info(row[0])
except discord.NotFound: # User doesn't exist on Discord.
target_user = None # Give up and default to None.
                    combine += target_user.name if target_user is not None else str(row[0])
combine += " - " + str(row[1]) + "\n"
combine += "\n```"
except lite.OperationalError as err: # sql error...
if str(err) == "no such table: Hug": # No table exists...
# Create a new one and inform the user
cur.execute("CREATE TABLE Hug(id INT NOT NULL UNIQUE, Hugs INT);")
await ctx.send("No hug data was recorded, created file now.")
else: # If actual users are given.
targets = []
for i in target_users: # Go through all the targets...
try: # and try to convert them using HybridConverter...
converted_member = await cconv.HybridConverter().convert(ctx, i)
except commands.BadArgument: # but if that fails...
converted_member = "*" + i + "*" # default to the string that the user gave.
targets.append(converted_member)
targets = remove_duplicates(targets)
# If the list contains just the author or nobody
if [ctx.author] == targets or not targets:
await ctx.send(f"Who are you going to hug, {ctx.author.name}? Yourself?")
return
if ctx.author in targets: # Remove the user from the list of targets.
targets.remove(ctx.author)
with con:
try: # Get the data of the author from the database
cur = con.cursor()
cur.execute(
"SELECT COALESCE(Hugs, 0) FROM Hug WHERE id = ?", (ctx.author.id, ))
row = cur.fetchone()
hugs = 0 if row is None else row[0]
except lite.OperationalError as err:
if str(err) == "no such table: Hug":
cur.execute(
"CREATE TABLE Hug(id INT NOT NULL UNIQUE, Hugs INT);")
await ctx.send("Created new hugs database table.")
hugs = 0
times_message = "hug" + ("" if hugs == 1 else "s")
# Create a second list which is just a copy of the targets
mentions_without_bot = list(targets)
            for user in list(mentions_without_bot):
                # Iterate over a copy so that removing items doesn't skip elements.
if isinstance(user, str): # Get rid of everything that isn't an user.
mentions_without_bot.remove(user)
elif user.bot: # Get rid of bots.
mentions_without_bot.remove(user)
hugs += len(mentions_without_bot) # Increase the hug tally of the author.
# Update database.
cur.execute("INSERT OR IGNORE INTO Hug VALUES(?, ?)", (ctx.author.id, hugs))
cur.execute("UPDATE Hug SET Hugs=? WHERE id=?", (hugs, ctx.author.id))
if ctx.bot.user.id in [x.id for x in targets if not isinstance(x, str)]:
# If the bot itself is in the targets list.
if len(targets) > 1: # If other users are hugged alongside it.
# Join all other targets.
recievers_without_self = list(targets)
recievers_without_self.remove(ctx.bot.user)
recievers = " and ".join([x.name if not isinstance(
x, str) else x for x in recievers_without_self])
combine = (f"{ctx.author.name} gave {recievers} a hug, and I hug you back! "
f"\U0001f917 (+{len(mentions_without_bot)}; {hugs} "
f"{times_message} in total)")
else: # Only the bot is hugged.
combine = (f"I hug you back, {ctx.author.name}! "
f"\U0001f917 (+{len(mentions_without_bot)}; {hugs} "
f"{times_message} in total)")
elif targets:
# Join all targets.
recievers = " and ".join(
[x.name if not isinstance(x, str) else x for x in targets])
combine = (f"{ctx.author.name} gave {recievers} a hug! "
f"(+{len(mentions_without_bot)}; {hugs} "
f"{times_message} in total)")
        else:  # I don't know if this clause is ever executed but I'm too scared to remove it.
combine = (f"{ctx.author.name}, you've hit the else clause on line 381 of fun.py, "
f"please report it to someone.")
await ctx.send(combine)
@commands.command(name='pecan', aliases=['p'],
help="Random quote from pecan.")
async def pecan(self, ctx, *, input_text=None):
"""Get a random or certain line from the old IRC chat logs of pecan.
`input_text` is the integer code of the line to fetch. Lookup is 1-indexed.
`input_text` can also be left empty, in which case it defaults to None and just gives a
random line.
`input_text` can also be a string, in which case that string is searched for in the corpus,
and a random line containing that string is returned.
"""
try:
with open(obot.PECAN_CORPUS, "r") as opened_file:
data = opened_file.read().splitlines() # Get all the lines of the file
if input_text is None: # No argument given
num = random.choice(range(len(data))) # Get a random number.
quote = data[num] # Get the quote corresponding to that number
await ctx.send(f"{num + 1}: `{quote}`")
else: # An argument is given
                    try:  # Test if it is the number for a certain line
                        num = int(input_text)
                        if num < 0:
                            await ctx.send("baka! number is negative!")
                            return
                        elif num == 0:
                            await ctx.send("baka! file is 1-indexed!")
                            return
                        quote = data[num - 1]  # user-facing numbering is 1-indexed
                        await ctx.send(f"{num}: `{quote}`")
except IndexError:
await ctx.send(f"baka! number is over {len(data)}!")
return
except ValueError: # Not an int
# Find all entries where target string is included.
if input_text.startswith('"') and input_text.endswith('"'):
input_text = input_text[1:-1]
found_entries = []
for j, i in enumerate(data):
if input_text.lower() in i.lower(): # case-insensitive
found_entries.append((j, i))
if not found_entries: # No entries found...
await ctx.send(f"{ctx.author.name}, nothing contains `{input_text}`")
return
response = random.choice(found_entries) # pick a random valid entry.
await ctx.send((f"`{input_text}` (total {len(found_entries)}) - "
f"{response[0]+1}: `{response[1]}`"))
# example -> `pecan` (total 40) - 1813: `I might meet the other pecan.`
except FileNotFoundError:
await ctx.send(f"{ctx.author.name}, no pecan corpus file is included or it is "
f"configured incorrectly. Download it at "
f"<http://97.107.129.215/pecan.txt>")
@commands.command(name='fortune', aliases=['f'],
help="Unix fortune.")
async def fortune(self, ctx):
"Return a random unix fortune line."
fortune_msg = subprocess.check_output("fortune").decode("utf-8")
fortune_msg = fortune_msg[:1988] + "\u2026" if len(fortune_msg) > 1990 else fortune_msg
await ctx.send("```\n" + fortune_msg + "\n```")
@shipname.error
async def shipname_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send(f"{ctx.author.name}, please use two names as arguments")
@shipcount.error
@ship.error
async def ship_error(self, ctx, error):
if isinstance(error, commands.BadArgument):
await ctx.send(f"{ctx.author.name}, {error.args[0]}")
def setup(bot):
bot.add_cog(FunCog(bot))
| 53.80303
| 100
| 0.544796
| 3,133
| 24,857
| 4.285988
| 0.179381
| 0.022639
| 0.032172
| 0.024203
| 0.366994
| 0.323429
| 0.284182
| 0.246127
| 0.216265
| 0.203902
| 0
| 0.010554
| 0.352014
| 24,857
| 461
| 101
| 53.91974
| 0.818526
| 0.211691
| 0
| 0.26284
| 0
| 0.003021
| 0.213555
| 0.025659
| 0
| 0
| 0
| 0.002169
| 0
| 1
| 0.012085
| false
| 0
| 0.024169
| 0
| 0.081571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
abb5b6a5115ed18e2c351f8835ee0c1d15acd865
| 3,151
|
py
|
Python
|
kross/base_push.py
|
pcorbel/kross
|
b7c282ecefc24066c3623257407b2f4ad02964bf
|
[
"Apache-2.0"
] | 7
|
2019-07-16T19:10:57.000Z
|
2019-07-29T07:50:39.000Z
|
kross/base_push.py
|
pcorbel/kross
|
b7c282ecefc24066c3623257407b2f4ad02964bf
|
[
"Apache-2.0"
] | null | null | null |
kross/base_push.py
|
pcorbel/kross
|
b7c282ecefc24066c3623257407b2f4ad02964bf
|
[
"Apache-2.0"
] | null | null | null |
import os
import re
import shutil
import attr
import click
import subprocess32 as subprocess
import yaml
from kross.utils import echo, get_std
@attr.s
class BasePush(object):
push_args = attr.ib(type=tuple)
registry_target = attr.ib()
manifest_directory = attr.ib()
qemu_archs = attr.ib()
push_manifest_cmd = attr.ib()
@registry_target.default
def default_registry_target(self):
registry_target = self.push_args[-1]
if re.match(r"(.*?)/(.*?):(.*)", registry_target):
return registry_target
# fmt: off
raise click.ClickException("""Cannot find target image.
Please pass it in the <repository/image_name:image_tag> format.""")
# fmt: on
@manifest_directory.default
def default_manifest_directory(self):
# Generic registry handling
manifest_directory = "{}/.docker/manifests/{}".format(
os.path.expanduser("~"),
self.registry_target.replace("/", "_").replace(":", "-"),
)
if os.path.exists(manifest_directory):
return manifest_directory
# Default non-explicit registry handling
else:
manifest_directory = "{}/.docker/manifests/docker.io_{}".format(
os.path.expanduser("~"),
self.registry_target.replace("/", "_").replace(":", "-"),
)
if os.path.exists(manifest_directory):
return manifest_directory
@qemu_archs.default
def default_qemu_archs(self): # pylint: disable=no-self-use
arch_file = os.path.dirname(os.path.abspath(__file__)) + "/archs.yaml"
with click.open_file(arch_file, "r") as stream:
archs = yaml.load(stream=stream, Loader=yaml.UnsafeLoader)
return archs.get("archs")
@push_manifest_cmd.default
def default_push_manifest_cmd(self):
push_manifest_cmd = "docker manifest push {}".format(self.registry_target)
return push_manifest_cmd
def remove_manifest_directory(self):
echo("Purging manifest directory.", verbose_only=True)
shutil.rmtree(path=self.manifest_directory, ignore_errors=True)
def exec_push_manifest(self):
try:
subprocess.run(
self.push_manifest_cmd.split(),
check=True,
stdout=get_std(),
stderr=get_std(),
)
except subprocess.CalledProcessError:
raise click.ClickException("Cannot push manifest list to registry.")
def __str__(self):
# fmt: off
result = """
base_push:
- registry_target: {self.registry_target}
- manifest_directory: {self.manifest_directory}
- push_manifest_cmd: {self.push_manifest_cmd}
- push_args: """.format(**locals())
for push_arg in self.push_args:
result += "{} ".format(push_arg)
result += """
- qemu_archs: """
for arch in self.qemu_archs:
result += "{name} ".format(**arch)
result += "\n"
return result
# fmt: on
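# Worked example of the defaults above (hypothetical arguments): when
# push_args ends in "docker.io/foo/bar:latest", default_registry_target()
# accepts it (it matches the repository/image:tag pattern), the manifest
# directory becomes ~/.docker/manifests/docker.io_foo_bar-latest, and
# push_manifest_cmd is "docker manifest push docker.io/foo/bar:latest".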
| 33.521277
| 83
| 0.597271
| 341
| 3,151
| 5.284457
| 0.319648
| 0.132075
| 0.066593
| 0.031632
| 0.193119
| 0.157603
| 0.157603
| 0.119867
| 0.119867
| 0.119867
| 0
| 0.001333
| 0.285624
| 3,151
| 93
| 84
| 33.88172
| 0.7992
| 0.039987
| 0
| 0.106667
| 0
| 0
| 0.169289
| 0.05472
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093333
| false
| 0.013333
| 0.106667
| 0
| 0.36
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
abb63be3752095ce741f646a1eeb76e388b01a69
| 5,655
|
py
|
Python
|
pylogue/menu_driver.py
|
tirinox/pylogue
|
35b85f93a0d91b60a0d1640126b54d427b60712a
|
[
"MIT"
] | null | null | null |
pylogue/menu_driver.py
|
tirinox/pylogue
|
35b85f93a0d91b60a0d1640126b54d427b60712a
|
[
"MIT"
] | null | null | null |
pylogue/menu_driver.py
|
tirinox/pylogue
|
35b85f93a0d91b60a0d1640126b54d427b60712a
|
[
"MIT"
] | null | null | null |
from comm.telegram import TelegramCommunicationBot
from telegram import Message
from util.misc import wlog, die, enumerate_2d_array
import traceback


class MenuStopped(Exception):
    """Raised to abort a menu dialog. On Python 3.7+ (PEP 479) a bare
    StopIteration raised inside a generator becomes a RuntimeError, so a
    dedicated control-flow exception is used instead."""


class MessageResponse:
def __init__(self, text, user_id):
self.user_id = user_id
self.text = str(text).strip()
class MessageRequest:
pass
class MenuOption:
def __init__(self, caption, key=None):
self.caption = caption
self.key = key
class BotMenuBase:
def root_generator(self):
yield from ()
def __init__(self, bot: TelegramCommunicationBot):
self.bot = bot
self.user_id = None
self.next_message = None
self.buffer = []
self.last_options = []
self.last_hide_kb = False
self.last_is_kbd_compact = False
def flush(self):
if self.buffer:
full_message = '\n\n'.join(self.buffer)
self.bot.send_message(user_id=self.user_id,
message=full_message,
hide_keyboard=self.last_hide_kb,
options=self.last_options,
resize_keyboard=self.last_is_kbd_compact)
self.buffer = []
def notify(self, text, hide_kb=False, options=(), flush=False, compact_kbd=False):  # immutable default avoids the shared mutable-default pitfall
if self.user_id and text:
self.last_hide_kb = hide_kb
self.last_options = options
self.last_is_kbd_compact = compact_kbd
self.buffer.append(text)
if flush or len(self.buffer) >= 4:
self.flush()
else:
wlog("Warning: can't notify; you need set user_id and send a valid text")
def notify_error(self, text):
self.notify('{}\nType /quit or /q if you give up.'.format(text), flush=True)
def set_next_message(self, msg: MessageRequest):
self.next_message = msg
def stop(self):
# NB: under PEP 479 (Python 3.7+) a StopIteration raised inside a
# generator surfaces as RuntimeError; a dedicated exception type would
# be a safer stop signal here.
raise StopIteration
def gen_ask_until_validated(self, validator, text_on_fail='Try again.'):
while True:
r = yield MessageRequest()
text = r.text
if text in ['/quit', '/q']:
self.notify('😤 Dialog stopped.')
self.stop()
return None
value = validator(text)
if value is None:
if text_on_fail:
self.notify_error(text_on_fail)
else:
return value
def gen_confirm(self, request_text: str, yes_option='Yes, I confirm', no_option='No, cancel please'):
text = '🤝 Do you confirm this operation❓\n{}'.format(request_text)
result = yield from self.gen_select_option(text, [
MenuOption('✅ {}'.format(yes_option), 'yes'),
MenuOption('🚫 {}'.format(no_option), 'no')
])
return result == 'yes'
def gen_select_option(self, request_text: str, options: list, compact_kbd=True) -> [str, int]:
key_table = {}
n_options = 0
def extract_string_for_keyboard(item, index):
if isinstance(item, MenuOption):
text = item.caption
key = item.key if item.key else index
else:
return 'error: each item must be a MenuOption instance'
caption = '{}. {}'.format(index, text)
key_table[str(index)] = key
key_table[text] = key
key_table[caption] = key
key_table[key] = key
nonlocal n_options
n_options += 1
return caption
keyboard_numbered = enumerate_2d_array(options, 1, extract_string_for_keyboard)
if n_options == 0: # no options provided
self.stop()
return ''
message_text = request_text
while True:
self.notify(message_text, options=keyboard_numbered, compact_kbd=compact_kbd)
self.flush()
answer = yield MessageRequest()
answer_text = str(answer.text).strip()
if answer_text in ['/quit', '0', 'q']:
self.notify('😤 Dialog stopped.')
self.stop()
return ''
else:
if answer_text in key_table:
return key_table[answer_text]
else:
message_text = '😡 Please select a valid option or send a number ' \
'between 1 and {n}. Use /quit or 0 or q to exit. {orig_text}'.format(
orig_text=request_text,
n=n_options)
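# An illustration (hypothetical option) of the lookup table gen_select_option
# builds: for MenuOption('Start', 'start') at index 1, key_table maps '1',
# 'Start', '1. Start' and 'start' all to 'start', so the user may answer with
# the number, the caption, the rendered keyboard line, or the raw key.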
class BotMenuDriver:
def set_user_id(self, user_id):
self.menu.user_id = user_id
def on_message(self, msg: Message):
try:
user_id = self.bot.user_id_from_msg(msg)
self.set_user_id(user_id)
text = msg.text
self.gen.send(MessageResponse(text, user_id))
except StopIteration:
wlog('Restarting menu generator.')
self.start_generator()
except Exception as e:
wlog('Menu exception: {}'.format(e))
traceback.print_exc()
def start_generator(self):
self.gen = self.menu.root_generator()
try:
next(self.gen)
except Exception as e:
wlog("Error! Couldn't start the menu generator: {}".format(e))
def attach_to_bot(self, bot: TelegramCommunicationBot):
self.bot = bot
self.bot.message_handler = self.on_message
self.set_user_id(bot.get_allowed_chat())
self.start_generator()
def __init__(self, menu: BotMenuBase):
self.bot = None
self.menu = menu
self.gen = None
# === hiburn/actions.py | repo: OpenHisiIpCam/hiburn | license: MIT | Python, 10,358 bytes | 8 stars ===
import logging
import ipaddress
import os
from . import utils
from . import ymodem
# -------------------------------------------------------------------------------------------------
class Action:
@classmethod
def _run(cls, client, config, args):
return cls(client, config).run(args)
def __init__(self, client, config):
self.client = client
self.config = config
@classmethod
def add_arguments(cls, parser):
pass
def run(self, args):
raise NotImplementedError()
# some helper methods are below
@property
def host_ip(self):
return ipaddress.ip_interface(self.config["net"]["host_ip_mask"]).ip
@property
def host_netmask(self):
return ipaddress.ip_interface(self.config["net"]["host_ip_mask"]).netmask
@property
def device_ip(self):
return ipaddress.ip_address(self.config["net"]["device_ip"])
def configure_network(self):
""" Common method to configure network on target device
"""
self.client.setenv(
ipaddr=self.device_ip,
serverip=self.host_ip,
netmask=self.host_netmask
)
def upload_files(self, *args):
utils.upload_files_via_tftp(self.client, args, listen_ip=str(self.host_ip))
def upload_y_files(self, *args):
for fname, addr in args:
with open(fname, "rb") as f:
data = f.read()
self.client.loady(addr, data)
def add_actions(parser, *actions):
subparsers = parser.add_subparsers(title="Action")
for action in actions:
action_parser = subparsers.add_parser(action.__name__,
help=action.__doc__.strip() if action.__doc__ else None
)
action.add_arguments(action_parser)
action_parser.set_defaults(action=action._run)
# -------------------------------------------------------------------------------------------------
class printenv(Action):
""" Print U-Boot environment variables
"""
def run(self, args):
result = self.client.printenv()
print("\n".join(result))
# -------------------------------------------------------------------------------------------------
class ping(Action):
""" Configure network on device and ping host
"""
def run(self, args):
self.configure_network()
result = self.client.ping(self.host_ip)[-1]
if not result.endswith("is alive"):
raise RuntimeError("network is unavailable")
print("Network is fine")
# -------------------------------------------------------------------------------------------------
class download(Action):
""" Download data from device's RAM via TFTP
"""
@classmethod
def add_arguments(cls, parser):
parser.add_argument("--dst", type=str, default="./dump", help="Destination file")
parser.add_argument("--addr", type=utils.hsize2int, required=True, help="Address to start downloading from")
parser.add_argument("--size", type=utils.hsize2int, required=True, help="Amount of bytes to be downloaded")
def run(self, args):
self.configure_network()
utils.download_files_via_tftp(self.client, (
(args.dst, args.addr, args.size),
), listen_ip=str(self.host_ip))
# -------------------------------------------------------------------------------------------------
class upload(Action):
""" Upload data to device's RAM via TFTP
"""
@classmethod
def add_arguments(cls, parser):
parser.add_argument("--src", type=str, required=True, help="File to be uploaded")
parser.add_argument("--addr", type=utils.hsize2int, required=True, help="Destination address in device's memory")
def run(self, args):
self.configure_network()
self.upload_files((args.src, args.addr))
# -------------------------------------------------------------------------------------------------
class boot(Action):
""" Upload Kernel and RootFS images into device's RAM and boot it
"""
@classmethod
def add_arguments(cls, parser):
parser.add_argument("--uimage", type=str, required=True, help="Kernel UImage file")
parser.add_argument("--rootfs", type=str, required=True, help="RootFS image file")
parser.add_argument("--upload-addr", type=utils.hsize2int,
help="Start address to upload into")
parser.add_argument("--initrd-size", type=utils.hsize2int,
help="Amount of RAM for initrd (actual size of RootFS image file by default)")
parser.add_argument("--no-wait", action="store_true",
help="Don't wait end of serial output and exit immediately after sending 'bootm' command")
parser.add_argument("--ymodem", action="store_true",
help="Upload via serial (ymodem protocol)")
bootargs_group = parser.add_argument_group("bootargs", "Kernel's boot arguments")
bootargs_group.add_argument("--bootargs-ip", metavar="IP", type=str,
help="Literal value for `ip=` parameter")
bootargs_group.add_argument("--bootargs-ip-gw", metavar="IP",type=str,
help="Value for <gw-ip> of `ip=` parameter")
bootargs_group.add_argument("--bootargs-ip-hostname", metavar="HOSTNAME", type=str,
help="Value for <hostname> of `ip=` parameter")
bootargs_group.add_argument("--bootargs-ip-dns1", metavar="IP", type=str,
help="Value for <dns0-ip> of `ip=` parameter")
bootargs_group.add_argument("--bootargs-ip-dns2", metavar="IP", type=str,
help="Value for <dns1-ip> of `ip=` parameter")
def get_bootargs_ip(self, args):
if args.bootargs_ip is not None:
return args.bootargs_ip
fmt = "{client_ip}:{server_ip}:{gw_ip}:{netmask}:{hostname}:{device}:{autoconf}:{dns0_ip}:{dns1_ip}:{ntp0_ip}"
return fmt.format(
client_ip=self.device_ip,
server_ip=self.host_ip,
gw_ip=args.bootargs_ip_gw or self.host_ip,
netmask=self.host_netmask,
hostname=args.bootargs_ip_hostname or "camera1",
device="",
autoconf="off",
dns0_ip=args.bootargs_ip_dns1 or self.host_ip,
dns1_ip=args.bootargs_ip_dns2 or "",
ntp0_ip=""
)
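# With the defaults above and hypothetical addresses 192.168.1.10 (device) and
# 192.168.1.1 (host), get_bootargs_ip() would produce a kernel parameter like:
#   ip=192.168.1.10:192.168.1.1:192.168.1.1:255.255.255.0:camera1::off:192.168.1.1::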
def run(self, args):
uimage_size = os.path.getsize(args.uimage)
rootfs_size = os.path.getsize(args.rootfs) if args.initrd_size is None else args.initrd_size
alignment = self.config["mem"]["alignment"]
if args.upload_addr is None:
mem_end_addr = self.config["mem"]["start_addr"] + self.config["mem"]["linux_size"]
rootfs_addr = utils.align_address_down(alignment, mem_end_addr - rootfs_size)
uimage_addr = utils.align_address_down(alignment, rootfs_addr - uimage_size)
else:
uimage_addr = utils.align_address_up(alignment, args.upload_addr) # to ensure alignment
rootfs_addr = utils.align_address_up(alignment, uimage_addr + uimage_size)
logging.info("Kernel uImage upload addr {:#x}; RootFS image upload addr {:#x}".format(
uimage_addr, rootfs_addr
))
if args.ymodem:
self.upload_y_files((args.uimage, uimage_addr), (args.rootfs, rootfs_addr))
else:
self.configure_network()
self.upload_files((args.uimage, uimage_addr), (args.rootfs, rootfs_addr))
bootargs = ""
bootargs += "mem={} ".format(self.config["mem"]["linux_size"])
bootargs += "console={} ".format(self.config["linux_console"])
bootargs += "ip=" + self.get_bootargs_ip(args) + " "
bootargs += "mtdparts=hi_sfc:512k(boot) "
bootargs += "root=/dev/ram0 ro initrd={:#x},{}".format(rootfs_addr, rootfs_size)
logging.info("Load kernel with bootargs: {}".format(bootargs))
self.client.setenv(bootargs=bootargs)
resp = self.client.bootm(uimage_addr, wait=(not args.no_wait))
if resp is None:
print("'bootm' command has been sent. Hopefully booting is going on well...")
else:
print(
"Output ended with next lines:\n" +
"... {} lines above\n".format(len(resp)) +
"----------------------------------------\n" +
"\n".join(" {}".format(l.strip()) for l in resp[-10:]) +
"\n----------------------------------------"
)
# -------------------------------------------------------------------------------------------------
class download_sf(Action):
""" Download data from device's SPI flasg via TFTP
"""
@classmethod
def add_arguments(cls, parser):
parser.add_argument("--probe", type=str, required=True, help="'sf probe' arguments")
parser.add_argument("--size", type=utils.hsize2int, required=True, help="Amount of bytes to be downloaded")
parser.add_argument("--offset", type=utils.hsize2int, default=0, help="Flash offset")
parser.add_argument("--dst", type=str, default="./dump.bin", help="Destination file")
parser.add_argument("--addr", type=utils.hsize2int, help="Devices's RAM address read data from flash into")
def run(self, args):
DEFAULT_MEM_ADDR = self.config["mem"]["start_addr"] + (1 << 20) # 1Mb
self.configure_network()
self.client.sf_probe(args.probe)
mem_addr = DEFAULT_MEM_ADDR if args.addr is None else args.addr
logging.info("Read {} bytes from {} offset of SPI flash into memory at {}...".format(args.size, args.offset, mem_addr))
self.client.sf_read(mem_addr, args.offset, args.size)
utils.download_files_via_tftp(self.client, (
(args.dst, mem_addr, args.size),
), listen_ip=str(self.host_ip))
# -------------------------------------------------------------------------------------------------
class upload_y(Action):
""" Upload data to device's RAM via serial (ymodem)
"""
@classmethod
def add_arguments(cls, parser):
pass
# parser.add_argument("--src", type=str, required=True, help="File to be uploaded")
# parser.add_argument("--addr", type=utils.hsize2int, required=True, help="Destination address in device's memory")
def run(self, args):
# Stub action: sends a placeholder payload. Note that loady is called
# elsewhere as loady(addr, data); this one-argument call looks like
# leftover test scaffolding.
self.client.loady(b"bla bla bla!")
# === tree/inorder_traversal.py | repo: vandesa003/leetcode_algo | license: MIT | Python, 955 bytes | 1 star ===
"""
中序遍历:DFS或者栈来实现。
leetcode No.94
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
# dfs 递归实现
def inorderTraversal(self, root: TreeNode) -> List[int]:
ans = []
def dfs(node):
if not node:
return node
dfs(node.left)
ans.append(node.val)
dfs(node.right)
dfs(root)
return ans
# dfs 栈实现(非递归)
def inorderTraversal_stack(root):
if not root:
return root
stack = []
ans = []
while len(stack)>0 or root:
# 先遍历完所有左子树
if root is not None:
stack.append(root)
root = root.left
# 左子树遍历完后,弹出父节点,遍历右子树
else:
root = stack.pop()
ans.append(root.val)
root = root.right
return ans
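# A minimal self-check sketch (tree chosen here for illustration, not part of
# the original solution), using the LeetCode 94 example [1, null, 2, 3]:
if __name__ == "__main__":
root = TreeNode(1, None, TreeNode(2, TreeNode(3)))
assert Solution().inorderTraversal(root) == [1, 3, 2]
assert Solution().inorderTraversal_stack(root) == [1, 3, 2]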
# === rasa-rest-explode-brain.py | repo: nprovotorov/rasa-rest-api-loadtest | license: MIT | Python, 1,199 bytes | 6 stars ===
import random
import uuid
from locust import HttpUser, task, between
apiUrl = "/webhooks/rest/webhook" # Rasa Core REST API endpoint
# apiUrl = "/core/webhooks/rest/webhook" # Rasa X REST API endpoint
class RasaRestExplodeBrainUser(HttpUser):
wait_time = between(3, 10)
def on_start(self):
self.name = str(uuid.uuid1())
with open("questions.txt") as f:
self.questions = f.readlines()
with open("messages.txt") as f:
self.messages = f.readlines()
@task(1)
def sayHello(self):
payload = {"sender": self.name, "message": "Hello!"}
self.client.post(apiUrl, json=payload)
@task(2)
def askQuestion(self):
questionNumber = random.randint(0, len(self.questions)-1)
question = self.questions[questionNumber]
payload = {"sender": self.name, "message": question}
self.client.post(apiUrl, json=payload)
@task(3)
def saySomethingRandom(self):
messageNumber = random.randint(0, len(self.messages)-1)
message = self.messages[messageNumber]
payload = {"sender": self.name, "message": message}
self.client.post(
apiUrl, json=payload)
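# Assuming a standard Locust installation and a Rasa server on localhost:5005,
# this file would typically be driven with something like:
#   locust -f rasa-rest-explode-brain.py --host http://localhost:5005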
# === celery_project/daily_quote/quotes.py | repo: engineervix/django-celery-sample | license: BSD-3-Clause | Python, 2,545 bytes ===
#!/usr/bin/env python3
"""quotery.py
This script is part of a daily celery task to fetch a quote
from a JSON file and save it to the database.
There should only be one quote at a time in the database.
On the template, we simply retrieve the quote and
display it on the website as "Quote of the Day".
The idea is to have a drop-in script that simulates an API call
so that in future, if we find a suitable quote API, we can easily
refactor accordingly and use the new API, without making significant
changes to the codebase.
"""
import os
import logging
import traceback
import re
from datetime import datetime
import json
from celery_project.daily_quote.models import Quote
logger = logging.getLogger(__name__)
def quote_index(start_date):
"""
Determine which quote (index) to retrieve from the given JSON file
based on the current date.
Args:
start_date (str): the reference start date in YYYY-MM-DD format.
This date corresponds to index 0.
Returns:
int: the index to retrieve
"""
today = datetime.today()
date_format = "%Y-%m-%d"
try:
initial_date = datetime.strptime(start_date, date_format)
except ValueError:
var = traceback.format_exc()
logger.error(var)
initial_date = datetime(2021, 3, 1)
days_since_start = (today - initial_date).days
return abs(days_since_start)  # abs() guards against a start date in the future
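# A worked example of the index math above: with the reference start date
# "2021-03-01", quote_index("2021-03-01") called on 2021-03-11 returns 10;
# quote_of_the_day() below then wraps that modulo the quote count, so a list
# of, say, 7 quotes would serve index 3 on that day.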
def quote_of_the_day():
"""
let's get that quote
"""
dir_path = os.path.dirname(os.path.realpath(__file__))
json_file = os.path.join(dir_path, "quotes.json")
with open(json_file, "r") as read_file:
data = json.load(read_file)
num_of_quotes = len(data)
idx = quote_index("2021-03-01")
# wrap around when the running day count exceeds the number of quotes
idx %= num_of_quotes
quote = data[idx]
return quote
def sync_quote_of_the_day():
"""
We get our quote and save it to our Quote Model in the Database
We then delete older entry(ies)
"""
qod = quote_of_the_day()
# lets make sure we don't save the same entry more than once
if not Quote.objects.filter(quote=qod["text"]).exists():
quote_entry = Quote(
quote=qod["text"],
author_name=qod["author"],
)
quote_entry.save()
# Quote.objects.filter(created__lt=datetime.today()).delete()
# delete all but first:
Quote.objects.filter(
id__in=list(Quote.objects.values_list("pk", flat=True)[1:])
).delete()
# === backend/flask_app/server.py | repo: aPorousRock/Angular2Flask | license: MIT | Python, 7,718 bytes ===
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Entry point for the server application."""
import json
import logging
import traceback
from datetime import datetime
from flask import Response, request, jsonify, current_app
from gevent.wsgi import WSGIServer
from flask_jwt_simple import (
JWTManager, jwt_required, create_jwt, get_jwt_identity, get_jwt
)
from .http_codes import Status
from .factory import create_app, create_user
import os
import nltk
import gensim
import numpy as np
from gensim import corpora, models, similarities
import pickle
import pandas as pd
from keras.models import load_model,Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import hamming_loss
from keras import backend as K
K.set_image_dim_ordering('th')
Imagemodel = Sequential()
Imagemodel.add(Convolution2D(32, kernel_size=(3, 3),padding='same',input_shape=(3 , 100, 100)))
Imagemodel.add(Activation('relu'))
Imagemodel.add(Convolution2D(64, (3, 3)))
Imagemodel.add(Activation('relu'))
Imagemodel.add(MaxPooling2D(pool_size=(2, 2)))
Imagemodel.add(Dropout(0.25))
Imagemodel.add(Convolution2D(64,(3, 3), padding='same'))
Imagemodel.add(Activation('relu'))
Imagemodel.add(Convolution2D(64, 3, 3))
Imagemodel.add(Activation('relu'))
Imagemodel.add(MaxPooling2D(pool_size=(2, 2)))
Imagemodel.add(Dropout(0.25))
Imagemodel.add(Flatten())
Imagemodel.add(Dense(512))
Imagemodel.add(Activation('relu'))
Imagemodel.add(Dropout(0.5))
Imagemodel.add(Dense(9))
Imagemodel.add(Activation('sigmoid'))
Imagemodel.load_weights("/Users/ajinkya.parkar@ibm.com/Documents/deep/keras_multilabel/multilabel/weights.11-0.72365.hdf5")
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
Imagemodel.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
from IPython.display import Image
import cv2
logger = logging.getLogger(__name__)
app = create_app()
jwt = JWTManager(app)
model=load_model('LSTM5000.h5')
mod = gensim.models.Word2Vec.load('/Users/ajinkya.parkar@ibm.com/Downloads/apnews_sg/word2vec.bin');
@app.before_first_request
def init():
"""Initialize the application with defaults."""
create_user(app)
@jwt.jwt_data_loader
def add_claims_to_access_token(identity):
"""Explicitly set identity and claims for jwt."""
if identity == 'admin':
roles = 'admin'
else:
roles = 'peasant'
now = datetime.utcnow()
return {
'exp': now + current_app.config['JWT_EXPIRES'],
'iat': now,
'nbf': now,
'sub': identity,
'roles': roles
}
@app.route("/api/logout", methods=['POST'])
@jwt_required
def logout():
"""Logout the currently logged in user."""
# TODO: handle this logout properly, very weird implementation.
identity = get_jwt_identity()
if not identity:
return jsonify({"msg": "Token invalid"}), Status.HTTP_BAD_UNAUTHORIZED
logger.info('Logged out user !!')
return 'logged out successfully', Status.HTTP_OK_BASIC
@app.route('/api/login', methods=['POST'])
def login():
"""View function for login view."""
logger.info('Logged in user')
params = request.get_json()
username = params.get('username', None)
password = params.get('password', None)
if not username:
return jsonify({"msg": "Missing username parameter"}), Status.HTTP_BAD_REQUEST
if not password:
return jsonify({"msg": "Missing password parameter"}), Status.HTTP_BAD_REQUEST
# TODO Check from DB here
if username != 'admin' or password != 'admin':
return jsonify({"msg": "Bad username or password"}), Status.HTTP_BAD_UNAUTHORIZED
# Identity can be any data that is json serializable
# TODO: rather than passing expiry time here explicitly, decode token on client side. But I'm lazy.
ret = {'jwt': create_jwt(identity=username), 'exp': datetime.utcnow() + current_app.config['JWT_EXPIRES']}
return jsonify(ret), 200
@app.route('/api/protected', methods=['POST'])
@jwt_required
def get_data():
"""Get dummy data returned from the server."""
jwt_data = get_jwt()
data = {'Heroes': ['Hero1', 'Hero2', 'Hero3']}
json_response = json.dumps(data)
return Response(json_response,
status=Status.HTTP_OK_BASIC,
mimetype='application/json')
@app.route('/api/chat', methods=['POST'])
def get_chat():
"""Get dummy data returned from the server."""
jwt_data = get_jwt()
params = request.get_json()
myText = params.get('myText', None)
print(myText)
print(params)
sentend = np.ones((300,), dtype=np.float32)  # sentinel vector marking end of sentence
sent = nltk.word_tokenize(myText)
sentvec = [mod[w] for w in sent if w in mod.vocab]
sentvec[14:] = []  # keep at most the first 14 word vectors
sentvec.append(sentend)
# pad with the sentinel up to the fixed model input length of 15
if len(sentvec) < 15:
for i in range(15 - len(sentvec)):
sentvec.append(sentend)
sentvec = np.array([sentvec])
predictions = model.predict(sentvec)
outputlist=[mod.most_similar([predictions[0][i]])[0][0] for i in range(5)]
output=' '.join(outputlist)
print(output)
data = {'Heroes': ['Hero1', 'Hero2', 'Hero3']}
json_response = json.dumps(output)
return Response(json_response,
status=Status.HTTP_OK_BASIC,
mimetype='application/json')
@app.route('/api/image', methods=['POST'])
def get_Image():
"""Get dummy data returned from the server."""
jwt_data = get_jwt()
params = request.get_json()
myText = params.get('myText', None)
print(myText)
print(params)
img = cv2.imread(myText)
img = cv2.resize(img,(100,100))
img = img.transpose((2,0,1))
img = img.astype('float32')
img = img/255
img = np.expand_dims(img,axis=0)
pred = Imagemodel.predict(img)
y_pred = np.array([1 if pred[0,i]>=0.6 else 0 for i in range(pred.shape[1])])
finalOutput = []
for key, value in enumerate(y_pred):
if key == 0 and value == 1:
finalOutput.append("Good for lunch")
if key == 1 and value == 1:
finalOutput.append("Good for dinner")
if key == 2 and value == 1:
finalOutput.append("Takes reservation")
if key == 3 and value == 1:
finalOutput.append("Outdoor seating")
if key == 4 and value == 1:
finalOutput.append("Restaurent is expensive")
if key == 5 and value == 1:
finalOutput.append("Has alchohol")
if key == 6 and value == 1:
finalOutput.append("Has Table Service")
if key == 7 and value == 1:
finalOutput.append("Ambience is classy")
if key == 8 and value == 1:
finalOutput.append("Good for kids")
print(finalOutput)
data = {'Heroes': ['Hero1', 'Hero2', 'Hero3']}
json_response = json.dumps(finalOutput)
return Response(json_response,
status=Status.HTTP_OK_BASIC,
mimetype='application/json')
def main():
"""Main entry point of the app."""
try:
port = 8080
ip = '0.0.0.0'
http_server = WSGIServer((ip, port),
app,
log=logging,
error_log=logging)
print("Server started at: {0}:{1}".format(ip, port))
http_server.serve_forever()
except Exception as exc:
logger.error(str(exc))  # exc.message is Python 2 only
logger.exception(traceback.format_exc())
finally:
# Do something here
pass
# === pyfarm/agent/http/api/assign.py | repo: guidow/pyfarm-agent | license: Apache-2.0 | Python, 16,481 bytes ===
# No shebang line, this module is meant to be imported
#
# Copyright 2014 Oliver Palmer
# Copyright 2014 Ambient Entertainment GmbH & Co. KG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from uuid import uuid4
try:
from httplib import (
ACCEPTED, BAD_REQUEST, CONFLICT, SERVICE_UNAVAILABLE, OK)
except ImportError: # pragma: no cover
from http.client import (
ACCEPTED, BAD_REQUEST, CONFLICT, SERVICE_UNAVAILABLE, OK)
import traceback
from functools import partial
from twisted.internet import reactor
from twisted.internet.defer import DeferredList
from voluptuous import Schema, Required
from pyfarm.core.enums import WorkState, AgentState
from pyfarm.agent.config import config
from pyfarm.agent.http.core.client import post, http_retry_delay
from pyfarm.agent.http.api.base import APIResource
from pyfarm.agent.logger import getLogger
from pyfarm.agent.utility import request_from_master
from pyfarm.agent.sysinfo.memory import free_ram
from pyfarm.agent.utility import JOBTYPE_SCHEMA, TASKS_SCHEMA, JOB_SCHEMA
from pyfarm.jobtypes.core.internals import InsufficientSpaceError
from pyfarm.jobtypes.core.jobtype import JobType
from pyfarm.agent.utility import dumps
logger = getLogger("agent.http.assign")
class Assign(APIResource):
isLeaf = False # this is not really a collection of things
# Schemas used for validating the request before
# the target function will handle it. These make
# assertions about what kind of input data is required
# or not based on the agent's internal code.
SCHEMAS = {
"POST": Schema({
Required("job"): JOB_SCHEMA,
Required("jobtype"): JOBTYPE_SCHEMA,
Required("tasks"): TASKS_SCHEMA})}
def __init__(self, agent):
self.agent = agent
def post(self, **kwargs):
if request_from_master(kwargs["request"]):
config.master_contacted()
request = kwargs["request"]
request_data = kwargs["data"]
# First, get the resources we have *right now*. In some cases
# this means using the functions in pyfarm.core.sysinfo because
# entries in `config` could be slightly out of sync with the system.
memory_free = free_ram()
cpus = config["agent_cpus"]
requires_ram = request_data["job"].get("ram")
requires_cpus = request_data["job"].get("cpus")
if ("agent_id" in request_data and
request_data["agent_id"] != config["agent_id"]):
logger.error("Wrong agent_id in assignment: %s. Our id is %s",
request_data["agent_id"], config["agent_id"])
return (
dumps({"error": "You have the wrong agent. "
"I am %s." % config["agent_id"],
"agent_id": config["agent_id"]}),
BAD_REQUEST
)
elif self.agent.reannounce_lock.locked:
logger.warning("Temporarily rejecting assignment because we "
"are in the middle of a reannounce.")
return (
dumps({"error": "Agent cannot accept assignments because of a "
"reannounce in progress. Try again shortly."}),
SERVICE_UNAVAILABLE
)
elif self.agent.shutting_down:
logger.error("Rejecting assignment because the agent is in the "
"process of shutting down.")
return (
dumps({"error": "Agent cannot accept assignments because it is "
"shutting down."}),
SERVICE_UNAVAILABLE
)
elif "restart_requested" in config \
and config["restart_requested"] is True:
logger.error("Rejecting assignment because of scheduled restart.")
return (
dumps({"error": "Agent cannot accept assignments because of a "
"pending restart."}),
SERVICE_UNAVAILABLE
)
elif "agent_id" not in config:
logger.error(
"Agent has not yet connected to the master or `agent_id` "
"has not been set yet.")
return (
dumps({"error": "agent_id has not been set in the config"}),
SERVICE_UNAVAILABLE
)
# Do we have enough ram?
elif requires_ram is not None and requires_ram > memory_free:
logger.error(
"Task %s requires %sMB of ram, this agent has %sMB free. "
"Rejecting Task %s.",
request_data["job"]["id"], requires_ram, memory_free,
request_data["job"]["id"])
config["free_ram"] = memory_free
return (
dumps({"error": "Not enough ram",
"agent_ram": memory_free,
"requires_ram": requires_ram}),
BAD_REQUEST
)
# Do we have enough cpus (count wise)?
elif requires_cpus is not None and requires_cpus > cpus:
logger.error(
"Task %s requires %s CPUs, this agent has %s CPUs. "
"Rejecting Task %s.",
request_data["job"]["id"], requires_cpus, cpus,
request_data["job"]["id"])
return (
dumps({"error": "Not enough cpus",
"agent_cpus": cpus,
"requires_cpus": requires_cpus}),
BAD_REQUEST
)
new_task_ids = set(task["id"] for task in request_data["tasks"])
for assignment in config["current_assignments"].itervalues():
existing_task_ids = set(x["id"] for x in assignment["tasks"])
# If the assignment is identical to one we already have
if existing_task_ids == new_task_ids:
logger.debug(
"Ignoring repeated assignment of the same batch")
return dumps({"id": assignment["id"]}), ACCEPTED
# If there is only a partial overlap
elif existing_task_ids & new_task_ids:
logger.error("Rejecting assignment with partial overlap with "
"existing assignment.")
unknown_task_ids = new_task_ids - existing_task_ids
return (
dumps({"error": "Partial overlap of tasks",
"rejected_task_ids": list(unknown_task_ids)}),
CONFLICT
)
if not config["agent_allow_sharing"]:
for jobtype in config["jobtypes"].itervalues():
num_finished_tasks = (len(jobtype.finished_tasks) +
len(jobtype.failed_tasks))
if len(jobtype.assignment["tasks"]) > num_finished_tasks:
logger.error("Rejecting an assignment that would require "
"agent sharing")
return (
dumps({
"error": "Agent does not allow multiple "
"assignments",
"rejected_task_ids": list(new_task_ids)}),
CONFLICT
)
assignment_uuid = uuid4()
request_data.update(id=assignment_uuid)
config["current_assignments"][assignment_uuid] = request_data
logger.debug("Accepted assignment %s: %r",
assignment_uuid, request_data)
logger.info("Accept assignment from job %s with %s tasks",
request_data["job"]["title"], len(request_data["tasks"]))
def assignment_failed(result, assign_id):
logger.error(
"Assignment %s failed, result: %r, removing.", assign_id, result)
logger.error(result.getTraceback())
if (len(config["current_assignments"]) <= 1 and
not self.agent.shutting_down):
config["state"] = AgentState.ONLINE
self.agent.reannounce(force=True)
# Do not mark the assignment as failed if the reason for failing
# was that we ran out of disk space
failed = not isinstance(result.value, InsufficientSpaceError)
assignment = config["current_assignments"].pop(assign_id)
if "jobtype" in assignment:
jobtype_id = assignment["jobtype"].pop("id", None)
if jobtype_id:
instance = config["jobtypes"].pop(jobtype_id, None)
instance.stop(
assignment_failed=failed,
avoid_reassignment=not failed,
error="Error in jobtype: %r. "
"Traceback: %s" % (result,
traceback.format_exc()))
def assignment_started(_, assign_id):
logger.debug("Assignment %s has started", assign_id)
config["state"] = AgentState.RUNNING
self.agent.reannounce(force=True)
def remove_assignment(_, assign_id):
assignment = config["current_assignments"].pop(assign_id)
if "jobtype" in assignment:
jobtype_id = assignment["jobtype"].pop("id", None)
if jobtype_id:
config["jobtypes"].pop(jobtype_id, None)
def assignment_stopped(_, assign_id):
logger.debug("Assignment %s has stopped", assign_id)
if (len(config["current_assignments"]) <= 1 and
not self.agent.shutting_down):
config["state"] = AgentState.ONLINE
self.agent.reannounce(force=True)
assignment = config["current_assignments"][assign_id]
if "jobtype" in assignment:
jobtype_id = assignment["jobtype"].pop("id", None)
if jobtype_id:
jobtype = config["jobtypes"].pop(jobtype_id, None)
updates_deferred = DeferredList(
jobtype.task_update_deferreds)
updates_deferred.addBoth(remove_assignment, assign_id)
else:
config["current_assignments"].pop(assign_id)
def restart_if_necessary(_): # pragma: no cover
if "restart_requested" in config and config["restart_requested"]:
stopping = config["agent"].stop()
stopping.addCallbacks(lambda _: reactor.stop(),
lambda _: reactor.stop())
def load_jobtype_failed(result, assign_id):
logger.error(
"Loading jobtype for assignment %s failed, removing.", assign_id)
traceback = result.getTraceback()
logger.debug("Got traceback")
logger.error(traceback)
assignment = config["current_assignments"].pop(assign_id)
# Mark all tasks as failed on master and set an error message
logger.debug("Marking tasks in assignment as failed")
def post_update(post_url, post_data, task, delay=0):
post_func = partial(post, post_url, data=post_data,
callback=lambda x: result_callback(
post_url, post_data, task, x),
errback=lambda x: error_callback(
post_url, post_data, task, x))
reactor.callLater(delay, post_func)
def result_callback(cburl, cbdata, task, response):
if 500 <= response.code < 600:
logger.error(
"Error while marking task %s as failed on master, "
"retrying", task["id"])
post_update(cburl, cbdata, task, delay=http_retry_delay())
elif response.code != OK:
logger.error(
"Could not mark task %s as failed, server response "
"code was %s", task["id"], response.code)
else:
logger.info(
"Marked task %s as failed on master", task["id"])
def error_callback(cburl, cbdata, task, failure_reason):
logger.error(
"Error while marking task %s as failed (%s), retrying",
task["id"], failure_reason)
post_update(cburl, cbdata, task, delay=http_retry_delay())
for task in assignment["tasks"]:
url = "%s/jobs/%s/tasks/%s" % (
config["master_api"], assignment["job"]["id"], task["id"])
data = {
"state": WorkState.FAILED,
"last_error": traceback}
post_update(url, data, task)
# If the loading was partially successful for some reason, there
# might already be an entry for this jobtype in the config.
# Remove it if it exists.
if "jobtype" in assignment:
jobtype_id = assignment["jobtype"].pop("id", None)
if jobtype_id:
config["jobtypes"].pop(jobtype_id, None)
def loaded_jobtype(jobtype_class, assign_id):
# TODO: report error to master
if hasattr(jobtype_class, "getTraceback"):
logger.error(jobtype_class.getTraceback())
return
# TODO: add call to prepare_for_job
# TODO: add call to spawn_persistent_process
# Instance the job type and pass in the assignment data.
instance = jobtype_class(request_data)
if not isinstance(instance, JobType):
raise TypeError(
"Expected a subclass of "
"pyfarm.jobtypes.core.jobtype.JobType")
# TODO: add callback to cleanup_after_job
# TODO: add callback to stop persistent process
try:
started_deferred, stopped_deferred = instance._start()
started_deferred.addCallback(assignment_started, assign_id)
started_deferred.addErrback(assignment_failed, assign_id)
stopped_deferred.addCallback(assignment_stopped, assign_id)
stopped_deferred.addErrback(assignment_failed, assign_id)
stopped_deferred.addBoth(restart_if_necessary)
stopped_deferred.addBoth(
lambda *args: instance._remove_tempdirs())
stopped_deferred.addBoth(
lambda *args: instance._close_logs())
stopped_deferred.addBoth(
lambda *args: instance._upload_logfile())
except Exception as e:
logger.error("Error on starting jobtype, stopping it now. "
"Error was: %r. Traceback: %s", e,
traceback.format_exc())
instance.stop(assignment_failed=True,
error="Error while loading jobtype: %r. "
"Traceback: %s" %
(e, traceback.format_exc()))
assignment = config["current_assignments"].pop(assign_id)
if "jobtype" in assignment:
jobtype_id = assignment["jobtype"].pop("id", None)
if jobtype_id:
config["jobtypes"].pop(jobtype_id, None)
# Load the job type then pass the class along to the
# callback. No errback here because all the errors
# are handled internally in this case.
jobtype_loader = JobType.load(request_data)
jobtype_loader.addCallback(loaded_jobtype, assignment_uuid)
jobtype_loader.addErrback(load_jobtype_failed, assignment_uuid)
return dumps({"id": assignment_uuid}), ACCEPTED
# === examples/multi_agent.py | repo: spMohanty/marlo | license: MIT | Python, 1,387 bytes | 214 stars ===
#!/usr/bin/env python
# Please ensure that you have two Minecraft clients running on port 10000 and
# port 10001 by doing :
# $MALMO_MINECRAFT_ROOT/launchClient.sh -port 10000
# $MALMO_MINECRAFT_ROOT/launchClient.sh -port 10001
import marlo
client_pool = [('127.0.0.1', 10000),('127.0.0.1', 10001)]
join_tokens = marlo.make('MarLo-MazeRunner-v0',
params={
"client_pool": client_pool,
"agent_names" :
[
"MarLo-Agent-0",
"MarLo-Agent-1"
]
})
# As this is a two-agent scenario,
# there will be just two join tokens
assert len(join_tokens) == 2
@marlo.threaded
def run_agent(join_token):
env = marlo.init(join_token)
observation = env.reset()
done = False
count = 0
while not done:
_action = env.action_space.sample()
obs, reward, done, info = env.step(_action)
print("reward:", reward)
print("done:", done)
print("info", info)
env.close()
# Run agent-0
thread_handler_0, _ = run_agent(join_tokens[0])
# Run agent-1
thread_handler_1, _ = run_agent(join_tokens[1])
# Wait until Both the threads complete execution
thread_handler_0.join()
thread_handler_1.join()
print("Episode Run complete")
# === pyraminxolver/setup.py | repo: Odder/PyraminXolver | license: MIT | Python, 1,191 bytes | 1 star ===
from collections import deque
import pickle
from . import Pyraminx, PYRAMINX_CASE_PATH
from multiprocessing import Pool, cpu_count
def setup():
graph = create_graph()
with open(PYRAMINX_CASE_PATH, 'wb') as f:
pickle.dump(graph, f, pickle.HIGHEST_PROTOCOL)
def create_graph():
with Pool(cpu_count()) as p:
graph = p.map(explore_node, range(933120))  # one node per state id
graph = generate_depths(graph)
return graph
def explore_node(node):
state = Pyraminx.id_to_state(node)
node_values = [-1, -1, -1, -1, -1, -1, -1, -1, -1]
for i in range(1, 9):
transformation = Pyraminx.move_transformations[i - 1]
new_state = Pyraminx.apply_move(state, transformation)
new_id = Pyraminx.state_to_id(new_state)
node_values[i] = new_id
return node_values
def generate_depths(graph):
queue = deque()
graph[0][0] = 0
queue.append(0)
while queue:
i = queue.popleft()
depth = graph[i][0]
for edge in graph[i][1:]:
if graph[edge][0] == -1:
graph[edge][0] = depth + 1
queue.append(edge)
return graph
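# Note: generate_depths is a breadth-first search from the solved state
# (node 0), so graph[i][0] ends up holding the minimal move count needed to
# reach state i; the 933120 in create_graph is presumably the number of
# distinct Pyraminx states once the trivial tips are ignored.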
if __name__ == '__main__':
setup()
# === APIFunctions/replace_resources.py | repo: cul/archivesspace | license: MIT | Python, 2,321 bytes | 4 stars ===
# Script to replace text in a designated field in a resource and post the resource back to API.
# Requirements:
# - ASFunctions.py
# - A csv of format repo,asid
# - sheetFeeder (optional, for reporting purposes)
import ASFunctions as asf
import json
from pprint import pprint
import re
import csv
from sheetFeeder import dataSheet
def main():
asf.setServer('Test')
# Google sheet used for reporting changes.
the_report_sheet=dataSheet('1wNO0t2j5G9U0hUmb7E-jLd4T5skTs1aRxN7HrlyZwEI','resources!A:Z')
id_file = 'resource_replacements.csv'
output_folder = 'output/resource_replacements'
# Read a list of repo and object ids (csv)
the_ids = []
with open(id_file) as ids:
for row in csv.reader(ids):
the_ids.append([row[0], row[1]])
# Search/replace patterns
the_search_pattern = 'NCC'
the_replace_pattern = 'NNC'
the_before_afters = []
the_heads = ['repo', 'asid','before', 'after']
the_before_afters.append(the_heads)
for an_obj in the_ids:
out_path = output_folder + '/' + an_obj[0] + '_' + an_obj[1] + '_old.json'
# read from API
x = asf.getResource(an_obj[0],an_obj[1])
# Save copy of existing object
print('Saving data to ' + out_path + '....')
with open(out_path, "w+") as f:
f.write(x)
x = json.loads(x)
the_old_field_data = x['user_defined']['string_2']
y = x
y['user_defined']['string_2'] = re.sub(the_search_pattern, the_replace_pattern, x['user_defined']['string_2'])
if y['user_defined']['string_2'] == the_old_field_data:
the_new_field_data = "[no change]"
else:
the_new_field_data = y['user_defined']['string_2']
the_before_afters.append([an_obj[0], an_obj[1], '{string_2} ' + the_old_field_data, '{string_2} ' + the_new_field_data ])
# convert dict back to json for posting.
z = json.dumps(y)
# Post the fixed object back to API.
post = asf.postResource(an_obj[0], an_obj[1], z)
print(post)
# Report changes to Google Sheet
print('Writing before/after info to sheet...')
the_report_sheet.clear()
the_report_sheet.appendData(the_before_afters)
if __name__ == '__main__':
main()
# === packages/sia/datetime.py | repo: varunsrivatsa/Sia | license: MIT | Python, 852 bytes | 3 stars ===
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import utils
from datetime import datetime
import calendar
def run(string, entities):
"""Sia tells time and date"""
string = string.lower()
now = datetime.now()
day = datetime.today()
if string.find("time") != -1 and string.find("date") == -1:
return utils.output('end', 'datetime', "Time is " + now.strftime("%I:%M %p"))
elif string.find("date") != -1 and string.find("time") == -1:
return utils.output('end', 'datetime', now.strftime("%B %d, %Y"))
elif string.find("day") != -1:
return utils.output('end', 'datetime', "Today is " + calendar.day_name[day.weekday()])
elif string.find("time") != -1 and string.find("date") != -1:
return utils.output('end', 'datetime', "Today's " + now.strftime(" date is %d-%m-%Y, and time is %I:%M %p"))
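# A hypothetical invocation, assuming utils.output packages the answer string:
#   run("what time is it", {})   # -> e.g. "Time is 09:41 AM"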
# === python/rl_agent/model.py | repo: iShohei220/Grounded-Language-Learning-in-Pytorch | license: CC-BY-4.0 | Python, 1,477 bytes ===
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
import numpy as np
from collections import namedtuple
from network_modules import *
State = namedtuple('State', ('visual', 'instruction'))
class Model(nn.Module):
def __init__(self, action_space):
super(Model, self).__init__()
# Core modules
self.vision_m = Vision_M()
self.language_m = Language_M()
self.mixing_m = Mixing_M()
self.action_m = Action_M()
# Action selection and Value Critic
self.policy = Policy(action_space=action_space)
# Auxiliary networks
self.tAE = temporal_AutoEncoder(self.policy, self.vision_m)
self.language_predictor = Language_Prediction(self.language_m)
self.reward_predictor = RewardPredictor(self.vision_m, self.language_m, self.mixing_m)
def forward(self, x):
'''
Argument:
x: State namedtuple with fields
visual: environment image, shape [batch_size, 84, 84, 3]
instruction: natural-language instruction tokens, shape [batch_size, seq]
'''
vision_out = self.vision_m(x.visual)
language_out = self.language_m(x.instruction)
mix_out = self.mixing_m(vision_out, language_out)
action_out = self.action_m(mix_out)
action_prob, value = self.policy(action_out)
return action_prob, value
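# A usage sketch with shapes taken from the docstring above; the exact
# instruction encoding depends on network_modules (not shown), so treat these
# tensors as placeholders:
#   state = State(visual=img_batch,        # [batch_size, 84, 84, 3]
#                 instruction=tok_batch)   # [batch_size, seq]
#   action_prob, value = Model(action_space=n_actions)(state)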
# === convlab/modules/e2e/multiwoz/Mem2Seq/Mem2Seq.py | repo: ngduyanhece/ConvLab | license: MIT | Python, 2,298 bytes | 405 stars ===
# -*- coding: utf-8 -*-
# Modified by Microsoft Corporation.
# Licensed under the MIT license.
"""
"""
import numpy as np
import torch
from nltk import word_tokenize
from .models.Mem2Seq import Mem2Seq
from .utils.config import args, USE_CUDA, UNK_token
from .utils.utils_woz_mem2seq import prepare_data_seq, generate_memory, MEM_TOKEN_SIZE
def plain2tensor(word2index, memory):
src_seqs = []
for token in memory:
src_seq = []
for word in token:
if word in word2index:
src_seq.append(word2index[word])
else:
src_seq.append(UNK_token)
src_seqs.append([src_seq])
return torch.LongTensor(src_seqs).cuda() if USE_CUDA else torch.LongTensor(src_seqs)
def denormalize(uttr):
uttr = uttr.replace(' -s', 's')
uttr = uttr.replace(' -ly', 'ly')
uttr = uttr.replace(' -er', 'er')
return uttr
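# denormalize undoes the corpus' suffix tokenization, e.g.
#   denormalize('a cheap -er hotel with 4 star -s') -> 'a cheaper hotel with 4 stars'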
class Mem2seq:
def __init__(self):
directory = args['path'].split("/")
task = directory[-1].split('HDD')[0]
HDD = directory[-1].split('HDD')[1].split('BSZ')[0]
L = directory[-1].split('L')[1].split('lr')[0]
_, _, _, _, self.lang, max_len, max_r = prepare_data_seq(task, batch_size=1)
self.model = Mem2Seq(int(HDD),max_len,max_r,self.lang,args['path'],task, lr=0.0, n_layers=int(L), dropout=0.0, unk_mask=0)
self.reset()
def reset(self):
self.t = 0
self.memory = []
def predict(self, query):
usr = query
print('Mem2Seq usr:', usr)
#example input: 'please find a restaurant called nusha .'
self.t += 1
print('Mem2Seq turn:', self.t)
usr = ' '.join(word_tokenize(usr.lower()))
self.memory += generate_memory(usr, '$u', self.t)
src_plain = (self.memory+[['$$$$']*MEM_TOKEN_SIZE],)
src_seqs = plain2tensor(self.lang.word2index, src_plain[0])
words = self.model.evaluate_batch(1, src_seqs, [len(src_plain[0])], None, None, None, None, src_plain)
row = np.transpose(words)[0].tolist()
if '<EOS>' in row:
row = row[:row.index('<EOS>')]
sys = ' '.join(row)
sys = denormalize(sys)
print('Mem2Seq sys:', sys)
self.memory += generate_memory(sys, '$s', self.t)
return sys
# === wwwhisper_admin/tests/tests_views.py | repo: wrr/wwwhisper | license: MIT | Python, 21,754 bytes | 54 stars ===
# wwwhisper - web access control.
# Copyright (C) 2012-2018 Jan Wrobel <jan@mixedbit.org>

from wwwhisper_auth.models import Site
from wwwhisper_auth.tests.utils import HttpTestCase
from wwwhisper_auth.tests.utils import TEST_SITE

import json

FAKE_UUID = '41be0192-0fcc-4a9c-935d-69243b75533c'
TEST_USER_EMAIL = 'foo@bar.org'
TEST_LOCATION = '/pub/kika/'
TEST_ALIAS = 'https://foo.example.org'

def uid_regexp():
    return '[0-9a-z-]{36}'

def extract_uuid(urn):
    return urn.replace('urn:uuid:', '')

class AdminViewTestCase(HttpTestCase):

    def add_user(self, user_name=TEST_USER_EMAIL):
        response = self.post('/wwwhisper/admin/api/users/',
                             {'email' : user_name})
        self.assertEqual(201, response.status_code)
        return json.loads(response.content)

    def add_location(self):
        response = self.post('/wwwhisper/admin/api/locations/',
                             {'path' : TEST_LOCATION})
        self.assertEqual(201, response.status_code)
        return json.loads(response.content)

    def add_alias(self):
        response = self.post('/wwwhisper/admin/api/aliases/',
                             {'url' : TEST_ALIAS})
        self.assertEqual(201, response.status_code)
        return json.loads(response.content)

class UserTest(AdminViewTestCase):

    def test_add_user(self):
        response = self.post('/wwwhisper/admin/api/users/',
                             {'email' : TEST_USER_EMAIL})
        self.assertEqual(201, response.status_code)
        parsed_response_body = json.loads(response.content)
        user_uuid = extract_uuid(parsed_response_body['id'])
        self.assertRegexpMatches(parsed_response_body['id'],
                                 '^urn:uuid:%s$' % uid_regexp())
        self.assertEqual(TEST_USER_EMAIL, parsed_response_body['email'])
        self_url = '%s/wwwhisper/admin/api/users/%s/' % (TEST_SITE, user_uuid)
        self.assertEqual(self_url, parsed_response_body['self'])
        self.assertEqual(self_url, response['Location'])
        self.assertEqual(self_url, response['Content-Location'])

    def test_get_user(self):
        parsed_add_user_response_body = self.add_user()
        get_response = self.get(parsed_add_user_response_body['self'])
        self.assertEqual(200, get_response.status_code)
        parsed_get_response_body = json.loads(get_response.content)
        self.assertEqual(parsed_add_user_response_body,
                         parsed_get_response_body)

    def test_delete_user(self):
        user_url = self.add_user()['self']
        self.assertEqual(204, self.delete(user_url).status_code)
        self.assertEqual(404, self.get(user_url).status_code)

    def test_get_users_list(self):
        self.assertEqual(201, self.post('/wwwhisper/admin/api/users/',
                                        {'email' : 'foo@bar.org'}).status_code)
        self.assertEqual(201, self.post('/wwwhisper/admin/api/users/',
                                        {'email' : 'baz@bar.org'}).status_code)
        self.assertEqual(201, self.post('/wwwhisper/admin/api/users/',
                                        {'email' : 'boo@bar.org'}).status_code)
        response = self.get('/wwwhisper/admin/api/users/')
        self.assertEqual(200, response.status_code)
        parsed_response_body = json.loads(response.content)
        self.assertEqual('%s/wwwhisper/admin/api/users/' % TEST_SITE,
                         parsed_response_body['self'])
        users = parsed_response_body['users']
        self.assertEqual(3, len(users))
        self.assertItemsEqual(['foo@bar.org', 'baz@bar.org', 'boo@bar.org'],
                              [item['email'] for item in users])

    def test_get_not_existing_user(self):
        response = self.get('/wwwhisper/admin/api/users/%s/' % FAKE_UUID)
        self.assertEqual(404, response.status_code)
        self.assertRegexpMatches(response.content, 'User not found')

    def test_add_user_invalid_email(self):
        response = self.post('/wwwhisper/admin/api/users/',
                             {'email' : 'foo.bar'})
        self.assertEqual(400, response.status_code)
        self.assertRegexpMatches(response.content, 'Invalid email format')

    def test_add_existing_user(self):
        self.add_user()
        response = self.post('/wwwhisper/admin/api/users/',
                             {'email' : TEST_USER_EMAIL})
        self.assertEqual(400, response.status_code)
        self.assertRegexpMatches(response.content, 'User already exists')

    def test_delete_user_twice(self):
        user_url = self.add_user()['self']
        response = self.delete(user_url)
        self.assertEqual(204, response.status_code)
        response = self.delete(user_url)
        self.assertEqual(404, response.status_code)
        self.assertRegexpMatches(response.content, 'User not found')

    def test_users_limit(self):
        limit = 8
        Site.users_limit = limit
        for i in range(0, limit):
            email = '%s%d' % (TEST_USER_EMAIL, i)
            response = self.post('/wwwhisper/admin/api/users/',
                                 {'email' : email})
            self.assertEqual(201, response.status_code)
        email = '%s%d' % (TEST_USER_EMAIL, limit)
        response = self.post('/wwwhisper/admin/api/users/',
                             {'email' : email})
        self.assertEqual(400, response.status_code)
        self.assertRegexpMatches(response.content, 'Users limit exceeded')

class LocationTest(AdminViewTestCase):

    def test_add_location(self):
        response = self.post('/wwwhisper/admin/api/locations/',
                             {'path' : TEST_LOCATION})
        self.assertEqual(201, response.status_code)
        parsed_response_body = json.loads(response.content)
        location_uuid = extract_uuid(parsed_response_body['id'])
        self.assertRegexpMatches(parsed_response_body['id'],
                                 '^urn:uuid:%s$' % uid_regexp())
        self.assertEqual(TEST_LOCATION, parsed_response_body['path'])
        self.assertTrue('openAccess' not in parsed_response_body)
        self_url = '{0}/wwwhisper/admin/api/locations/{1}/'.format(
            TEST_SITE, location_uuid)
        self.assertEqual(self_url, parsed_response_body['self'])
        self.assertEqual(self_url, response['Location'])
        self.assertEqual(self_url, response['Content-Location'])

    def test_get_location(self):
        parsed_add_location_response_body = self.add_location()
        get_response = self.get(parsed_add_location_response_body['self'])
        self.assertEqual(200, get_response.status_code)
        parsed_get_response_body = json.loads(get_response.content)
        self.assertEqual(parsed_add_location_response_body,
                         parsed_get_response_body)

    def test_grant_open_access_to_location(self):
        location = self.add_location()
        self.assertTrue('openAccess' not in location)
        open_access_url = location['self'] + 'open-access/'
        put_response = self.put(open_access_url)
        parsed_response_body = json.loads(put_response.content)
        self.assertEqual(201, put_response.status_code)
        self.assertEqual(open_access_url, put_response['Location'])
        self.assertEqual(open_access_url, parsed_response_body['self'])
        # Get location again and make sure openAccess attribute is now true.
        location = json.loads(self.get(location['self']).content)
        self.assertTrue('openAccess' in location)

    def test_grant_open_access_to_location_if_already_granted(self):
        location = self.add_location()
        open_access_url = location['self'] + 'open-access/'
        put_response1 = self.put(open_access_url)
        put_response2 = self.put(open_access_url)
        self.assertEqual(200, put_response2.status_code)
        self.assertFalse(put_response2.has_header('Location'))
        self.assertEqual(put_response1.content, put_response2.content)

    def test_check_open_access_to_location(self):
        location = self.add_location()
        open_access_url = location['self'] + 'open-access/'
        self.put(open_access_url)
        get_response = self.get(open_access_url)
        parsed_response_body = json.loads(get_response.content)
        self.assertEqual(200, get_response.status_code)
        self.assertEqual(open_access_url, parsed_response_body['self'])

    def test_revoke_open_access_to_location(self):
        location = self.add_location()
        open_access_url = location['self'] + 'open-access/'
        self.put(open_access_url)
        delete_response = self.delete(open_access_url)
        self.assertEqual(204, delete_response.status_code)
        get_response = self.get(open_access_url)
        self.assertEqual(404, get_response.status_code)

    def test_revoke_open_access_to_location_if_already_revoked(self):
        location = self.add_location()
        open_access_url = location['self'] + 'open-access/'
        self.put(open_access_url)
        self.delete(open_access_url)
        delete_response = self.delete(open_access_url)
        self.assertEqual(404, delete_response.status_code)

    def test_delete_location(self):
        location_url = self.add_location()['self']
        self.assertEqual(204, self.delete(location_url).status_code)
        self.assertEqual(404, self.get(location_url).status_code)

    def test_get_locations_list(self):
        self.assertEqual(201, self.post('/wwwhisper/admin/api/locations/',
                                        {'path' : '/foo/bar'}).status_code)
        self.assertEqual(201, self.post('/wwwhisper/admin/api/locations/',
                                        {'path' : '/baz/bar'}).status_code)
        self.assertEqual(201, self.post('/wwwhisper/admin/api/locations/',
                                        {'path' : '/boo/bar/'}).status_code)
        response = self.get('/wwwhisper/admin/api/locations/')
        self.assertEqual(200, response.status_code)
        parsed_response_body = json.loads(response.content)
        self.assertEqual('%s/wwwhisper/admin/api/locations/' % TEST_SITE,
                         parsed_response_body['self'])
        locations = parsed_response_body['locations']
        self.assertEqual(3, len(locations))
        self.assertItemsEqual(['/foo/bar', '/baz/bar', '/boo/bar/'],
                              [item['path'] for item in locations])

    def test_get_not_existing_location(self):
        response = self.get('/wwwhisper/admin/api/locations/%s/' % FAKE_UUID)
        self.assertEqual(404, response.status_code)
        self.assertRegexpMatches(response.content, 'Location not found')

    def test_add_location_invalid_path(self):
        response = self.post('/wwwhisper/admin/api/locations/',
                             {'path' : '/foo/../bar'})
        self.assertEqual(400, response.status_code)
        self.assertRegexpMatches(response.content,
                                 'Path should be absolute and normalized')

    def test_add_existing_location(self):
        self.add_location()
        response = self.post('/wwwhisper/admin/api/locations/',
                             {'path' : TEST_LOCATION})
        self.assertEqual(400, response.status_code)
        self.assertRegexpMatches(response.content, 'Location already exists')

    def test_delete_location_twice(self):
        location_url = self.add_location()['self']
        response = self.delete(location_url)
        self.assertEqual(204, response.status_code)
        response = self.delete(location_url)
        self.assertEqual(404, response.status_code)
        self.assertRegexpMatches(response.content, 'Location not found')

    def test_locations_limit(self):
        limit = 7
        Site.locations_limit = limit
        for i in range(0, limit):
            path = '%s%d' % (TEST_LOCATION, i)
            response = self.post('/wwwhisper/admin/api/locations/',
                                 {'path' : path})
            self.assertEqual(201, response.status_code)
        path = '%s%d' % (TEST_LOCATION, limit)
        response = self.post('/wwwhisper/admin/api/locations/', {'path' : path})
        self.assertEqual(400, response.status_code)
        self.assertRegexpMatches(response.content, 'Locations limit exceeded')

class AccessControlTest(AdminViewTestCase):

    def can_access(self, location_url, user_uuid):
        response = self.get(location_url + 'allowed-users/' + user_uuid + '/')
        self.assertTrue(response.status_code == 200
                        or response.status_code == 404)
        return response.status_code == 200

    def test_grant_access(self):
        location_url = self.add_location()['self']
        response = self.add_user()
        user_url = response['self']
        user_urn = response['id']
        user_uuid = extract_uuid(user_urn)
        response = self.put(location_url + 'allowed-users/' + user_uuid + '/')
        self.assertEqual(201, response.status_code)
        parsed_response_body = json.loads(response.content)
        resource_url = location_url + 'allowed-users/' + user_uuid + '/'
        self.assertEqual(resource_url, response['Location'])
        self.assertFalse(response.has_header('Content-Location'))
        self.assertEqual(resource_url, parsed_response_body['self'])
        self.assertEqual(user_url, parsed_response_body['user']['self'])
        self.assertEqual(user_urn, parsed_response_body['user']['id'])
        self.assertEqual(TEST_USER_EMAIL, parsed_response_body['user']['email'])

    def test_grant_access_creates_allowed_user_resource(self):
        location_url = self.add_location()['self']
        response = self.add_user()
        user_uuid = extract_uuid(response['id'])
        self.assertFalse(self.can_access(location_url, user_uuid))
        self.put(location_url + 'allowed-users/' + user_uuid + "/")
        self.assertTrue(self.can_access(location_url, user_uuid))

    def test_revoke_access(self):
        location_url = self.add_location()['self']
        response = self.add_user()
        user_uuid = extract_uuid(response['id'])
        # Allow access.
        self.put(location_url + 'allowed-users/' + user_uuid + "/")
        self.assertTrue(self.can_access(location_url, user_uuid))
        # Revoke access.
        response = self.delete(
            location_url + 'allowed-users/' + user_uuid + "/")
        self.assertEqual(204, response.status_code)
        self.assertFalse(self.can_access(location_url, user_uuid))

    def test_location_lists_allowed_users(self):
        location_url = self.add_location()['self']
        # Create two users.
        user1_urn = self.add_user('user1@acme.com')['id']
        user1_uuid = extract_uuid(user1_urn)
        user2_urn = self.add_user('user2@acme.com')['id']
        user2_uuid = extract_uuid(user2_urn)
        self.put(location_url + 'allowed-users/' + user1_uuid + "/")
        self.put(location_url + 'allowed-users/' + user2_uuid + "/")
        response = self.get(location_url)
        parsed_response_body = json.loads(response.content)
        allowed_users = parsed_response_body['allowedUsers']
        self.assertEqual(2, len(allowed_users))
        self.assertItemsEqual(['user1@acme.com', 'user2@acme.com'],
                              [item['email'] for item in allowed_users])
        self.assertItemsEqual([user1_urn, user2_urn],
                              [item['id'] for item in allowed_users])

    def test_grant_access_to_not_existing_location(self):
        location_url = '/wwwhisper/admin/api/locations/%s/' % FAKE_UUID
        user_uuid = extract_uuid(self.add_user()['id'])
        response = self.put(location_url + 'allowed-users/' + user_uuid + '/')
        self.assertEqual(404, response.status_code)
        self.assertRegexpMatches(response.content, 'Location not found')

    def test_grant_access_for_not_existing_user(self):
        location_url = self.add_location()['self']
        user_uuid = FAKE_UUID
        response = self.put(location_url + 'allowed-users/' + user_uuid + '/')
        self.assertEqual(404, response.status_code)
        self.assertRegexpMatches(response.content, 'User not found')

    # PUT should be idempotent, granting access for the second time
    # should not return an error.
    def test_grant_access_twice(self):
        location_url = self.add_location()['self']
        response = self.add_user()
        user_url = response['self']
        user_uuid = extract_uuid(response['id'])
        response1 = self.put(location_url + 'allowed-users/' + user_uuid + "/")
        self.assertEqual(201, response1.status_code)
        self.assertTrue(response1.has_header('Location'))
        response2 = self.put(location_url + 'allowed-users/' + user_uuid + "/")
        self.assertEqual(200, response2.status_code)
        self.assertFalse(response2.has_header('Location'))
        self.assertEqual(response1.content, response2.content)

    def test_revoke_access_twice(self):
        location_url = self.add_location()['self']
        response = self.add_user()
        user_url = response['self']
        user_uuid = extract_uuid(response['id'])
        # Allow access.
        self.put(location_url + 'allowed-users/' + user_uuid + "/")
        self.assertTrue(self.can_access(location_url, user_uuid))
        # Revoke access.
        response = self.delete(
            location_url + 'allowed-users/' + user_uuid + "/")
        self.assertEqual(204, response.status_code)
        response = self.delete(
            location_url + 'allowed-users/' + user_uuid + "/")
        self.assertEqual(404, response.status_code)
        self.assertRegexpMatches(response.content,
                                 'User can not access location.')
        self.assertFalse(self.can_access(location_url, user_uuid))

class AliasTest(AdminViewTestCase):

    def test_add_alias(self):
        response = self.post('/wwwhisper/admin/api/aliases/',
                             {'url' : TEST_ALIAS})
        self.assertEqual(201, response.status_code)
        parsed_response_body = json.loads(response.content)
        alias_uuid = extract_uuid(parsed_response_body['id'])
        self.assertRegexpMatches(parsed_response_body['id'],
                                 '^urn:uuid:%s$' % uid_regexp())
        self.assertEqual(TEST_ALIAS, parsed_response_body['url'])
        self_url = '{0}/wwwhisper/admin/api/aliases/{1}/'.format(
            TEST_SITE, alias_uuid)
        self.assertEqual(self_url, parsed_response_body['self'])
        self.assertEqual(self_url, response['Location'])
        self.assertEqual(self_url, response['Content-Location'])

    def test_get_alias(self):
        parsed_post_response_body = self.add_alias()
        get_response = self.get(parsed_post_response_body['self'])
        self.assertEqual(200, get_response.status_code)
        parsed_get_response_body = json.loads(get_response.content)
        self.assertEqual(parsed_post_response_body, parsed_get_response_body)

    def test_delete_alias(self):
        alias_url = self.add_alias()['self']
        self.assertEqual(204, self.delete(alias_url).status_code)
        self.assertEqual(404, self.get(alias_url).status_code)

    def test_get_aliases_list(self):
        self.assertEqual(201, self.post('/wwwhisper/admin/api/aliases/',
                                        {'url' : 'http://foo.org'}).status_code)
        self.assertEqual(201, self.post('/wwwhisper/admin/api/aliases/',
                                        {'url' : 'http://bar.org'}).status_code)
        response = self.get('/wwwhisper/admin/api/aliases/')
        self.assertEqual(200, response.status_code)
        parsed_response_body = json.loads(response.content)
        self.assertEqual('%s/wwwhisper/admin/api/aliases/' % TEST_SITE,
                         parsed_response_body['self'])
        aliases = parsed_response_body['aliases']
        # Two created aliases + the original one.
        self.assertEqual(3, len(aliases))
        self.assertItemsEqual(['http://foo.org', 'http://bar.org',
                               'https://foo.example.org:8080'],
                              [item['url'] for item in aliases])

class SkinTest(AdminViewTestCase):

    def test_get_skin(self):
        response = self.get('/wwwhisper/admin/api/skin/')
        self.assertEqual(200, response.status_code)
        skin = json.loads(response.content)
        self.assertEqual('wwwhisper: Web Access Control', skin['title'])
        self.assertEqual('Protected site', skin['header'])
        self.assertRegexpMatches(skin['message'], 'Access to this site is')
        self.assertTrue(skin['branding'])

    def test_put_skin(self):
        response = self.put('/wwwhisper/admin/api/skin/',
                            {'title': 'xyz',
                             'header': 'foo',
                             'message': 'bar',
                             'branding': False})
        self.assertEqual(200, response.status_code)
        skin = json.loads(response.content)
        self.assertEqual('xyz', skin['title'])
        self.assertEqual('foo', skin['header'])
        self.assertRegexpMatches(skin['message'], 'bar')
        self.assertFalse(skin['branding'])

    def test_put_invalid_skin(self):
        response = self.put('/wwwhisper/admin/api/skin/',
                            {'title': 'xyz' * 1000,
                             'header': '',
                             'message': '',
                             'branding': False})
        self.assertEqual(400, response.status_code)
        self.assertRegexpMatches(response.content,
                                 'Failed to update login page')
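
# These tests target Django's test framework; a typical invocation
# (illustrative, assuming a standard wwwhisper checkout with manage.py) is:
#
#   ./manage.py test wwwhisper_admin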
| 44.036437
| 80
| 0.635699
| 2,487
| 21,754
| 5.319662
| 0.072376
| 0.104308
| 0.058503
| 0.036584
| 0.744822
| 0.697506
| 0.638549
| 0.589267
| 0.54452
| 0.498866
| 0
| 0.015899
| 0.242484
| 21,754
| 493
| 81
| 44.125761
| 0.786941
| 0.016549
| 0
| 0.465296
| 0
| 0
| 0.130951
| 0.053129
| 0
| 0
| 0
| 0
| 0.341902
| 1
| 0.113111
| false
| 0
| 0.010283
| 0.005141
| 0.154242
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
abe6448fcecf9dc6bfc75f1868f21902baa70a7a
| 2,139
|
py
|
Python
|
text_plistlib/plistlib.py
|
Artoria2e5/text-plistlib
|
5f8c868341bc7e933c8c6e2587dbd75ad0dc93b0
|
[
"MIT"
] | null | null | null |
text_plistlib/plistlib.py
|
Artoria2e5/text-plistlib
|
5f8c868341bc7e933c8c6e2587dbd75ad0dc93b0
|
[
"MIT"
] | null | null | null |
text_plistlib/plistlib.py
|
Artoria2e5/text-plistlib
|
5f8c868341bc7e933c8c6e2587dbd75ad0dc93b0
|
[
"MIT"
] | null | null | null |
"""
Wrapper providing a plistlib interface. Better than a patch?
"""
__all__ = [
"InvalidFileException",
"FMT_XML",
"FMT_BINARY",
"FMT_TEXT",
"load",
"dump",
"loads",
"dumps",
"UID",
]
import plistlib as pl
from enum import Enum
from io import BytesIO
from typing import BinaryIO
from .impl import FMT_TEXT_HANDLER, TextPlistTypes
UID = pl.UID
InvalidFileException = pl.InvalidFileException
PF = Enum("TextPlistFormat", "FMT_XML FMT_BINARY FMT_TEXT", module=__name__)
globals().update(PF.__members__)
translation = {
PF.FMT_XML: pl.FMT_XML,
PF.FMT_BINARY: pl.FMT_BINARY,
}
def load(fp: BinaryIO, *, fmt=None, **kwargs) -> TextPlistTypes:
"""Read a .plist file (forwarding all arguments)."""
if fmt is None:
header = fp.read(32)
fp.seek(0)
if FMT_TEXT_HANDLER["detect"](header):
fmt = PF.FMT_TEXT
if fmt == PF.FMT_TEXT:
return FMT_TEXT_HANDLER["parser"](**kwargs).parse(fp)
else:
# This one can fail a bit more violently like the original
return pl.load(fp, fmt=translation[fmt], **kwargs)
def loads(value: bytes, **kwargs) -> TextPlistTypes:
"""
Read a .plist file from a bytes object.
>>> loads(b'{4=1;}', fmt=FMT_TEXT)
{'4': '1'}
"""
return load(BytesIO(value), **kwargs)
def dump(value: TextPlistTypes, fp, *, fmt=PF.FMT_TEXT, **kwargs):
if fmt == PF.FMT_TEXT:
writer = FMT_TEXT_HANDLER["writer"](fp, **kwargs)
writer.write(value)
else:
# ignore type -- let the real plistlib complain about None :)
return pl.dump(value, fp, fmt=translation.get(fmt), **kwargs) # type: ignore
def dumps(value: TextPlistTypes, **kwargs) -> bytes:
"""
>>> dumps({ "1": [2,3,4,None,5] })
b'{\n\t"1" = (\n\t\t<*I2>,\n\t\t<*I3>,\n\t\t<*I4>,\n\t\t"",\n\t\t<*I5>,\n\t);\n}'
"""
fp = BytesIO()
dump(value, fp, **kwargs)
return fp.getvalue()
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
print(dumps(eval(sys.argv[1])))
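
# Round-trip sketch (illustrative; text-plist values are parsed as strings,
# as the loads() doctest above shows):
#
#   data = loads(b'{name = value;}', fmt=FMT_TEXT)   # -> {'name': 'value'}
#   blob = dumps(data)                               # text-plist bytes again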
| 25.771084
| 86
| 0.586255
| 287
| 2,139
| 4.219512
| 0.348432
| 0.063584
| 0.012386
| 0.039637
| 0.115607
| 0.092486
| 0
| 0
| 0
| 0
| 0
| 0.011897
| 0.253389
| 2,139
| 82
| 87
| 26.085366
| 0.7464
| 0.240767
| 0
| 0.08
| 0
| 0
| 0.086008
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.12
| 0
| 0.3
| 0.02
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
abe683c77717a86c75c2669b78ad1d9d8749390b
| 3,992
|
py
|
Python
|
treat/moc/kinf/make_kinf.py
|
tjlaboss/tasty_treat
|
5a137b49c6648eda6500025de8bab9c8dcc78d45
|
[
"MIT"
] | 3
|
2019-03-04T22:52:07.000Z
|
2022-01-23T12:28:58.000Z
|
treat/moc/kinf/make_kinf.py
|
tjlaboss/tasty_treat
|
5a137b49c6648eda6500025de8bab9c8dcc78d45
|
[
"MIT"
] | 3
|
2021-07-23T17:30:35.000Z
|
2021-09-17T16:25:57.000Z
|
treat/moc/kinf/make_kinf.py
|
tjlaboss/tasty_treat
|
5a137b49c6648eda6500025de8bab9c8dcc78d45
|
[
"MIT"
] | null | null | null |
# Make kinf
#
# Analyze the fuel materials from different libraries

import numpy as np
import openmc
from openmc import mgxs
import os
import sys; sys.path.append("..")
import materials
import energy_groups

def _get_fuel(library):
    try:
        fuel = library.get_material("fuel")
    except KeyError:
        fuel = library.get_material("fuel 7.6 ppm")
    return fuel

def get_geometry(fuel):
    root_cell = openmc.Cell(name="root cell")
    root_cell.fill = fuel
    w = openmc.XPlane(x0=-10, boundary_type="periodic")
    e = openmc.XPlane(x0=+10, boundary_type="periodic")
    s = openmc.YPlane(y0=-10, boundary_type="periodic")
    n = openmc.YPlane(y0=+10, boundary_type="periodic")
    b = openmc.ZPlane(z0=-10, boundary_type="periodic")
    t = openmc.ZPlane(z0=+10, boundary_type="periodic")
    root_cell.region = +w & -e & +s & -n & +b & -t
    root_universe = openmc.Universe(0, "root universe", [root_cell])
    g = openmc.Geometry()
    g.root_universe = root_universe
    return g

def get_materials(library):
    openmc_mats = library.toOpenmcMaterials()
    return openmc_mats

def get_settings():
    s = openmc.Settings()
    s.particles = int(1E6)
    s.batches = 100
    s.inactive = 25
    return s

def _mgxs_groups(groups, geom, fuel, by_nuclide=False):
    material_libraries = {}
    for g in groups:
        assert g in energy_groups.ALL_GROUP_NUMBERS
        if g == 11:
            eg = energy_groups.treat["11-group"]
        else:
            key = "{}-group".format(g)
            eg = energy_groups.casmo[key]
            eg.group_edges *= 1E6
        lib = mgxs.Library(geom)
        lib.energy_groups = eg
        lib.mgxs_types = ['nu-transport', 'transport', 'total', 'fission',
                          'nu-fission', 'capture', 'chi', 'consistent nu-scatter matrix']
        lib.correction = "P0"
        lib.by_nuclide = by_nuclide
        lib.domain_type = "material"
        lib.domains = [fuel]
        lib.build_library()
        material_libraries[g] = lib
    return material_libraries

def get_tallies(fuel, libraries=None):
    tallies = openmc.Tallies()
    tal1 = openmc.Tally()
    tal1.scores = ["absorption"]
    nucs = list(np.array(fuel.nuclides)[:, 0])
    tal1.nuclides = nucs
    tallies.extend([tal1])
    if libraries is not None:
        for lib in libraries.values():
            lib.add_to_tallies_file(tallies)
    return tallies

def export_to_xml(export_path, s, g, m, t=None, l=None):
    assert isinstance(s, openmc.Settings)
    assert isinstance(g, openmc.Geometry)
    assert isinstance(m, openmc.Materials)
    if t is not None:
        assert isinstance(t, openmc.Tallies)
        t.export_to_xml(export_path + "/tallies.xml")
    if l is not None:
        for lib in l.values():
            fname = "material_lib_{}".format(lib.num_groups)
            lib.dump_to_file(fname, directory=export_path)
    s.export_to_xml(export_path + "/settings.xml")
    g.export_to_xml(export_path + "/geometry.xml")
    m.export_to_xml(export_path + "/materials.xml")

def build_model(lib, multigroup):
    matlib = materials.get_library(lib)
    if not os.path.isdir(lib):
        # Standard PermissionError is exactly what we want
        os.mkdir(lib)
    print("Exporting to:", lib)
    fuel = _get_fuel(matlib)
    # replace natural elements with nuclides
    # Note: I think this can be done with "fuel.get_nuclide_densities()"
    all_elements = fuel.elements[:]
    for el in all_elements:
        elem, etype, efrac = el[0:3]
        for nuc, nfrac, ntype in elem.expand(etype, efrac):
            fuel.add_nuclide(nuc, nfrac, ntype)
        fuel.remove_element(elem)
    mats = get_materials(matlib)
    sets = get_settings()
    geom = get_geometry(fuel)
    libs = _mgxs_groups(multigroup, geom, fuel)
    tals = get_tallies(fuel, libs)
    export_to_xml(lib, sets, geom, mats, tals, libs)
    # Extract the nuclide number densities
    fuel_atoms = fuel.get_nuclide_atom_densities()
    nuclide_results = np.array(list(fuel_atoms.values()))
    nuclides = np.array([n.name for n in nuclide_results[:, 0]])
    np.savetxt(lib + "/nuclides.txt", nuclides, fmt='%s')
    atom_dens = nuclide_results[:, 1]
    np.savetxt(lib + "/atom_dens.txt", atom_dens)
    atom_frac = atom_dens / atom_dens.sum()
    np.savetxt(lib + "/atom_frac.txt", atom_frac)

if __name__ == "__main__":
    build_model("BATMAN", multigroup=[11, 25])
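
# The same model can be exported for another data library by changing the
# arguments (the library name below is illustrative; it must be known to the
# local materials module):
#
#   build_model("ENDF", multigroup=[25])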
| 29.352941
| 75
| 0.717435
| 601
| 3,992
| 4.584027
| 0.301165
| 0.021779
| 0.03049
| 0.047913
| 0.147731
| 0.090744
| 0.078403
| 0
| 0
| 0
| 0
| 0.014109
| 0.147796
| 3,992
| 135
| 76
| 29.57037
| 0.795708
| 0.062876
| 0
| 0
| 0
| 0
| 0.091615
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 1
| 0.072727
| false
| 0
| 0.063636
| 0
| 0.190909
| 0.009091
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
abe704d6ccddf0e4852958c0661a8661be5aca37
| 1,775
|
py
|
Python
|
Configuration/Skimming/test/tier1_hi/hiHighPt_PromptSkim2011_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
Configuration/Skimming/test/tier1_hi/hiHighPt_PromptSkim2011_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
Configuration/Skimming/test/tier1_hi/hiHighPt_PromptSkim2011_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms

process = cms.Process("HIGHPTSKIM")
process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContentHeavyIons_cff')

process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
        'file:/mnt/hadoop/cms/store/hidata/HIRun2010/HIAllPhysics/RECO/SDmaker_3SD_1CS_PDHIAllPhysicsZSv2_SD_JetHI-v1/0000/A8934EC1-904B-E011-862C-003048F17528.root'
    )
)

# =============== Other Statements =====================
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1))
process.options = cms.untracked.PSet(wantSummary = cms.untracked.bool(True))

# Trigger Selection
### Comment this out for the time being when running on a secondary dataset
### with the trigger bit already selected.
import HLTrigger.HLTfilters.hltHighLevel_cfi
process.hltHIHighPt = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone()
process.hltHIHighPt.HLTPaths = ['HLT_HIDoublePhoton15_*','HLT_HIJet80_*','HLT_HISinglePhoton40_*'] # for allphysics
process.hltHIHighPt.andOr = cms.bool(True)
process.hltHIHighPt.throw = cms.bool(False)

process.eventFilter_step = cms.Path( process.hltHIHighPt )

process.output = cms.OutputModule("PoolOutputModule",
    outputCommands = process.RECOEventContent.outputCommands,
    fileName = cms.untracked.string('hiHighPt.root'),
    SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('eventFilter_step')),
    dataset = cms.untracked.PSet(
        dataTier = cms.untracked.string('RECO'),
        filterName = cms.untracked.string('hiHighPt'))
)

process.output_step = cms.EndPath(process.output)

process.schedule = cms.Schedule(
    process.eventFilter_step,
    process.output_step
)
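
# This configuration is executed with cmsRun inside a CMSSW environment
# (illustrative invocation):
#
#   cmsRun hiHighPt_PromptSkim2011_cfg.py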
| 41.27907
| 165
| 0.774085
| 198
| 1,775
| 6.833333
| 0.520202
| 0.088692
| 0.047302
| 0.050259
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028536
| 0.091831
| 1,775
| 42
| 166
| 42.261905
| 0.810794
| 0.107606
| 0
| 0
| 0
| 0.032258
| 0.268865
| 0.211795
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.064516
| 0
| 0.064516
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
abf5a5efa1eec3c81e55b4c7175e48f263de13b1
| 16,929
|
py
|
Python
|
tests/continual/test_container.py
|
LukasHedegaard/continual-inference
|
99a01f1360c56e2578231edd8fecb0dbadbf44d0
|
[
"Apache-2.0"
] | 7
|
2021-09-22T14:42:59.000Z
|
2022-03-28T20:43:25.000Z
|
tests/continual/test_container.py
|
LukasHedegaard/continual-inference
|
99a01f1360c56e2578231edd8fecb0dbadbf44d0
|
[
"Apache-2.0"
] | 29
|
2021-08-11T06:58:21.000Z
|
2022-03-29T07:19:37.000Z
|
tests/continual/test_container.py
|
LukasHedegaard/continual-inference
|
99a01f1360c56e2578231edd8fecb0dbadbf44d0
|
[
"Apache-2.0"
] | 2
|
2021-10-03T20:03:09.000Z
|
2021-12-03T17:31:48.000Z
|
import math
from collections import OrderedDict

import pytest
import torch
from torch import nn

import continual as co
from continual.module import TensorPlaceholder

torch.manual_seed(42)

def test_sequential():
    S = 3
    long_example_clip = torch.normal(mean=torch.zeros(10 * 3 * 3)).reshape(
        (1, 1, 10, 3, 3)
    )
    seq = nn.Sequential(
        nn.Conv3d(
            in_channels=1,
            out_channels=1,
            kernel_size=(5, S, S),
            bias=True,
            padding=(0, 1, 1),
            padding_mode="zeros",
        ),
        nn.Conv3d(
            in_channels=1,
            out_channels=1,
            kernel_size=(3, S, S),
            bias=True,
            padding=(0, 1, 1),
            padding_mode="zeros",
        ),
        nn.MaxPool3d(kernel_size=(1, 2, 2)),
    )
    coseq = co.Sequential.build_from(seq)
    assert coseq.delay == (5 - 1) + (3 - 1)

    # forward
    output = seq.forward(long_example_clip)
    co_output = coseq.forward(long_example_clip)
    assert torch.allclose(output, co_output)

    # forward_steps
    co_output_firsts_0 = coseq.forward_steps(
        long_example_clip[:, :, :-1], update_state=False
    )
    co_output_firsts = coseq.forward_steps(long_example_clip[:, :, :-1])
    assert torch.allclose(co_output_firsts, co_output_firsts_0)
    assert torch.allclose(co_output_firsts, output[:, :, :-1])

    # forward_step
    co_output_last_0 = coseq.forward_step(
        long_example_clip[:, :, -1], update_state=False
    )
    co_output_last = coseq.forward_step(long_example_clip[:, :, -1])
    assert torch.allclose(co_output_last, co_output_last_0)
    assert torch.allclose(co_output_last, output[:, :, -1])

    # Clean state can be used to restart seq computation
    coseq.clean_state()
    co_output_firsts = coseq.forward_steps(long_example_clip[:, :, :-1])
    assert torch.allclose(co_output_firsts, output[:, :, :-1])

def test_sequential_receptive_field():
    sample = torch.randn((1, 1, 100))

    # No padding, stride 1
    net = co.Sequential(*[co.Conv1d(1, 1, 9) for _ in range(10)])
    assert net.receptive_field == 9 + 8 * 9
    output = net.forward(sample)
    assert output.shape[2] == 100 - (net.receptive_field - 1)

    # Padding, stride 1
    net = co.Sequential(*[co.Conv1d(1, 1, 9, padding=4) for _ in range(10)])
    assert net.receptive_field == 9 + 8 * 9
    output = net.forward(sample)
    assert output.shape[2] == 100 - (net.receptive_field - 1) + 2 * net.padding

    # No padding, mixed stride
    net = co.Sequential(
        co.Conv1d(1, 1, 3, padding=0, stride=1),
        co.Conv1d(1, 1, 3, padding=0, stride=2),
        co.Conv1d(1, 1, 3, padding=0, stride=3),
        co.Conv1d(1, 1, 3, padding=0, stride=1),
    )
    assert net.receptive_field == 21
    output = net.forward(sample)
    assert output.shape[2] == math.ceil((100 - (net.receptive_field - 1)) / net.stride)

    # Padding, mixed stride
    net = co.Sequential(
        co.Conv1d(1, 1, 3, padding=1, stride=1),
        co.Conv1d(1, 1, 3, padding=1, stride=2),
        co.Conv1d(1, 1, 3, padding=1, stride=3),
        co.Conv1d(1, 1, 3, padding=1, stride=1),
    )
    assert net.receptive_field == 21
    output = net.forward(sample)
    assert net.padding == 1 + 1 + 2 + 2 * 3
    assert output.shape[2] == math.ceil(
        (100 - (net.receptive_field - 1) + 2 * net.padding) / net.stride
    )

def test_sequential_with_TensorPlaceholder():
    sample = torch.arange(32, dtype=torch.float).reshape((1, 1, 32))
    seq = nn.Sequential(
        nn.Conv1d(
            in_channels=1,
            out_channels=1,
            kernel_size=3,
            bias=False,
            padding=1,
            padding_mode="zeros",
        ),
        nn.MaxPool1d(
            kernel_size=2,
            stride=2,  # Has temporal skips
            padding=0,
        ),
        nn.Conv1d(
            in_channels=1,
            out_channels=1,
            kernel_size=3,
            bias=False,
            stride=2,  # Has temporal skips
            padding=1,
            padding_mode="zeros",
        ),
    )
    torch.nn.init.ones_(seq[0].weight)
    torch.nn.init.ones_(seq[2].weight)
    coseq = co.Sequential.build_from(seq)
    assert coseq.stride == 4
    assert coseq.padding == 3
    assert coseq.receptive_field == 8
    assert coseq.delay == 4

    target = seq.forward(sample)

    # forward_steps with padding
    output = coseq.forward_steps(sample, pad_end=True)
    assert torch.allclose(target, output)

    coseq.clean_state()
    out_stepwise = []
    for i in range(sample.shape[2]):
        out_stepwise.append(coseq.forward_step(sample[:, :, i]))

    out_cleaned = torch.stack(
        [o for o in out_stepwise if isinstance(o, torch.Tensor)], dim=2
    )
    assert torch.allclose(target[:, :, :-1], out_cleaned)

def test_sum_reduce():
    ones = torch.ones((1, 2, 4, 3, 3))
    twos = torch.ones((1, 2, 4, 3, 3)) * 2
    res = co.container.reduce_sum([ones, ones])
    assert torch.allclose(res, twos)

def test_concat_reduce():
    ones = torch.ones((1, 2, 4, 3, 3))
    twos = torch.ones((1, 2, 4, 3, 3)) * 2
    res = co.container.reduce_concat([ones, twos])
    assert res.shape == (1, 4, 4, 3, 3)
    assert torch.allclose(res[:, :2], ones)
    assert torch.allclose(res[:, 2:], twos)

def test_residual():
    input = torch.arange(6, dtype=torch.float).reshape((1, 1, 6))
    conv = nn.Conv1d(1, 1, kernel_size=3, padding=1, bias=False)
    torch.nn.init.ones_(conv.weight)
    co_conv = co.Conv1d.build_from(conv)
    co_res = co.Residual(co_conv)

    # Target behavior: Discard outputs from temporal padding
    target = conv(input) + input

    # forward
    out_manual_res = co_conv.forward(input) + input
    assert torch.allclose(out_manual_res, target)
    out_res = co_res.forward(input)
    assert torch.allclose(out_res, target)

    # forward_steps
    out_firsts = co_res.forward_steps(input[:, :, :-1], pad_end=False)
    assert torch.allclose(out_firsts, target[:, :, :4])

    # forward_step
    out_last = co_res.forward_step(input[:, :, -1])
    assert torch.allclose(out_last, target[:, :, -2])

def test_residual_shrink():
    input = torch.arange(6, dtype=torch.float).reshape((1, 1, 6))
    conv = nn.Conv1d(1, 1, kernel_size=3, padding=0, bias=False)
    torch.nn.init.ones_(conv.weight)
    co_conv = co.Conv1d.build_from(conv)
    co_res = co.Residual(co_conv, phantom_padding=True)

    # Target behavior: Discard outputs from temporal padding
    target = conv(input) + input[:, :, 1:-1]

    # forward
    out_manual_res = co_conv.forward(input) + input[:, :, 1:-1]
    assert torch.allclose(out_manual_res, target)
    out_res = co_res.forward(input)
    assert torch.allclose(out_res, target)

    # forward_step
    output_step = []
    for t in range(input.shape[2]):
        y = co_res.forward_step(input[:, :, t])
        if isinstance(y, torch.Tensor):
            output_step.append(y)
    output_step = torch.stack(output_step, dim=2)
    assert torch.allclose(output_step, target)

    # forward_steps
    co_res.clean_state()
    out_firsts = co_res.forward_steps(input[:, :, :-1], pad_end=False)
    assert torch.allclose(out_firsts, target[:, :, :3])

    # forward_step
    out_last = co_res.forward_step(input[:, :, -1])
    assert torch.allclose(out_last, target[:, :, -1])

def test_broadcast_reduce():
    input = torch.arange(7, dtype=torch.float).reshape((1, 1, 7))
    c5 = co.Conv1d(1, 1, kernel_size=5, padding=2, bias=False)
    c3 = co.Conv1d(1, 1, kernel_size=3, padding=1, bias=False)
    c1 = co.Conv1d(1, 1, kernel_size=1, padding=0, bias=False)
    torch.nn.init.ones_(c5.weight)
    torch.nn.init.ones_(c3.weight)
    torch.nn.init.ones_(c1.weight)

    par = co.BroadcastReduce(OrderedDict([("c5", c5), ("c3", c3), ("c1", c1)]))
    assert par.stride == 1
    assert par.delay == 2
    assert par.padding == 2
    assert par.receptive_field == 5
    assert "BroadcastReduce(" in par.__repr__() and "reduce=" in par.__repr__()

    # forward
    out_all = par.forward(input)
    assert torch.allclose(
        out_all, torch.tensor([[[4.0, 10.0, 18.0, 27.0, 36.0, 38.0, 32.0]]])
    )

    # forward_step
    out_steps = [par.forward_step(input[:, :, i]) for i in range(input.shape[2])]
    assert all(isinstance(o, TensorPlaceholder) for o in out_steps[: par.delay])
    out_steps = torch.stack(out_steps[par.delay :], dim=2)
    assert torch.allclose(out_steps, out_all[:, :, : -par.delay])

    # forward_steps
    par.clean_state()
    out_steps_0 = par.forward_steps(input[:, :, :-1], pad_end=False, update_state=False)
    out_steps = par.forward_steps(input[:, :, :-1], pad_end=False)
    assert torch.allclose(out_steps, out_steps_0)
    assert torch.allclose(out_steps, out_all[:, :, : -par.delay - 1])

    out_step_0 = par.forward_step(input[:, :, -1], update_state=False)  # continuation
    out_step = par.forward_step(input[:, :, -1])  # continuation
    assert torch.allclose(out_step, out_step_0)
    assert torch.allclose(out_step, out_all[:, :, -par.delay - 1])

    # with pad_end
    par.clean_state()
    out_steps = par.forward_steps(input, pad_end=True)
    assert torch.allclose(out_steps, out_all)

def test_flat_state_dict():
    # >> Part 1: Save both flat and original state dicts
    # If modules are not named, it can be flattened
    seq_to_flatten = co.Sequential(nn.Conv1d(1, 1, 3))
    sd = seq_to_flatten.state_dict()
    assert set(sd) == {"0.weight", "0.bias"}
    sd_flat = seq_to_flatten.state_dict(flatten=True)
    assert set(sd_flat) == {"weight", "bias"}

    seq_not_to_flatten = co.Sequential(OrderedDict([("c1", nn.Conv1d(1, 1, 3))]))
    sd_no_flat = seq_not_to_flatten.state_dict(flatten=True)
    assert set(sd_no_flat) == {"c1.weight", "c1.bias"}

    # A nested example:
    nested = co.BroadcastReduce(seq_to_flatten, seq_not_to_flatten)
    sd = nested.state_dict()
    assert set(sd) == {"0.0.weight", "0.0.bias", "1.c1.weight", "1.c1.bias"}
    sd_flat = nested.state_dict(flatten=True)
    assert set(sd_flat) == {"weight", "bias", "c1.weight", "c1.bias"}

    # >> Part 2: Load flat state dict
    nested_new = co.BroadcastReduce(
        co.Sequential(nn.Conv1d(1, 1, 3)),
        co.Sequential(OrderedDict([("c1", nn.Conv1d(1, 1, 3))])),
    )
    assert not torch.equal(nested[0][0].weight, nested_new[0][0].weight)
    assert not torch.equal(nested[0][0].bias, nested_new[0][0].bias)
    assert not torch.equal(nested[1].c1.weight, nested_new[1].c1.weight)
    assert not torch.equal(nested[1].c1.bias, nested_new[1].c1.bias)

    nested_new.load_state_dict(sd_flat, flatten=True)
    assert torch.equal(nested[0][0].weight, nested_new[0][0].weight)
    assert torch.equal(nested[0][0].bias, nested_new[0][0].bias)
    assert torch.equal(nested[1].c1.weight, nested_new[1].c1.weight)
    assert torch.equal(nested[1].c1.bias, nested_new[1].c1.bias)

    # >> Part 3: Test context manager
    with co.utils.flat_state_dict:
        # Export works as above despite `flatten=False`
        sd_flat2 = nested.state_dict(flatten=False)
        assert sd_flat.keys() == sd_flat2.keys()
        assert all(torch.equal(sd_flat[key], sd_flat2[key]) for key in sd_flat.keys())

        # Loading works as above despite `flatten=False`
        nested_new.load_state_dict(sd_flat, flatten=False)
        assert torch.equal(nested[0][0].weight, nested_new[0][0].weight)
        assert torch.equal(nested[0][0].bias, nested_new[0][0].bias)
        assert torch.equal(nested[1].c1.weight, nested_new[1].c1.weight)
        assert torch.equal(nested[1].c1.bias, nested_new[1].c1.bias)

    assert True  # Need to step down here to trigger context manager __exit__

def test_conditional_only_first():
    x = torch.ones((1, 1, 3))

    def is_training(module, *args):
        return module.training

    mod = co.Conditional(is_training, co.Multiply(2))

    mod.train()
    assert torch.equal(mod.forward(x), x * 2)
    assert torch.equal(mod.forward_steps(x), x * 2)
    assert torch.equal(mod.forward_step(x[:, :, 0]), x[:, :, 0] * 2)

    mod.eval()
    assert torch.equal(mod.forward(x), x)
    assert torch.equal(mod.forward_steps(x), x)
    assert torch.equal(mod.forward_step(x[:, :, 0]), x[:, :, 0])

def test_conditional_both_cases():
    x = torch.ones((1, 1, 3))

    def is_training(module, *args):
        return module.training

    mod = co.Conditional(is_training, co.Multiply(2), co.Multiply(3))
    assert mod.receptive_field == 1
    assert (
        mod.__repr__()
        == """Conditional(\n predicate=is_training\n (0): Lambda(_multiply, takes_time=True)\n (1): Lambda(_multiply, takes_time=True)\n)"""
    )

    mod.train()
    assert torch.equal(mod.forward(x), x * 2)
    assert torch.equal(mod.forward_steps(x), x * 2)
    assert torch.equal(mod.forward_step(x[:, :, 0]), x[:, :, 0] * 2)

    mod.eval()
    assert torch.equal(mod.forward(x), x * 3)
    assert torch.equal(mod.forward_steps(x), x * 3)
    assert torch.equal(mod.forward_step(x[:, :, 0]), x[:, :, 0] * 3)

def test_conditional_delay():
    # if_true.delay < if_false.delay
    mod = co.Conditional(lambda a, b: True, co.Delay(2), co.Delay(3))
    assert mod.delay == 3
    assert mod._modules["0"].delay == 3
    assert mod._modules["1"].delay == 3

    # if_true.delay > if_false.delay
    mod = co.Conditional(lambda a, b: True, co.Delay(3), co.Delay(2))
    assert mod.delay == 3
    assert mod._modules["0"].delay == 3
    assert mod._modules["1"].delay == 3

def test_condition_torch_modules():
    mod = co.Conditional(
        lambda a, b: True,
        torch.nn.Sigmoid(),
        torch.nn.Softmax(),
    )
    assert (
        mod.__repr__()
        == "Conditional(\n predicate=lambda a, b: True\n (0): Sigmoid()\n (1): Softmax(dim=None)\n)"
    )

def test_broadcast():
    x = 42
    mod = co.Broadcast(2)
    assert mod.delay == 0
    assert mod.forward(x) == [x, x]
    assert mod.forward_step(x) == [x, x]
    assert mod.forward_steps(x) == [x, x]

def test_parallel():
    x = torch.randn((1, 1, 3))
    xx = [x, x]
    c3 = co.Conv1d(1, 1, kernel_size=3, padding=1, bias=False)
    c1 = co.Conv1d(1, 1, kernel_size=1, padding=0, bias=False)
    par = co.Parallel(OrderedDict([("c3", c3), ("c1", c1)]))
    assert par.delay == 1
    assert par.padding == 1
    assert par.stride == 1

    o1 = par.forward(xx)
    assert torch.equal(c3.forward(x), o1[0])
    assert torch.equal(c1.forward(x), o1[1])

    o2 = par.forward_steps(xx, pad_end=True, update_state=False)
    assert torch.equal(c3.forward_steps(x, pad_end=True), o2[0])
    assert torch.equal(c1.forward_steps(x, pad_end=True), o2[1])

    par.clean_state()
    par.forward_step([x[:, :, 0], x[:, :, 0]], update_state=True)
    o3 = par.forward_step([x[:, :, 1], x[:, :, 1]], update_state=False)
    assert torch.equal(c3.forward_step(x[:, :, 1]), o3[0])
    assert torch.equal(c1.forward_step(x[:, :, 0]), o3[1])  # x[:,:,0] due to auto delay

def test_reduce():
    x = torch.tensor([[[1.0, 2.0]]])
    xx = [x, x]
    mod = co.Reduce("sum")
    assert mod.delay == 0
    assert torch.equal(mod.forward(xx), torch.tensor([[[2.0, 4.0]]]))
    assert torch.equal(mod.forward_steps(xx), torch.tensor([[[2.0, 4.0]]]))
    assert torch.equal(
        mod.forward_step([x[:, :, 0], x[:, :, 0]]), torch.tensor([[2.0]])
    )

def test_parallel_sequential():
    x = torch.arange(7, dtype=torch.float).reshape((1, 1, 7))

    # Test two equivalent implementations

    # First
    c5 = co.Conv1d(1, 1, kernel_size=5, padding=2, bias=False)
    c3 = co.Conv1d(1, 1, kernel_size=3, padding=1, bias=False)
    c1 = co.Conv1d(1, 1, kernel_size=1, padding=0, bias=False)
    torch.nn.init.ones_(c5.weight)
    torch.nn.init.ones_(c3.weight)
    torch.nn.init.ones_(c1.weight)
    mod1 = co.BroadcastReduce(c5, c3, c1, reduce="sum")

    # Second
    c5 = co.Conv1d(1, 1, kernel_size=5, padding=2, bias=False)
    c3 = co.Conv1d(1, 1, kernel_size=3, padding=1, bias=False)
    c1 = co.Conv1d(1, 1, kernel_size=1, padding=0, bias=False)
    torch.nn.init.ones_(c5.weight)
    torch.nn.init.ones_(c3.weight)
    torch.nn.init.ones_(c1.weight)
    mod2 = co.Sequential(
        co.Broadcast(),  # Sequential can infer broadcast dimensions
        co.Parallel(c5, c3, c1),
        co.Reduce("sum"),
    )

    # Compare
    o1 = mod1.forward(x)
    o2 = mod2.forward(x)
    assert torch.equal(o1, o2)

def test_parallel_dispatch():
    with pytest.raises(AssertionError):
        co.ParallelDispatch([1.0, "nah"])

    inputs = [10, 11, 12]
    mapping = [2, 0, [0, 2], 2]
    module = co.ParallelDispatch(mapping)
    outputs1 = module.forward(inputs)
    outputs2 = module.forward_step(inputs)
    outputs3 = module.forward_steps(inputs)
    assert outputs1 == [12, 10, [10, 12], 12]
    assert outputs2 == [12, 10, [10, 12], 12]
    assert outputs3 == [12, 10, [10, 12], 12]
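
# The suite is normally collected with pytest (illustrative invocation from
# the repository root):
#
#   pytest tests/continual/test_container.py -q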
| 31.52514
| 143
| 0.624845
| 2,518
| 16,929
| 4.046068
| 0.090945
| 0.061543
| 0.047114
| 0.020612
| 0.660777
| 0.616608
| 0.542992
| 0.521692
| 0.471731
| 0.426875
| 0
| 0.047065
| 0.218087
| 16,929
| 536
| 144
| 31.583955
| 0.722596
| 0.06037
| 0
| 0.378378
| 0
| 0.005405
| 0.024773
| 0.00145
| 0
| 0
| 0
| 0
| 0.3
| 1
| 0.054054
| false
| 0
| 0.018919
| 0.005405
| 0.078378
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
abfd898f1fd7a927dd38c972caa71877640b65cc
| 2,728
|
py
|
Python
|
quotatron.py
|
ItsPonks/DiscordBots
|
062c4e6b33835fc3cde391011125d4cfd7ae1c6a
|
[
"MIT"
] | null | null | null |
quotatron.py
|
ItsPonks/DiscordBots
|
062c4e6b33835fc3cde391011125d4cfd7ae1c6a
|
[
"MIT"
] | null | null | null |
quotatron.py
|
ItsPonks/DiscordBots
|
062c4e6b33835fc3cde391011125d4cfd7ae1c6a
|
[
"MIT"
] | null | null | null |
from app import SlashBot
from async_timeout import timeout
from asyncio import TimeoutError
from datetime import datetime, timedelta, timezone
from hikari import ButtonStyle, CacheSettings, Member, UNDEFINED
from random import choice, sample, uniform

bot = SlashBot(cache_settings=CacheSettings(max_messages=10000))

async def find(context, success, failure, *members):
    channel = context.get_channel()
    guild = context.get_guild()
    link = None
    a = datetime.now(timezone.utc)
    try:
        async with timeout(900) as to:
            content = ''
            messages = set()
            for i, member in enumerate(members):
                predicates = [lambda m: m.content and not (m.mentions.everyone or m.mentions.role_ids or m.mentions.users) and m.content not in messages and '://' not in m.content]
                attrs = dict(attachments=[], embeds=[], stickers=[])
                if member:
                    b = max(channel.created_at, member.joined_at)
                    attrs['author'] = member.user
                else:
                    b = channel.created_at
                    predicates.append(lambda m: not m.author.is_bot and m.author.discriminator != '0000')
                until = timedelta(seconds=(to.deadline - to._loop.time()) / (len(members) - i)) + datetime.now()
                while datetime.now() < until:
                    if history := await channel.fetch_history(around=uniform(a, b)).limit(101).filter(*predicates, **attrs):
                        m = choice(history)
                        link = m.make_link(guild)
                        messages.add(m)
                        content += success.format(username=m.author.username, content=m.content.replace('\n', ' \\ '), date=m.timestamp.date())
                        break
            if len(content) <= 2000:
                return content or failure, link
    except TimeoutError:
        return 'All attempts at finding quotes exceeded the maximum length.', None

@bot.slash('Randomly quotes members in this channel.')
async def convo(context, *members: ('Quote whom?', Member), count: ('How many?', int) = 5):
    content, _ = await find(context, '{username}: {content}\n', 'No messages found.', *sample(members, len(members)) or [None] * min(count, 100))
    await context.respond(content)

@bot.slash('Randomly quotes a member in this channel.')
async def quote(context, member: ('Quote whom?', Member) = None):
    content, link = await find(context, '"{content}" -{username}, {date}', 'No message found.', member)
    await context.respond(content, component=bot.button('Original', ButtonStyle.LINK, link) if link else UNDEFINED)

bot.run()
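
# Usage note (illustrative assumptions): SlashBot is assumed to read the
# Discord token from its own configuration in app.py; once running, the
# commands are invoked as slash commands, e.g. "/quote member:@someone" or
# "/convo" to quote several members at once.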
| 50.518519
| 181
| 0.606305
| 319
| 2,728
| 5.141066
| 0.420063
| 0.02439
| 0.013415
| 0.026829
| 0.02561
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011622
| 0.27456
| 2,728
| 53
| 182
| 51.471698
| 0.817079
| 0
| 0
| 0
| 0
| 0
| 0.10729
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.130435
| 0
| 0.173913
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
abff8a668f2d6433edc42fc8d0de063e6d1e1a69
| 2,745
|
py
|
Python
|
hpotter/src/one_way_thread.py
|
LarsenClose/dr.hpotter
|
ef6199ab563a92f3e4916277dbde9217126f36a9
|
[
"MIT"
] | 1
|
2021-08-15T09:24:20.000Z
|
2021-08-15T09:24:20.000Z
|
hpotter/src/one_way_thread.py
|
LarsenClose/dr.hpotter
|
ef6199ab563a92f3e4916277dbde9217126f36a9
|
[
"MIT"
] | 18
|
2021-02-01T21:58:20.000Z
|
2021-05-24T17:10:25.000Z
|
hpotter/src/one_way_thread.py
|
LarsenClose/dr.hpotter
|
ef6199ab563a92f3e4916277dbde9217126f36a9
|
[
"MIT"
] | 1
|
2021-06-19T12:49:54.000Z
|
2021-06-19T12:49:54.000Z
|
''' Threads that go to/from containers, limit data and lines, and insert
data into the Data table. '''

import threading

from src import tables
from src.logger import logger
from src.lazy_init import lazy_init

class OneWayThread(threading.Thread):
    ''' One thread to/from container. '''

    # pylint: disable=E1101, W0613
    @lazy_init
    def __init__(self, source, dest, connection, container, direction, database):
        super().__init__()
        self.length = self.container.get(self.direction + '_length', 4096)
        self.commands = self.container.get(self.direction + '_commands', 10)
        self.delimiters = self.container.get(self.direction + '_delimiters', ['\n', '\r'])
        self.shutdown_requested = False

    def _read(self):
        logger.debug('%s reading from: %s', self.direction, str(self.source))
        data = self.source.recv(4096)
        logger.debug('%s read: %s', self.direction, str(data))
        return data

    def _write(self, data):
        logger.debug('%s sending to: %s', self.direction, str(self.dest))
        self.dest.sendall(data)
        logger.debug('%s sent: %s', self.direction, str(data))

    def _too_many_commands(self, data):
        if self.commands > 0:
            sdata = str(data)
            count = 0
            for delimiter in self.delimiters:
                count = max(count, sdata.count(delimiter))
            if count >= self.commands:
                logger.info('Commands exceeded, stopping')
                return True
        return False

    def run(self):
        total = b''
        while True:
            try:
                data = self._read()
                if not data or data == b'':
                    break
                self._write(data)
            except Exception as exception:
                logger.debug('%s %s', self.direction, str(exception))
                break

            total += data

            if self.shutdown_requested:
                break
            if self.length > 0 and len(total) >= self.length:
                logger.debug('Length exceeded')
                break
            if self._too_many_commands(data):
                break

        logger.debug(self.length)
        logger.debug(len(total))
        logger.debug(self.direction)
        save = self.direction + '_save'
        if (save in self.container and self.container[save]) and (self.length > 0 and len(total) > 0):
            self.database.write(tables.Data(direction=self.direction,
                                            data=str(total), connection=self.connection))
        self.source.close()
        self.dest.close()

    def shutdown(self):
        ''' Called from an external source when HPotter is shutting down. '''
        self.shutdown_requested = True
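
# A minimal wiring sketch (illustrative; the sockets, connection row,
# container dict, and database handle are placeholders for the objects that
# HPotter's connection handling normally supplies):
#
#   thread = OneWayThread(client_sock, container_sock, connection_row,
#                         {'request_length': 4096, 'request_save': True},
#                         'request', database)
#   thread.start()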
| 33.072289
| 102
| 0.579964
| 320
| 2,745
| 4.8875
| 0.29375
| 0.091432
| 0.038363
| 0.054348
| 0.137468
| 0.028133
| 0
| 0
| 0
| 0
| 0
| 0.012099
| 0.307468
| 2,745
| 82
| 103
| 33.47561
| 0.810626
| 0.077596
| 0
| 0.081967
| 0
| 0
| 0.056108
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.098361
| false
| 0
| 0.065574
| 0
| 0.229508
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2804d5dc2b4f1fcfefbc0acd3532b59ffc96d9f6
| 1,827
|
py
|
Python
|
util/levels.py
|
Xetera/IreneBot
|
e768bb3a0d2517ecb00d50da89d66ac0dd1498d0
|
[
"MIT"
] | 1
|
2021-10-02T16:05:11.000Z
|
2021-10-02T16:05:11.000Z
|
util/levels.py
|
Xetera/IreneBot
|
e768bb3a0d2517ecb00d50da89d66ac0dd1498d0
|
[
"MIT"
] | null | null | null |
util/levels.py
|
Xetera/IreneBot
|
e768bb3a0d2517ecb00d50da89d66ac0dd1498d0
|
[
"MIT"
] | null | null | null |
from Utility import resources as ex

# noinspection PyPep8
class Levels:

    @staticmethod
    async def get_level(user_id, command):
        """Get the level of a command (rob/beg/daily)."""
        count = ex.first_result(
            await ex.conn.fetchrow(f"SELECT COUNT(*) FROM currency.Levels WHERE UserID = $1 AND {command} > $2",
                                   user_id, 1))
        if not count:
            level = 1
        else:
            level = ex.first_result(
                await ex.conn.fetchrow(f"SELECT {command} FROM currency.Levels WHERE UserID = $1", user_id))
        return int(level)

    @staticmethod
    async def set_level(user_id, level, command):
        """Set the level of a user for a specific command."""
        async def update_level():
            """Updates a user's level."""
            await ex.conn.execute(f"UPDATE currency.Levels SET {command} = $1 WHERE UserID = $2", level, user_id)

        count = ex.first_result(await ex.conn.fetchrow(f"SELECT COUNT(*) FROM currency.Levels WHERE UserID = $1", user_id))
        if not count:
            await ex.conn.execute("INSERT INTO currency.Levels VALUES($1, NULL, NULL, NULL, NULL, 1)", user_id)
            await update_level()
        else:
            await update_level()

    @staticmethod
    async def get_xp(level, command):
        """Returns money/experience needed for a certain level."""
        if command == "profile":
            return 250 * level
        return int((2 * 350) * (2 ** (level - 2)))  # 350 is base value (level 1)

    @staticmethod
    async def get_rob_percentage(level):
        """Get the percentage of being able to rob. (Every 1 is 5%)"""
        chance = int(6 + (level // 10))  # first 10 levels is 6 for 30% chance
        if chance > 16:
            chance = 16
        return chance
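
# Worked example of the experience curve above (base value 350, doubling per
# level): get_xp(2, "rob") -> 700 and get_xp(3, "rob") -> 1400, while
# get_xp(3, "profile") -> 750 via the linear 250 * level branch.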
| 38.87234
| 123
| 0.586207
| 242
| 1,827
| 4.35124
| 0.318182
| 0.039886
| 0.052232
| 0.065527
| 0.226971
| 0.226971
| 0.226971
| 0.226971
| 0.187085
| 0.150047
| 0
| 0.029874
| 0.303777
| 1,827
| 46
| 124
| 39.717391
| 0.797956
| 0.04543
| 0
| 0.294118
| 0
| 0
| 0.210208
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.029412
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9eaf3d0c02077230ba993a6dcd5aeadb2b3f5df
| 1,533
|
py
|
Python
|
Hydraslayer/Utility.py
|
Jorisvansteenbrugge/GapFiller
|
ff879935765ed47eafcc0f38e47042694657d961
|
[
"MIT"
] | null | null | null |
Hydraslayer/Utility.py
|
Jorisvansteenbrugge/GapFiller
|
ff879935765ed47eafcc0f38e47042694657d961
|
[
"MIT"
] | null | null | null |
Hydraslayer/Utility.py
|
Jorisvansteenbrugge/GapFiller
|
ff879935765ed47eafcc0f38e47042694657d961
|
[
"MIT"
] | null | null | null |
import logging

logger = logging.getLogger("Hydraslayer")

def get_consensusbase(bases, mincov=3):
    """
    :param mincov:
    :type bases: list
    """
    bases = "".join(bases)
    a = bases.count('A')
    t = bases.count('T')
    c = bases.count('C')
    g = bases.count('G')
    n = bases.count("N") + bases.count('-')
    counts = [(a, 'A'), (t, 'T'), (c, 'C'), (g, 'G')]
    s_dic = sorted(counts, key=lambda x: x[0], reverse=True)
    top = s_dic[0]  # renamed from 'max' to avoid shadowing the builtin
    if top[0] < mincov:
        return "N"
    else:
        return top[1]

def get_gap_pos_alignment(record):
    sequence = str(record.seq)
    N_pos = [x for x, nuc in enumerate(sequence) if nuc == "N"]
    return N_pos

def extract_positions(seq, positions):
    return "".join([seq[idx] for idx in positions])

def pretty_print_alignment(fasta_sequences):
    alignment_len = len(fasta_sequences[0])
    for x in range(alignment_len):
        row = []
        for alignment in fasta_sequences:
            row.append(alignment[x])
        print(" ".join(row))

def get_consensus(fasta_seqs, mincov):
    """Get the per-position consensus sequence, excluding gaps.

    All read sequences (not the assembly sequence) are merged into a consensus sequence.
    """
    consensus = []
    alignment_len = len(fasta_seqs[0])
    for x in range(alignment_len):
        bases = [fasta[x] for fasta in fasta_seqs]
        consensus.append(get_consensusbase(bases, mincov))
    # logger.debug(f'Consensus {"".join(consensus)}')
    return "".join(consensus)
| 23.227273
| 92
| 0.613177
| 208
| 1,533
| 4.408654
| 0.355769
| 0.065431
| 0.045802
| 0.058888
| 0.052345
| 0.052345
| 0.052345
| 0
| 0
| 0
| 0
| 0.006009
| 0.240052
| 1,533
| 65
| 93
| 23.584615
| 0.781116
| 0.145466
| 0
| 0.055556
| 0
| 0
| 0.018883
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.138889
| false
| 0
| 0.027778
| 0.027778
| 0.305556
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9eb5ae0a2f921af7f1e7867be9f3e3a70e3a5d2
| 5,094
|
py
|
Python
|
src/student/student_model.py
|
YoshikiKubotani/RLTutor
|
17d7cd274d0ea5ef69a4590b01ab905a4ed58463
|
[
"MIT"
] | 3
|
2021-08-10T09:57:42.000Z
|
2022-03-10T12:52:56.000Z
|
src/student/student_model.py
|
YoshikiKubotani/rltutor
|
17d7cd274d0ea5ef69a4590b01ab905a4ed58463
|
[
"MIT"
] | 1
|
2021-04-17T03:35:33.000Z
|
2021-04-23T15:58:02.000Z
|
src/student/student_model.py
|
YoshikiKubotani/rltutor
|
17d7cd274d0ea5ef69a4590b01ab905a4ed58463
|
[
"MIT"
] | null | null | null |
import copy
import numpy as np
from collections import defaultdict

import utils

class DAS3HStudent:
    def __init__(self, time_weight, n_items, n_skills, seed):
        np.random.seed(seed)
        self.alpha = np.random.normal(loc=-1.5, scale=0.3, size=1)
        self.delta = np.random.normal(loc=-1.0, scale=0.5, size=n_items)
        self.beta = np.random.normal(loc=-1.0, scale=0.5, size=n_skills)
        self.time_weight = time_weight
        self.weight = np.hstack((self.delta, self.beta, self.time_weight))
        self.h = 0.3
        self.d = 0.8

    def predict_proba(self, input_sparse_vec, lag):
        for_sigmoid = self.alpha + np.dot(self.weight, input_sparse_vec)
        ret = 1 / (1 + np.exp(-for_sigmoid)[0])
        ret = (1 - ret) * (1 + self.h * lag) ** (-self.d) + ret
        return ret
class StudentModel(object):
def __init__(
self, n_items, n_skills, n_wins, seed, item_skill_mat, model
):
self.name = "DAS3H"
np.random.seed(seed)
self.n_items = n_items
self.n_skills = n_skills
self.n_wins = n_wins
self.predictor = model
self.item_skill_mat = item_skill_mat
self.n_item_feats = int(np.log(2 * self.n_items))
self.item_feats = np.random.normal(
np.zeros(2 * self.n_items * self.n_item_feats),
np.ones(2 * self.n_items * self.n_item_feats),
).reshape((2 * self.n_items, self.n_item_feats))
self.now = 0
self.last_time = defaultdict(lambda: -10)
self.curr_item = np.random.randint(self.n_items)
self.q = defaultdict(lambda: utils.OurQueue())
self.curr_outcome = 0
self.curr_delay = 0
self.skill_ids = None
def _make_input_vec(self, selected_item_id, now_q):
item_vec = np.zeros(self.n_items)
skill_vec = np.zeros(self.n_skills)
correct_vec = np.zeros(self.n_wins * self.n_skills)
attempt_vec = np.zeros(self.n_wins * self.n_skills)
item_vec[selected_item_id] = 1
index_of_selected_skills = np.argwhere(
self.item_skill_mat[selected_item_id] == 1
)
self.skill_ids = index_of_selected_skills.transpose()[0].tolist()
self.skill_ids = list(set(self.skill_ids))
skill_vec[self.skill_ids] = 1
for skill_id in self.skill_ids:
correct_vec[skill_id * self.n_wins : (skill_id + 1) * self.n_wins] = np.log(
1 + np.array(now_q[skill_id, "correct"].get_counters(self.now))
)
attempt_vec[skill_id * self.n_wins : (skill_id + 1) * self.n_wins] = np.log(
1 + np.array(now_q[skill_id].get_counters(self.now))
)
return_np_vec = np.hstack((item_vec, skill_vec, correct_vec, attempt_vec))
return return_np_vec
def _encode_delay(self):
v = np.zeros(2)
v[self.curr_outcome] = np.log(1 + self.curr_delay)
return v
    def _encode_delay2(self):
        # Unused variant of _encode_delay. `self.q` maps keys to
        # utils.OurQueue objects, so a queue must be selected before its
        # `.queue` list can be read; the current item's queue is assumed here.
        v = np.zeros(2)
        delay = self.curr_delay
        item_queue = self.q[self.curr_item].queue
        if len(item_queue) != 0:
            delay = self.now - item_queue[-1]
        v[self.curr_outcome] = np.log(1 + delay)
        return v
def _vectorized_obs(self):
encoded_item = self.item_feats[
self.n_items * self.curr_outcome + self.curr_item, :
]
return np.hstack(
(encoded_item, self._encode_delay(), np.array([self.curr_outcome]))
)
def step(self, action, now):
self.curr_item = action
self.curr_delay = now - self.now
self.now += self.curr_delay
input_vec = self._make_input_vec(self.curr_item, copy.deepcopy(self.q))
lag = self.now - self.last_time[self.curr_item]
recall_prob = self.predictor.predict_proba(input_vec, lag)
self.curr_outcome = 1 if np.random.random() < recall_prob else 0
self._update_model()
obs = self._vectorized_obs()
return self.curr_outcome, obs
def _update_model(self):
self.last_time[self.curr_item] = self.now
for skill_id in self.skill_ids:
_ = self.q[skill_id, "correct"].get_counters(self.now)
_ = self.q[skill_id].get_counters(self.now)
if self.curr_outcome == 1:
self.q[skill_id, "correct"].push(self.now)
self.q[skill_id].push(self.now)
def get_retention_rate(self):
retention_rate_list = []
curr_q = copy.deepcopy(self.q)
for item in range(self.n_items):
input_vec = self._make_input_vec(item, curr_q)
lag = self.now - self.last_time[item]
recall_prob = self.predictor.predict_proba(input_vec, lag)
retention_rate_list.append(recall_prob)
return retention_rate_list
def reset(self, seed):
np.random.seed(seed)
self.now = 0
self.last_time = defaultdict(lambda: -10)
self.curr_item = np.random.randint(self.n_items)
self.q = defaultdict(lambda: utils.OurQueue())
self.curr_outcome = 0
self.curr_delay = 0
self.skill_ids = None
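
# --- Illustrative usage (editor's sketch, not part of the original file) ---
# A minimal, hedged example of querying DAS3HStudent directly. The feature
# vector layout (items, then skills, then per-window counters) mirrors
# _make_input_vec above; the sizes chosen here are arbitrary assumptions.
if __name__ == "__main__":
    n_items, n_skills, n_wins = 5, 3, 2
    time_weight = np.zeros(2 * n_wins * n_skills)
    student = DAS3HStudent(time_weight, n_items, n_skills, seed=0)
    input_vec = np.zeros(n_items + n_skills + 2 * n_wins * n_skills)
    input_vec[0] = 1  # one-hot encoding of item 0
    print(student.predict_proba(input_vec, lag=10))  # recall probability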
| 35.622378
| 88
| 0.611504
| 745
| 5,094
| 3.916779
| 0.154362
| 0.044551
| 0.037697
| 0.033585
| 0.428376
| 0.383139
| 0.332762
| 0.270391
| 0.224812
| 0.204249
| 0
| 0.015102
| 0.272085
| 5,094
| 143
| 89
| 35.622378
| 0.771845
| 0
| 0
| 0.211864
| 0
| 0
| 0.005103
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09322
| false
| 0
| 0.033898
| 0
| 0.20339
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9ef1102a1c613bcb45da31777d89452e59168b6
| 2,625
|
py
|
Python
|
conversion_tools/misc_functions.py
|
xiaoping-yang/ms2pip_c
|
061fcd8aa8c315b775ac64f5c1f7dfe0a09bdea9
|
[
"Apache-2.0"
] | 14
|
2018-08-21T04:58:22.000Z
|
2022-03-21T11:40:12.000Z
|
conversion_tools/misc_functions.py
|
xiaoping-yang/ms2pip_c
|
061fcd8aa8c315b775ac64f5c1f7dfe0a09bdea9
|
[
"Apache-2.0"
] | 69
|
2018-05-23T12:52:16.000Z
|
2022-03-14T20:42:49.000Z
|
conversion_tools/misc_functions.py
|
xiaoping-yang/ms2pip_c
|
061fcd8aa8c315b775ac64f5c1f7dfe0a09bdea9
|
[
"Apache-2.0"
] | 10
|
2019-04-28T01:24:10.000Z
|
2022-03-04T18:37:47.000Z
|
"""
Miscellaneous functions regarding MS2PIP file conversions.
"""
import re
import pandas as pd
def add_fixed_mods(peprec, fixed_mods=None, n_term=None, c_term=None):
"""
Add 'fixed' modifications to all peptides in an MS2PIP PEPREC file.
Return list with MS2PIP modifications with fixed mods added.
Positional arguments:
peprec - MS2PIP PEPREC DataFrame
Keyword arguments:
fixed_mods - List of tuples. First tuple element is amino acid, second tuple
element is modification name. E.g. `[('K', 'TMT6plex')]`
n_term - Name of fixed N-terminal modification to add
c_term - Name of fixed C-terminal modification to add
"""
if not fixed_mods:
fixed_mods = []
result = []
for _, row in peprec.iterrows():
mods = row['modifications']
if mods == '-':
mods = []
else:
mods = mods.split('|')
current_mods = list(zip([int(i) for i in mods[::2]], mods[1::2]))
for aa, mod in fixed_mods:
current_mods.extend([(m.start()+1, mod) for m in re.finditer(aa, row['peptide'])])
        if n_term and 0 not in [i for i, n in current_mods]:
            current_mods.append((0, n_term))
        if c_term and -1 not in [i for i, n in current_mods]:
            current_mods.append((-1, c_term))
current_mods = sorted(current_mods, key=lambda x: x[0])
current_mods = '|'.join(['|'.join(m) for m in [(str(i), n) for i, n in current_mods]])
result.append(current_mods)
return result
def peprec_add_charges(peprec_filename, mgf_filename, overwrite=False):
"""
Get precursor charges from MGF file and add them to a PEPREC
"""
peprec = pd.read_csv(peprec_filename, sep=' ', index_col=None)
if not overwrite and 'charge' in peprec.columns:
print('Charges already in PEPREC')
return None
spec_count = 0
charges = {}
with open(mgf_filename, 'rt') as f:
for line in f:
if line.startswith('TITLE='):
title = line[6:].strip()
spec_count += 1
if line.startswith('CHARGE='):
charge = line[7:].strip()
charges[title] = charge
    if spec_count != len(charges):
        print('Number of TITLE and CHARGE entries in MGF file do not match')
return None
peprec['charge'] = peprec['spec_id'].map(charges)
    new_peprec_filename = re.sub(r'\.peprec$|\.PEPREC$', '', peprec_filename) + '_withcharges.peprec'
peprec.to_csv(new_peprec_filename, sep=' ', index=False)
print('PEPREC with charges written to ' + new_peprec_filename)
return peprec
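
# --- Illustrative usage (editor's sketch, not part of the original file) ---
# A minimal example of add_fixed_mods on an in-memory PEPREC table, adding a
# fixed TMT label on lysine; the column names follow the docstring above.
if __name__ == "__main__":
    example_peprec = pd.DataFrame({
        'spec_id': ['spec1'],
        'peptide': ['PEPTIKDE'],
        'modifications': ['-'],
    })
    print(add_fixed_mods(example_peprec, fixed_mods=[('K', 'TMT6plex')]))
    # -> ['6|TMT6plex']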
| 30.882353
| 100
| 0.613333
| 359
| 2,625
| 4.350975
| 0.334262
| 0.077465
| 0.009603
| 0.013444
| 0.060179
| 0.060179
| 0.048656
| 0.048656
| 0.048656
| 0.048656
| 0
| 0.009375
| 0.268571
| 2,625
| 85
| 101
| 30.882353
| 0.804167
| 0.215619
| 0
| 0.043478
| 0
| 0
| 0.087437
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.043478
| 0
| 0.173913
| 0.065217
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9f160c2036c4a9de666283011c042c949cb5728
| 2,518
|
py
|
Python
|
docs/conf.py
|
maykinmedia/open-personen
|
ddcf083ccd4eb864c5305bcd8bc75c6c64108272
|
[
"RSA-MD"
] | 2
|
2020-08-26T11:24:43.000Z
|
2021-07-28T09:46:40.000Z
|
docs/conf.py
|
maykinmedia/open-personen
|
ddcf083ccd4eb864c5305bcd8bc75c6c64108272
|
[
"RSA-MD"
] | 153
|
2020-08-26T10:45:35.000Z
|
2021-12-10T17:33:16.000Z
|
docs/conf.py
|
maykinmedia/open-personen
|
ddcf083ccd4eb864c5305bcd8bc75c6c64108272
|
[
"RSA-MD"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
import os
import sys
import django
sys.path.insert(0, os.path.abspath("../src"))
import openpersonen # noqa isort:skip
from openpersonen.setup import setup_env # noqa isort:skip
setup_env()
django.setup()
# -- Project information -----------------------------------------------------
project = "Open Personen"
copyright = "2020, Maykin Media"
author = openpersonen.__author__
# The full version, including alpha/beta/rc tags
release = openpersonen.__version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.todo",
"sphinx_tabs.tabs",
"recommonmark",
# "sphinx_markdown_tables",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
source_suffix = [".rst", ".md"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_logo = "logo.png"
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = [
'theme_overrides.css', # override wide tables with word wrap
]
todo_include_todos = True
linkcheck_ignore = [
r"https?://.*\.gemeente.nl",
r"http://localhost:\d+/",
r"https://.*sentry.*",
]
| 29.97619
| 78
| 0.673153
| 326
| 2,518
| 5.088957
| 0.533742
| 0.007233
| 0.022905
| 0.018083
| 0.086799
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002336
| 0.150119
| 2,518
| 83
| 79
| 30.337349
| 0.772897
| 0.651311
| 0
| 0
| 0
| 0
| 0.27929
| 0.028402
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.151515
| 0
| 0.151515
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9f1a4c66877c2a359160ca92cd9a1a5bd503f2e
| 1,547
|
py
|
Python
|
tests/test_preprocessing.py
|
mitchelllisle/leonard
|
09f2f72d1f103813233c19950189a502868ce60a
|
[
"MIT"
] | null | null | null |
tests/test_preprocessing.py
|
mitchelllisle/leonard
|
09f2f72d1f103813233c19950189a502868ce60a
|
[
"MIT"
] | 27
|
2018-04-05T22:32:11.000Z
|
2018-12-09T21:04:12.000Z
|
tests/test_preprocessing.py
|
mitchelllisle/leonard
|
09f2f72d1f103813233c19950189a502868ce60a
|
[
"MIT"
] | null | null | null |
import pytest
from martha import negabs
from martha import normalise
from martha import labelEncoder
from martha import cleanUpString
from martha import medianFrequency
from martha import gini
import numpy as np
import pandas as pd
import json
from sklearn.preprocessing import LabelEncoder
# import os
# os.chdir("/Users/mitchell/Documents/projects/martha")
def test_negabs():
data = pd.read_csv("data/marvelMovies.csv")
result = data.assign(ProductionBudget = data.ProductionBudget.apply(lambda x: negabs(x)))
result = result.ProductionBudget.apply(lambda x: x < 0)
    assert any(result)
def test_normalise():
data = pd.read_csv("data/marvelMovies.csv")
result = data.assign(score = normalise(data['ProductionBudget']))
assert 'score' in result.columns
def test_labelEncoder():
data = pd.read_csv("data/fifaAbilities.csv")
result = data.assign(preferred_foot_encoded = labelEncoder(data, "preferred_foot"))
assert result.preferred_foot_encoded.dtype == 'int64'
def test_cleanUpString():
data = "test, \n"
result = cleanUpString(data, strip_chars = [','], replace_extras = {"t" : "--"})
assert result == '--es--'
def test_medianFrequency():
data = {"values" : [1,2,4], "repeats" : [4,4,2]}
values = pd.Series(data['values'])
repeats = pd.Series(data['repeats'])
computedMedian = medianFrequency(values, repeats)
assert computedMedian == 2
def test_gini():
data = pd.read_csv("data/fifaAbilities.csv")
assert gini(data['marking']) == 0.3441157017683561
| 31.571429
| 93
| 0.716225
| 194
| 1,547
| 5.623711
| 0.335052
| 0.054995
| 0.087993
| 0.047663
| 0.148488
| 0.148488
| 0.148488
| 0.087993
| 0.087993
| 0.087993
| 0
| 0.020658
| 0.155139
| 1,547
| 48
| 94
| 32.229167
| 0.814078
| 0.040724
| 0
| 0.108108
| 0
| 0
| 0.119514
| 0.058069
| 0
| 0
| 0
| 0
| 0.162162
| 1
| 0.162162
| false
| 0
| 0.297297
| 0
| 0.459459
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9f1e55a1cfe8220ba10d8a734bbe623347c9690
| 7,806
|
py
|
Python
|
Recommendation_System/Research_Recommender/Research_Recommender_Clustering.py
|
quangnguyendang/Reinforcement_Learning
|
2551ce95068561c553500838ee6b976f001ba667
|
[
"MIT"
] | null | null | null |
Recommendation_System/Research_Recommender/Research_Recommender_Clustering.py
|
quangnguyendang/Reinforcement_Learning
|
2551ce95068561c553500838ee6b976f001ba667
|
[
"MIT"
] | null | null | null |
Recommendation_System/Research_Recommender/Research_Recommender_Clustering.py
|
quangnguyendang/Reinforcement_Learning
|
2551ce95068561c553500838ee6b976f001ba667
|
[
"MIT"
] | null | null | null |
# Example of CBF for research-paper domain
# Nguyen Dang Quang
from nltk.stem.snowball import SnowballStemmer
import pandas as pd
from nltk.corpus import stopwords
# --------------------------------------------------------
user_input_data = "It is known that the performance of an optimal control strategy obtained from an off-line " \
"computation is degraded under the presence of model mismatch. In order to improve the control " \
"performance, a hybrid neural network and on-line optimal control strategy are proposed in this " \
"study and demonstrated for the control of a fed-batch bioreactor for ethanol fermentation. The " \
"information of the optimal feed profile of the fed-batch reactor. The simulation results show " \
"that the neural network provides a good estimate of unmeasured variables and the on-line optimal " \
"control with the neural network estimator gives a better control performance in terms of the " \
"amount of the desired ethanol product, compared with a conventional off-line optimal control " \
"method."
user_title = "user undefined title"
# --------------------------------------------------------
metadata = pd.read_json('sample-records', lines=True)
user_data = pd.DataFrame([[user_input_data, user_title]], columns=['paperAbstract', 'title'])
metadata = pd.concat([metadata, user_data], sort=True).fillna('')
filter_na = metadata["paperAbstract"] != ''
metadata = metadata[filter_na]
# Lower the characters
def clean_data(x):
if isinstance(x, list):
list_data = []
for i in x:
list_data.append(str.lower(str(i)))
return list_data
elif isinstance(x, str):
return str.lower(str(x))
else:
return ' '
# turn list of string items into string
def get_string(x):
if isinstance(x, list):
names = ''
for i in x:
names = names + i['name'] + " "
return names
else:
return str(x)
# turn list of entity string items into string
def get_entity(x):
if isinstance(x, list):
names = ''
for i in x:
names = names + str(i) + " "
return names
else:
return str(x)
# Apply clean_data function to your features.
features = ['authors', 'title', 'journalName', 'paperAbstract']
for feature in features:
metadata[feature] = metadata[feature].apply(get_string)
metadata[feature] = metadata[feature].apply(clean_data)
metadata['entities'] = metadata['entities'].apply(get_entity)
# Create metadata soup
def create_soup(x):
return x['journalName'] + ' ' + x['title'] + ' ' + x['title'] + ' ' + x['paperAbstract'] + ' ' + x['entities'] + ' ' + x['entities'] + ' ' + x['entities']
metadata['soup'] = metadata.apply(create_soup, axis=1)
# --------------------------------------------------------
stemmer = SnowballStemmer("english")
def word_stem_and_stopword_remove(x1):
x = x1['soup']
final = ''
for y in x.split(' '):
if y not in stopwords.words('english'):
final = final + stemmer.stem(y) + ' '
return final
metadata['filtered'] = metadata.apply(word_stem_and_stopword_remove, axis=1)
# Print metadata for the first 5 papers
with pd.option_context('display.max_rows', None, 'display.max_columns', 10, 'display.max_colwidth', 100):
print(metadata[['soup', 'filtered']].head(5))
print('\n\n Done Pre-processing Data \n\n')
# --------------------------------------------------------
# TfidfVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
tvec = TfidfVectorizer(min_df=1, max_df=10, stop_words='english', ngram_range=(1, 2))
tvec_weights = tvec.fit_transform(metadata.filtered.dropna())
# --------------------------------------------------------
# Classifier for User's Text - K-MEAN - http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=24, random_state=0).fit(tvec_weights)
# print('metadata shape = ', metadata.filtered.shape)
# print('k-mean shape = ', kmeans.labels_.shape)
metadata['cluster_number_kmean'] = kmeans.labels_
# User Data and similar papers
def find_cluster_data_kmean(title):
print('\n\nSimilar papers using K-mean Clustering: \n')
filter_title = metadata['title'] == title
user_cluster = str(metadata.loc[filter_title].cluster_number_kmean.item())
similar_papers = metadata.loc[metadata['cluster_number_kmean'] == int(user_cluster)].title
with pd.option_context('display.max_rows', None, 'display.max_columns', 10, 'display.max_colwidth', -1):
print(similar_papers)
find_cluster_data_kmean(user_title)
# --------------------------------------------------------
# Classifier for User's Text - Birch - http://scikit-learn.org/stable/modules/generated/sklearn.cluster.Birch.html#sklearn.cluster.Birch
from sklearn.cluster import Birch
brc = Birch(branching_factor=50, n_clusters=24, threshold=0.5, compute_labels=True)
brc.fit(tvec_weights)
metadata['cluster_number_birch'] = brc.labels_
# User Data and similar papers
def find_cluster_data_birch(title):
print('\n\nSimilar papers using Birch Clustering: \n')
filter_title = metadata['title'] == title
user_cluster = str(metadata.loc[filter_title].cluster_number_birch.item())
similar_papers = metadata.loc[metadata['cluster_number_birch'] == int(user_cluster)].title
with pd.option_context('display.max_rows', None, 'display.max_columns', 10, 'display.max_colwidth', -1):
print(similar_papers)
find_cluster_data_birch(user_title)
# --------------------------------------------------------
# Classifier for User's Text - Agglomerative Clustering - http://scikit-learn.org/stable/modules/clustering.html#hierarchical-clustering
from sklearn.cluster import AgglomerativeClustering
# User Data and similar papers
def find_cluster_data_Agglomerative(title):
print('Similar papers using Agglomerative Clustering: \n')
filter_title = metadata['title'] == title
user_cluster = str(metadata.loc[filter_title].cluster_number_Agglomerative.item())
similar_papers = metadata.loc[metadata['cluster_number_Agglomerative'] == int(user_cluster)].title
with pd.option_context('display.max_rows', None, 'display.max_columns', 10, 'display.max_colwidth', -1):
print(similar_papers)
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=24)
clustering.fit(tvec_weights.toarray())
metadata['cluster_number_Agglomerative'] = clustering.labels_
print('\n\nAgglomerative Clustering - ', linkage)
find_cluster_data_Agglomerative(user_title)
# --------------------------------------------------
# COSINE SIMILARITY
from sklearn.metrics.pairwise import cosine_similarity
# Find similar papers
def get_recommendations(title, cosine_sim):
# Get the index of the paper that matches to the title
idx = indices.loc[title]
    # Get the pairwise similarity scores of all papers against that paper
sim_scores = list(enumerate(cosine_sim[idx]))
# Sort the papers based on the similarity scores
sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
sim_scores = sim_scores[0:5]
# Get the paper indices
paper_indices = [i[0] for i in sim_scores]
return metadata['title'].iloc[paper_indices]
cosine_sim = cosine_similarity(tvec_weights, tvec_weights)
indices = pd.Series(metadata.index, index=metadata['title'])
print('\n\nSimilar paper using Cosine Similarity: \n')
with pd.option_context('display.max_rows', None, 'display.max_columns', 10, 'display.max_colwidth', -1):
print(get_recommendations(user_title, cosine_sim))
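
# --- Illustrative note (editor's sketch, not part of the original file) ---
# The same cosine-similarity recipe in miniature, independent of the paper
# metadata pipeline above (kept commented to avoid re-running the script):
#
#   docs = ["optimal control of bioreactors", "neural network control"]
#   weights = TfidfVectorizer().fit_transform(docs)
#   print(cosine_similarity(weights, weights))  # 2x2 similarity matrix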
| 38.264706
| 158
| 0.668204
| 984
| 7,806
| 5.151423
| 0.255081
| 0.029592
| 0.024857
| 0.018741
| 0.330834
| 0.29927
| 0.260801
| 0.24857
| 0.21957
| 0.189978
| 0
| 0.00661
| 0.166667
| 7,806
| 203
| 159
| 38.453202
| 0.772636
| 0.195491
| 0
| 0.213675
| 0
| 0
| 0.272494
| 0.008966
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.068376
| 0.008547
| 0.230769
| 0.094017
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9f282edbab72aad8652828b7e20e00859536805
| 5,625
|
py
|
Python
|
extract_from_stata/model/regressions.py
|
gn0/extract-from-stata
|
52f94f65d28aee257c2a46ca82149e14ab74fbda
|
[
"CC-BY-4.0"
] | null | null | null |
extract_from_stata/model/regressions.py
|
gn0/extract-from-stata
|
52f94f65d28aee257c2a46ca82149e14ab74fbda
|
[
"CC-BY-4.0"
] | null | null | null |
extract_from_stata/model/regressions.py
|
gn0/extract-from-stata
|
52f94f65d28aee257c2a46ca82149e14ab74fbda
|
[
"CC-BY-4.0"
] | null | null | null |
import re
import collections
import extract_from_stata.model.common
def is_beginning_of_table(line):
return (line.startswith("Linear regression")
or re.match(r"^ +Source \| +SS +df +MS", line) is not None
or line.startswith("Negative binomial regression")
or line.startswith("HDFE Linear regression")
# reghdfe IV estimation:
or line.startswith("Estimates efficient for homoskedasticity only")
# margins:
or line.startswith("Average marginal effects")) # XXX
def find_first_table_in(string):
table_string = ""
for line in string.splitlines(True):
if is_beginning_of_table(line):
table_string += line
elif table_string and not line.strip():
if re.search(r"^----+$", table_string, flags=re.M):
break
else:
table_string += line
elif table_string:
table_string += line
return table_string
assert (find_first_table_in(
"foo\nbar\nLinear regression\n\n----\nfoo\nbar\n\nlipsum\n")
== "Linear regression\n\n----\nfoo\nbar\n")
def param_to_show_categoricals(parameters):
return ("show_categoricals"
in set(param for param, value in parameters))
def param_to_show_constant(parameters):
return ("show_constant"
in set(param for param, value in parameters))
def extract_sample_size(table_string):
pattern = re.compile(r"Number of obs += +([\d,]+)")
match = pattern.search(table_string)
if match is None:
return None
return match.group(1).replace(",", "")
def extract_number_of_clusters(table_string):
pattern = re.compile(r"adjusted for (\d+) clusters in")
match = pattern.search(table_string)
if match is None:
return None
return match.group(1)
def _extract_depvar_from_table_header(table_string):
pattern = re.compile(r"^ *([A-Za-z0-9_~]+) +\| +Coef[.] ")
for line in table_string.splitlines():
match = pattern.match(line)
if match is None:
continue
return match.group(1)
return None # This shouldn't be reached.
def _extract_depvar_from_table_preamble(table_string):
pattern = re.compile(r"^Expression *: +Pr\(([^)]+)\)")
for line in table_string.splitlines():
match = pattern.match(line)
if match is not None:
return match.group(1)
return None
def extract_dependent_variable(table_string):
varname = _extract_depvar_from_table_preamble(table_string)
if varname is None:
varname = _extract_depvar_from_table_header(table_string)
return varname
def extract_categorical_variable(line):
pattern = re.compile(r"^ *([A-Za-z0-9_~#.]+) +\| *$")
match = pattern.match(line)
if match is None:
return None
return match.group(1)
def is_start_of_new_block(line):
return extract_categorical_variable(line) is not None
assert is_start_of_new_block(" birth_year |")
assert not is_start_of_new_block(
" saw_protest | .0500882 .0193186")
def is_end_of_block(line):
pattern = re.compile(r"^ +\| *$")
return line.startswith("----") or pattern.match(line) is not None
def extract_coefficients(table_string, parameters):
pattern = re.compile(r"^ *([A-Za-z0-9_~#. ]+) +\| +(-?[0-9.e-]+) +([0-9.e-]+) +-?[0-9.]+ +([0-9.]+)")
coefficients = collections.OrderedDict()
segment = "pre-header"
categorical_block = False
categorical_variable = None
for line in table_string.splitlines():
if segment == "pre-header" and re.match(r"^----+$", line):
segment = "header"
elif segment == "header" and re.match(r"^----+[+]-+$", line):
segment = "post-header"
elif segment == "post-header":
if categorical_block and is_end_of_block(line):
categorical_block = False
elif is_start_of_new_block(line):
categorical_block = True
categorical_variable = extract_categorical_variable(
line)
elif (categorical_block
and not param_to_show_categoricals(parameters)):
continue
else:
match = pattern.match(line)
if match is None:
continue
elif (match.group(1) == "_cons"
and not param_to_show_constant(parameters)):
continue
if categorical_block:
variable_name = "%s.%s" % (match.group(1).strip(),
categorical_variable)
else:
variable_name = match.group(1)
coefficients[variable_name] = (
dict(coefficient=match.group(2),
std_error=match.group(3),
p_value=match.group(4)))
return coefficients
def parse_regression(block):
table_string = find_first_table_in(block)
parameters = extract_from_stata.model.common.find_parameters_in(block)
return dict(sample_size=extract_sample_size(table_string),
number_of_clusters=extract_number_of_clusters(
table_string),
dependent_variable=extract_dependent_variable(
table_string),
coefficients=extract_coefficients(
table_string, parameters),
parameters=parameters)
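
# --- Illustrative usage (editor's sketch, not part of the original file) ---
# A fabricated, minimal header exercising the self-contained helpers; real
# Stata output carries many more columns and rows.
if __name__ == "__main__":
    sample = ("Linear regression                Number of obs =      1,234\n"
              "\n"
              "--------------------------------\n")
    print(extract_sample_size(sample))       # -> 1234
    print(find_first_table_in(sample)[:17])  # -> Linear regression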
| 29.450262
| 105
| 0.592889
| 645
| 5,625
| 4.937985
| 0.209302
| 0.089796
| 0.02763
| 0.037363
| 0.515856
| 0.344741
| 0.232339
| 0.164521
| 0.136264
| 0.095447
| 0
| 0.009891
| 0.299022
| 5,625
| 190
| 106
| 29.605263
| 0.79787
| 0.011022
| 0
| 0.307087
| 0
| 0.007874
| 0.118568
| 0.012594
| 0
| 0
| 0
| 0
| 0.023622
| 1
| 0.110236
| false
| 0
| 0.023622
| 0.031496
| 0.283465
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9f39222109d35dd54c9520459d1d2c222c58e13
| 1,173
|
py
|
Python
|
master/master/doctype/add_rate/add_rate.py
|
reddymeghraj/master
|
1f93748fb951e52edc28c1b8c150d1acacff1687
|
[
"MIT"
] | null | null | null |
master/master/doctype/add_rate/add_rate.py
|
reddymeghraj/master
|
1f93748fb951e52edc28c1b8c150d1acacff1687
|
[
"MIT"
] | null | null | null |
master/master/doctype/add_rate/add_rate.py
|
reddymeghraj/master
|
1f93748fb951e52edc28c1b8c150d1acacff1687
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2013, Wayzon and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class AddRate(Document):
    def validate(self):
        b = self.brand
        bt = self.brand_type
        r = self.rate
        q = frappe.db.sql("""select brand,brand_type from `tabAdd Rate` where brand=%s and brand_type=%s""", (b, bt))
        if q:
            frappe.db.sql("""update `tabAdd Rate` set rate=%s where brand=%s and brand_type=%s""", (r, b, bt))

    def before_insert(self):
        b = self.brand
        bt = self.brand_type
        q = frappe.db.sql("""select brand,brand_type from `tabAdd Rate` where brand=%s and brand_type=%s""", (b, bt))
        if q:
            frappe.throw("Entry already exists for the selected brand and brand type")

@frappe.whitelist()
def get_latest_purchase_rate(b, bt):
    q = frappe.db.sql("""select p.date,pi.brand,pi.brand_name,pi.brand_type,pi.type_name,pi.rate
        from `tabPurchaseinfo` pi,`tabPurchase` p
        where p.name=pi.parent and pi.brand=%s and pi.brand_type=%s
        order by date desc limit 1""", (b, bt))
    if q:
        r = q[0][5]
    else:
        frappe.msgprint("The selected brand and brand type have not been purchased till now")
        r = 0
    return r
| 35.545455
| 107
| 0.72208
| 203
| 1,173
| 4.073892
| 0.374384
| 0.097944
| 0.053204
| 0.043531
| 0.304716
| 0.28295
| 0.28295
| 0.25393
| 0.183797
| 0.183797
| 0
| 0.008824
| 0.130435
| 1,173
| 33
| 108
| 35.545455
| 0.801961
| 0.077579
| 0
| 0.310345
| 0
| 0.034483
| 0.485185
| 0.059259
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.103448
| 0
| 0.241379
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9f6b1140ade48b9437ab03cfc7f614535b44fee
| 1,267
|
py
|
Python
|
oxe-api/resource/private/get_my_companies.py
|
CybersecurityLuxembourg/openxeco
|
8d4e5578bde6a07f5d6d569b16b4de224abf7bf0
|
[
"BSD-2-Clause"
] | null | null | null |
oxe-api/resource/private/get_my_companies.py
|
CybersecurityLuxembourg/openxeco
|
8d4e5578bde6a07f5d6d569b16b4de224abf7bf0
|
[
"BSD-2-Clause"
] | null | null | null |
oxe-api/resource/private/get_my_companies.py
|
CybersecurityLuxembourg/openxeco
|
8d4e5578bde6a07f5d6d569b16b4de224abf7bf0
|
[
"BSD-2-Clause"
] | null | null | null |
from flask_apispec import MethodResource
from flask_apispec import doc
from flask_jwt_extended import jwt_required, get_jwt_identity
from flask_restful import Resource
from db.db import DB
from decorator.catch_exception import catch_exception
from decorator.log_request import log_request
from utils.serializer import Serializer
class GetMyCompanies(MethodResource, Resource):
def __init__(self, db: DB):
self.db = db
@log_request
@doc(tags=['private'],
description='Get the list of companies assigned to the user authenticated by the token',
responses={
"200": {},
})
@jwt_required
@catch_exception
def get(self):
subquery = self.db.session \
.query(self.db.tables["UserCompanyAssignment"]) \
.with_entities(self.db.tables["UserCompanyAssignment"].company_id) \
.filter(self.db.tables["UserCompanyAssignment"].user_id == get_jwt_identity()) \
.subquery()
data = Serializer.serialize(
self.db.session
.query(self.db.tables["Company"])
.filter(self.db.tables["Company"].id.in_(subquery))
.all()
, self.db.tables["Company"])
return data, "200 "
| 30.902439
| 97
| 0.649566
| 144
| 1,267
| 5.541667
| 0.388889
| 0.075188
| 0.090226
| 0.12406
| 0.075188
| 0.075188
| 0.075188
| 0
| 0
| 0
| 0
| 0.006309
| 0.249408
| 1,267
| 40
| 98
| 31.675
| 0.832808
| 0
| 0
| 0
| 0
| 0
| 0.134964
| 0.049724
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.25
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9f91708559004c1587db8bf5fbe2a059ca12bc6
| 9,693
|
py
|
Python
|
src/recommendation/main_inference.py
|
AntoninJoly/book
|
257c641fd52d0e9499093247727b135ed361d7c4
|
[
"Apache-2.0"
] | null | null | null |
src/recommendation/main_inference.py
|
AntoninJoly/book
|
257c641fd52d0e9499093247727b135ed361d7c4
|
[
"Apache-2.0"
] | null | null | null |
src/recommendation/main_inference.py
|
AntoninJoly/book
|
257c641fd52d0e9499093247727b135ed361d7c4
|
[
"Apache-2.0"
] | null | null | null |
import math
import click
import dgl
import numpy as np
import torch
from src.builder import create_graph
from src.model import ConvModel
from src.utils_data import DataPaths, DataLoader, FixedParameters, assign_graph_features
from src.utils_inference import read_graph, fetch_uids, postprocess_recs
from src.train.run import get_embeddings
from src.metrics import get_recs, create_already_bought
from src.utils import read_data
cuda = torch.cuda.is_available()
device = torch.device('cuda') if cuda else torch.device('cpu')
num_workers = 4 if cuda else 0
def inference_ondemand(user_ids, # List or 'all'
use_saved_graph: bool,
trained_model_path: str,
use_saved_already_bought: bool,
graph_path=None,
ctm_id_path=None,
pdt_id_path=None,
already_bought_path=None,
k=10,
remove=.99,
**params,
):
"""
Given a fully trained model, return recommendations specific to each user.
Files needed to run
-------------------
Params used when training the model:
Those params will indicate how to run inference on the model. Usually, they are outputted during training
(and hyperparametrization).
If using a saved already bought dict:
The already bought dict: the dict includes all previous purchases of all user ids for which recommendations
were requested. If not using a saved dict, it will be created using the graph.
Using a saved already bought dict is not necessary, but might make the inference
process faster.
A) If using a saved graph:
The saved graph: the graph that must include all user ids for which recommendations were requested. Usually,
it is outputted during training. It could also be created by another independent function.
ID mapping: ctm_id and pdt_id mapping that allows to associate real-world information, e.g. item and customer
identifier, to actual nodes in the graph. They are usually saved when generating a graph.
B) If not using a saved graph:
The graph will be generated on demand, using all the files in DataPaths of src.utils_data. All those files will
be needed.
Parameters
----------
See click options below for details.
Returns
-------
Recommendations for all user ids.
"""
# Load & preprocess data
## Graph
if use_saved_graph:
graph = read_graph(graph_path)
ctm_id_df = read_data(ctm_id_path)
pdt_id_df = read_data(pdt_id_path)
else:
# Create graph
data_paths = DataPaths()
fixed_params = FixedParameters(num_epochs=0, start_epoch=0, # Not used (only used in training)
patience=0, edge_batch_size=0, # Not used (only used in training)
remove=remove, item_id_type=params['item_id_type'],
duplicates=params['duplicates'])
data = DataLoader(data_paths, fixed_params)
ctm_id_df = data.ctm_id
pdt_id_df = data.pdt_id
graph = create_graph(
data.graph_schema,
)
graph = assign_graph_features(graph,
fixed_params,
data,
**params,
)
## Preprocess: fetch right user ids
if user_ids[0] == 'all':
test_uids = np.arange(graph.num_nodes('user'))
else:
test_uids = fetch_uids(user_ids,
ctm_id_df)
## Remove already bought
if use_saved_already_bought:
already_bought_dict = read_data(already_bought_path)
else:
bought_eids = graph.out_edges(u=test_uids, form='eid', etype='buys')
already_bought_dict = create_already_bought(graph, bought_eids)
# Load model
dim_dict = {'user': graph.nodes['user'].data['features'].shape[1],
'item': graph.nodes['item'].data['features'].shape[1],
'out': params['out_dim'],
'hidden': params['hidden_dim']}
if 'sport' in graph.ntypes:
dim_dict['sport'] = graph.nodes['sport'].data['features'].shape[1]
trained_model = ConvModel(
graph,
params['n_layers'],
dim_dict,
params['norm'],
params['dropout'],
params['aggregator_type'],
params['pred'],
params['aggregator_hetero'],
params['embedding_layer'],
)
trained_model.load_state_dict(torch.load(trained_model_path, map_location=device))
if cuda:
trained_model = trained_model.to(device)
# Create dataloader
all_iids = np.arange(graph.num_nodes('item'))
test_node_ids = {'user': test_uids, 'item': all_iids}
n_layers = params['n_layers']
if params['embedding_layer']:
n_layers = n_layers - 1
sampler = dgl.dataloading.MultiLayerFullNeighborSampler(n_layers)
nodeloader_test = dgl.dataloading.NodeDataLoader(
graph,
test_node_ids,
sampler,
batch_size=128,
shuffle=True,
drop_last=False,
num_workers=num_workers
)
num_batches_test = math.ceil((len(test_uids) + len(all_iids)) / 128)
# Fetch recs
trained_model.eval()
with torch.no_grad():
embeddings = get_embeddings(graph,
params['out_dim'],
trained_model,
nodeloader_test,
num_batches_test,
cuda,
device,
params['embedding_layer'],
)
recs = get_recs(graph,
embeddings,
trained_model,
params['out_dim'],
k,
test_uids,
already_bought_dict,
remove_already_bought=True,
cuda=cuda,
device=device,
pred=params['pred'],
use_popularity=params['use_popularity'],
weight_popularity=params['weight_popularity']
)
# Postprocess: user & item ids
processed_recs = postprocess_recs(recs,
pdt_id_df,
ctm_id_df,
params['item_id_type'],
params['ctm_id_type'])
print(processed_recs)
return processed_recs
@click.command()
@click.option('--params_path', default='params.pkl',
help='Path where the optimal hyperparameters found in the hyperparametrization were saved.')
@click.option('--user_ids', multiple=True, default=['all'],
help="IDs of users for which to generate recommendations. Either list of user ids, or 'all'.")
@click.option('--use_saved_graph', count=True,
help='If true, will use graph that was saved on disk. Need to import ID mapping for users & items.')
@click.option('--trained_model_path', default='model.pth',
help='Path where fully trained model is saved.')
@click.option('--use_saved_already_bought', count=True,
help='If true, will use already bought dict that was saved on disk.')
@click.option('--graph_path', default='graph.bin',
help='Path where the graph was saved. Mandatory if use_saved_graph is True.')
@click.option('--ctm_id_path', default='ctm_id.pkl',
              help='Path where the mapping for customers was saved. Mandatory if use_saved_graph is True.')
@click.option('--pdt_id_path', default='pdt_id.pkl',
              help='Path where the mapping for items was saved. Mandatory if use_saved_graph is True.')
@click.option('--already_bought_path', default='already_bought.pkl',
help='Path where the already bought dict was saved. Mandatory if use_saved_already_bought is True.')
@click.option('--k', default=10,
help='Number of recs to generate for each user.')
@click.option('--remove', default=.99,
              help='Percentage of users to remove from the graph if use_saved_graph = True. If more than 0, user_ids might'
                   ' not be in the graph. However, a higher "remove" allows for faster inference.')
def main(params_path, user_ids, use_saved_graph, trained_model_path,
use_saved_already_bought, graph_path, ctm_id_path, pdt_id_path,
already_bought_path, k, remove):
params = read_data(params_path)
params.pop('k', None)
params.pop('remove', None)
inference_ondemand(user_ids=user_ids, # List or 'all'
use_saved_graph=use_saved_graph,
trained_model_path=trained_model_path,
use_saved_already_bought=use_saved_already_bought,
graph_path=graph_path,
ctm_id_path=ctm_id_path,
pdt_id_path=pdt_id_path,
already_bought_path=already_bought_path,
k=k,
remove=remove,
**params,
)
if __name__ == '__main__':
main()
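
# --- Illustrative usage (editor's sketch, not part of the original file) ---
# A hedged example command line; the artifact file names below are the
# defaults of the click options above and are assumed to come from training:
#
#   python main_inference.py \
#       --params_path params.pkl --trained_model_path model.pth \
#       --use_saved_graph --graph_path graph.bin \
#       --ctm_id_path ctm_id.pkl --pdt_id_path pdt_id.pkl \
#       --user_ids 1337 --k 10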
| 42.327511
| 120
| 0.573816
| 1,126
| 9,693
| 4.711368
| 0.219361
| 0.063713
| 0.022055
| 0.02771
| 0.196041
| 0.147408
| 0.11574
| 0.077851
| 0.025825
| 0.018096
| 0
| 0.004078
| 0.342309
| 9,693
| 228
| 121
| 42.513158
| 0.828078
| 0.175487
| 0
| 0.082353
| 0
| 0.005882
| 0.185449
| 0.009031
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011765
| false
| 0
| 0.076471
| 0
| 0.094118
| 0.005882
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9ff014150d49d6aa831781d7f5630eb634ba2c0
| 5,328
|
py
|
Python
|
evolutron/tools/data_tools.py
|
thrakar9/Evolutron
|
1b9b4c364fe531e5001fd9010898b96e0f5907d7
|
[
"MIT"
] | 10
|
2017-11-30T20:30:12.000Z
|
2021-04-10T21:45:12.000Z
|
evolutron/tools/data_tools.py
|
thrakar9/Evolutron
|
1b9b4c364fe531e5001fd9010898b96e0f5907d7
|
[
"MIT"
] | null | null | null |
evolutron/tools/data_tools.py
|
thrakar9/Evolutron
|
1b9b4c364fe531e5001fd9010898b96e0f5907d7
|
[
"MIT"
] | 3
|
2019-06-20T15:13:42.000Z
|
2020-03-24T11:44:07.000Z
|
# coding=utf-8
from functools import partial
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from .seq_tools import aa2hot
from ..tools import io_tools as io
def data_it(dataset, block_size, multi_data=False):
""" Iterates through a large array, yielding chunks of block_size.
"""
size = len(dataset)
for start_idx in range(0, size, block_size):
excerpt = slice(start_idx, min(start_idx + block_size, size))
if multi_data:
yield [x[excerpt] for x in dataset]
else:
yield dataset[excerpt]
def pad_or_clip_seq(x, n):
if n >= x.shape[0]:
b = np.zeros((n, x.shape[1]))
b[:x.shape[0]] = x
return b
else:
return x[:n, :]
def pad_or_clip_img(x, n):
assert x.shape[0] == x.shape[1], 'Image should be two dimensional with equal dimensions'
if n >= x.shape[0]:
b = np.zeros((n, n))
b[:x.shape[0], :x.shape[1]] = x
return b
else:
return x[:n, :n]
def random_aa_sequence(size):
aa = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
aa_probs = np.array([0.0825, 0.0135, 0.0545, 0.0675, 0.0385, 0.0705, 0.0225,
0.0595, 0.0585, 0.0965, 0.0245, 0.0405, 0.0475, 0.0395,
0.0555, 0.0665, 0.0535, 0.0685, 0.0105, 0.0295])
return 'M' + ''.join(np.random.choice(aa, size=size, p=aa_probs))
def load_random_aa_seqs(n, length=None, min_length=100, max_length=1000):
if length:
return pd.Series([random_aa_sequence(length) for _ in range(n)])
else:
return pd.Series([random_aa_sequence(np.random.randint(min_length, max_length)) for _ in range(n)])
def preprocess_dataset(x_data, y_data=None, one_hot='x', padded=True, pad_y_data=False, nb_aa=20, min_aa=None,
max_aa=None):
"""
Args:
x_data (pd.Series):
y_data (list or np.ndArray):
one_hot (str):
padded (bool):
pad_y_data (bool):
nb_aa:
min_aa:
max_aa:
    Returns:
        The preprocessed x_data, plus y_data when it is provided.
    """
if 'x' in one_hot:
x_data = x_data.apply(lambda x: aa2hot(x, nb_aa)).tolist()
else:
x_data = x_data.tolist()
if 'y' in one_hot:
pass
if padded:
if not max_aa:
max_aa = int(np.percentile([len(x) for x in x_data], 99)) # pad so that 99% of datapoints are complete
else:
max_aa = min(max_aa, np.max([len(x) for x in x_data]))
x_data = np.asarray([pad_or_clip_seq(x, max_aa) for x in x_data], dtype=np.float32)
if min_aa:
min_aa = max(min_aa, np.max([len(x) for x in x_data]))
x_data = np.asarray([pad_or_clip_seq(x, min_aa) for x in x_data], dtype=np.float32)
if y_data:
if padded and pad_y_data:
y_data = np.asarray([pad_or_clip_seq(y, min_aa) for y in y_data])
else:
y_data = np.asarray(y_data)
assert ((len(x_data) == len(y_data)) or (len(x_data) == len(y_data[0])))
data_size = len(x_data)
print('Dataset size: {0}'.format(data_size))
return x_data, y_data
else:
data_size = len(x_data)
print('Dataset size: {0}'.format(data_size))
return x_data
def load_dataset(infile, codes=None, code_key=None, nb_aa=20, **parser_options):
"""
Loads the Evolutron formatted dataset from the input file. Automatically recognizes file format and calls
corresponding parser.
Args:
infile:
codes:
code_key:
nb_aa:
**parser_options:
Returns: The dataset with the appropriate format given the options.
"""
filename = infile
filetype = filename.split('.')[-1]
if filetype == 'tsv':
x_data, y_data = io.csv_parser(filename, codes, code_key, sep='\t')
elif filetype == 'csv':
x_data, y_data = io.csv_parser(filename, codes, code_key, sep=',')
elif filetype == 'fasta':
x_data, y_data = io.fasta_parser(filename, codes, code_key)
elif filetype == 'sec':
x_data, y_data = io.secs_parser(filename, nb_aa=nb_aa, **parser_options)
elif filetype == 'gz':
x_data, y_data = io.npz_parser(filename, nb_aa=nb_aa, **parser_options)
elif filetype == 'h5':
x_data, y_data = io.h5_parser(filename, **parser_options)
else:
raise NotImplementedError('There is no parser for current file type.')
return x_data, y_data
def train_valid_split(x, y, nb_inputs=1, nb_outputs=1, validation_split=0.0, stratify=None, shuffle=True):
seed = np.random.randint(0, 10)
split_func = partial(train_test_split, test_size=validation_split, stratify=stratify, shuffle=shuffle,
random_state=seed)
if nb_inputs == 1:
x_train, x_valid = split_func(x)
else:
x_train = [[] for _ in x]
x_valid = [[] for _ in x]
for i, x_d in enumerate(x):
x_train[i], x_valid[i] = split_func(x_d)
if nb_outputs == 1:
y_train, y_valid = split_func(y)
else:
y_train = [[] for _ in y]
y_valid = [[] for _ in y]
for i, y_d in enumerate(y):
y_train[i], y_valid[i] = split_func(y_d)
return x_train, y_train, x_valid, y_valid
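
# --- Illustrative usage (editor's sketch, not part of the original file) ---
# A minimal example of the pad/clip pipeline; the (4, 8, 20) shape assumes
# aa2hot one-hot encodes each residue over 20 amino acids.
if __name__ == "__main__":
    seqs = load_random_aa_seqs(4, length=10)
    x = preprocess_dataset(seqs, padded=True, max_aa=8)
    print(x.shape)  # -> (4, 8, 20)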
| 31.714286
| 115
| 0.593844
| 847
| 5,328
| 3.515939
| 0.24085
| 0.043653
| 0.030222
| 0.030222
| 0.283076
| 0.232371
| 0.192075
| 0.165212
| 0.165212
| 0.152451
| 0
| 0.037668
| 0.272523
| 5,328
| 167
| 116
| 31.904192
| 0.73065
| 0.106419
| 0
| 0.196262
| 0
| 0
| 0.037403
| 0
| 0
| 0
| 0
| 0
| 0.018692
| 1
| 0.074766
| false
| 0.009346
| 0.056075
| 0
| 0.233645
| 0.018692
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9ff05579682158568bfec5a2b8abe73eaf5622f
| 1,894
|
py
|
Python
|
magic_timer/format_seconds.py
|
sradc/magic-timer
|
02e95ae7e96787871bd243a2a6a3e14d8615560e
|
[
"MIT"
] | 2
|
2020-09-10T15:43:28.000Z
|
2020-09-11T08:14:36.000Z
|
magic_timer/format_seconds.py
|
sradc/magic-timer
|
02e95ae7e96787871bd243a2a6a3e14d8615560e
|
[
"MIT"
] | 1
|
2020-09-10T10:55:04.000Z
|
2020-09-10T12:40:15.000Z
|
magic_timer/format_seconds.py
|
sradc/magic-timer
|
02e95ae7e96787871bd243a2a6a3e14d8615560e
|
[
"MIT"
] | 1
|
2020-03-06T09:07:54.000Z
|
2020-03-06T09:07:54.000Z
|
"""Turn time in seconds into a readable string.
"""
import math
from typing import Union, Tuple
TIME_UNITS = ( # Order matters
("days", 24 * 60 * 60),
("hours", 60 * 60),
("minutes", 60),
("seconds", 1),
("milliseconds", 1 / 1000),
("microseconds", 1 / 1000_000),
)
def format_seconds(seconds: float) -> str:
"""Convert `seconds` into readable string.
E.g. format_seconds(45.38) -> '46 seconds'
format_seconds(434) -> '7.3 minutes'
"""
try:
value, unit = _convert_to_appropriate_unit(seconds)
except ValueError:
return f"t < 1 {TIME_UNITS[-1][0]}"
value = _round_appropriately(value, unit)
return f"{value} {unit}"
def _convert_to_appropriate_unit(value_in_seconds: float) -> Tuple[float, str]:
"""Convert `value_in_seconds` into an appropriate unit from TIME_UNITS."""
for unit, seconds_in_unit in TIME_UNITS:
if value_in_seconds >= seconds_in_unit:
value = value_in_seconds / seconds_in_unit
return value, unit
raise ValueError("`value_in_seconds` is smaller than the smallest time unit.")
def _round_appropriately(value: float, unit: str) -> Union[int, float]:
"""Round *up* to 2 significant figures
(except for unit="days", and value>=100, which is just rounded
to the nearest whole number).
Round up because it's better to overestimate than underestimate
time taken.
"""
num_integer_digits = len(str(int(value)))
if num_integer_digits <= 1:
return math.ceil(value * 10) / 10
elif num_integer_digits == 2:
return math.ceil(value)
elif num_integer_digits == 3:
if unit == "days":
return math.ceil(value)
return math.ceil(value / 10) * 10
else:
if unit == "days":
return math.ceil(value)
raise ValueError("Should not have more than 3 digits.")
| 31.566667
| 82
| 0.637276
| 255
| 1,894
| 4.560784
| 0.368627
| 0.046432
| 0.060189
| 0.081685
| 0.135856
| 0.135856
| 0.049871
| 0
| 0
| 0
| 0
| 0.039106
| 0.243928
| 1,894
| 59
| 83
| 32.101695
| 0.773045
| 0.2434
| 0
| 0.135135
| 0
| 0
| 0.135802
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081081
| false
| 0
| 0.054054
| 0
| 0.351351
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f9ff74d919dcc09d02f35b81da80d879d34feb93
| 6,458
|
py
|
Python
|
lookmlgen/view.py
|
jimmyshah/lookml-gen2
|
7814d2ea6cf302ef7b937e3365d047b09a9878b4
|
[
"Apache-2.0"
] | 31
|
2017-04-18T03:40:38.000Z
|
2022-02-14T23:06:02.000Z
|
lookmlgen/view.py
|
jimmyshah/lookml-gen2
|
7814d2ea6cf302ef7b937e3365d047b09a9878b4
|
[
"Apache-2.0"
] | 368
|
2017-05-15T07:43:38.000Z
|
2022-03-28T08:55:21.000Z
|
lookmlgen/view.py
|
jimmyshah/lookml-gen2
|
7814d2ea6cf302ef7b937e3365d047b09a9878b4
|
[
"Apache-2.0"
] | 10
|
2017-05-23T03:45:23.000Z
|
2021-08-10T20:19:55.000Z
|
"""
File name: view.py
Author: joeschmid
Date created: 4/8/17
"""
import json
from collections import OrderedDict
try:
from textwrap import indent
except ImportError:
from .util import indent
from .base_generator import BaseGenerator
from .field import FieldType
class View(BaseGenerator):
"""Generates a LookML View
Initialize a View object with your parameters,
add Fields such as :class:`~lookmlgen.field.Dimension`,
:class:`~lookmlgen.field.Measure`,
:class:`~lookmlgen.field.DimensionGroup`, and
:class:`~lookmlgen.field.Filter`, and then
generate LookML for the view using :py:meth:`~View.generate_lookml`
:param name: Name of the view
:param label: Label to use for the view (may contain spaces)
:param sql_table_name: Name of the SQL table to use in the view
:param file: File handle of a file open for writing or a
StringIO object
:type name: string
:type label: string
:type sql_table_name: list of strings
:type file: File handle or StringIO object
"""
def __init__(self, name, label=None, sql_table_name=None, file=None):
super(View, self).__init__(file=file)
self.name = name
self.label = label
self.sql_table_name = sql_table_name
self.fields = OrderedDict()
self.derived_table = None
def generate_lookml(self, file=None, format_options=None):
""" Writes LookML for the view to a file or StringIO buffer.
:param file: File handle of a file open for writing or a
StringIO object
:param format_options: Formatting options to use during generation
:type file: File handle or StringIO object
:type format_options:
:class:`~lookmlgen.base_generator.GeneratorFormatOptions`
"""
if not file and not self.file:
raise ValueError('Must provide a file in either the constructor '
'or as a parameter to generate_lookml()')
f = file if file else self.file
fo = format_options if format_options else self.format_options
if fo.warning_header_comment:
f.write(fo.warning_header_comment)
f.write('view: {self.name} {{\n'.format(self=self))
if self.sql_table_name:
f.write('{indent}sql_table_name: {self.sql_table_name} ;;\n'.
format(indent=' ' * fo.indent_spaces, self=self))
if self.label:
f.write('{indent}label: "{self.label}"\n'.
format(indent=' ' * fo.indent_spaces, self=self))
if fo.newline_between_items:
f.write('\n')
if self.derived_table:
self.derived_table.generate_lookml(file=f, format_options=fo)
if fo.newline_between_items:
f.write('\n')
if fo.view_fields_alphabetical:
self.__ordered_fields = sorted(self.fields.items())
else:
self.__ordered_fields = self.fields.items()
self.__generated_fields = []
self._gen_fields(f, fo, [FieldType.FILTER])
self._gen_fields(f, fo, [FieldType.DIMENSION, FieldType.DIMENSION_GROUP])
self._gen_fields(f, fo, [FieldType.MEASURE])
f.write('}\n')
return
def add_field(self, field):
"""Adds a :class:`~lookmlgen.field.Field` object to a :class:`View`"""
self.fields[field.name] = field
return
def set_derived_table(self, derived_table):
"""Adds a :class:`~lookmlgen.view.DerivedTable` object to a
:class:`View`
"""
self.derived_table = derived_table
def _gen_fields(self, f, fo, field_types):
for k, d in self.__ordered_fields:
if d.field_type not in field_types:
continue
if len(self.__generated_fields) != 0 and fo.newline_between_items:
f.write('\n')
d.generate_lookml(file=f, format_options=fo)
self.__generated_fields.append(d)
class DerivedTable(BaseGenerator):
"""Generates the LookML View parameters to support derived
tables, including persistent derived tables (PDTs).
:param sql: SQL statement to execute
:param sql_trigger_value: SQL to determine when to trigger build
    :param indexes: List of column names to use as indexes
:param file: File handle of a file open for writing or a StringIO object
:type sql: string
:type sql_trigger_value: string
:type indexes: list of strings
:type file: File handle or StringIO object
"""
def __init__(self, sql, sql_trigger_value=None, indexes=None, file=None):
super(DerivedTable, self).__init__(file=file)
self.sql = sql
self.sql_trigger_value = sql_trigger_value
self.indexes = indexes
def generate_lookml(self, file=None, format_options=None):
""" Writes LookML for a derived table to a file or StringIO buffer.
:param file: File handle of a file open for writing or a
StringIO object
:param format_options: Formatting options to use during generation
:type file: File handle or StringIO object
:type format_options:
:class:`~lookmlgen.base_generator.GeneratorFormatOptions`
"""
if not file and not self.file:
raise ValueError('Must provide a file in either the constructor '
'or as a parameter to generate_lookml()')
f = file if file else self.file
fo = format_options if format_options else self.format_options
f.write('{indent}derived_table: {{\n'.
format(indent=' ' * fo.indent_spaces))
if self.sql:
final_sql = ' ' + self.sql if '\n' not in self.sql \
else '\n' + indent(self.sql, ' ' * 3 * fo.indent_spaces)
f.write('{indent}sql:{sql} ;;\n'.
format(indent=' ' * 2 * fo.indent_spaces, sql=final_sql))
if self.sql_trigger_value:
f.write('{indent}sql_trigger_value: '
'{self.sql_trigger_value} ;;\n'.
format(indent=' ' * 2 * fo.indent_spaces, self=self))
if self.indexes:
f.write('{indent}indexes: {indexes}\n'.
format(indent=' ' * 2 * fo.indent_spaces,
indexes=json.dumps(self.indexes)))
f.write('{indent}}}\n'.format(indent=' ' * fo.indent_spaces))
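
# --- Illustrative usage (editor's sketch, not part of the original file) ---
# A hedged example of rendering a view into a buffer. The Dimension
# constructor arguments are hypothetical; see lookmlgen.field for the
# real field API (kept commented for that reason):
#
#   import io
#   from lookmlgen.field import Dimension
#   buf = io.StringIO()
#   v = View('orders', sql_table_name='analytics.orders')
#   v.add_field(Dimension('order_id', type='number'))  # hypothetical signature
#   v.generate_lookml(file=buf)
#   print(buf.getvalue())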
| 38.670659
| 81
| 0.623877
| 833
| 6,458
| 4.67587
| 0.170468
| 0.046727
| 0.024647
| 0.019512
| 0.486008
| 0.450578
| 0.391784
| 0.34095
| 0.34095
| 0.306033
| 0
| 0.001932
| 0.278569
| 6,458
| 166
| 82
| 38.903614
| 0.834085
| 0.312945
| 0
| 0.238636
| 0
| 0
| 0.10572
| 0.027999
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079545
| false
| 0
| 0.079545
| 0
| 0.204545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6032655721c54e66ad4bd1357c8c09c9c05f5d4
| 1,788
|
py
|
Python
|
setup.py
|
vikingco/django-smsgateway
|
91675e599a147f4d7e64ff4c4455dbf75ed753d3
|
[
"BSD-3-Clause"
] | 13
|
2015-03-11T06:55:50.000Z
|
2022-02-08T16:50:16.000Z
|
setup.py
|
vikingco/django-smsgateway
|
91675e599a147f4d7e64ff4c4455dbf75ed753d3
|
[
"BSD-3-Clause"
] | 17
|
2015-03-19T12:27:41.000Z
|
2019-12-09T14:21:21.000Z
|
setup.py
|
vikingco/django-smsgateway
|
91675e599a147f4d7e64ff4c4455dbf75ed753d3
|
[
"BSD-3-Clause"
] | 7
|
2015-05-15T00:14:49.000Z
|
2019-06-27T02:46:09.000Z
|
from setuptools import setup, find_packages
from pip._internal.req.req_file import parse_requirements
from pip._internal.download import PipSession
from os import path
from smsgateway import __version__
# Lists of requirements and dependency links which are needed during runtime, testing and setup
install_requires = []
tests_require = []
dependency_links = []
# Inject runtime requirements from requirements.txt into setup.py
requirements_file = parse_requirements(path.join('requirements', 'requirements.txt'), session=PipSession())
for req in requirements_file:
install_requires.append(str(req.req))
if req.link:
dependency_links.append(str(req.link))
# Inject test requirements from requirements_test.txt into setup.py
requirements_test_file = parse_requirements(path.join('.', 'requirements', 'requirements_test.txt'), session=PipSession())
for req in requirements_test_file:
tests_require.append(str(req.req))
if req.link:
dependency_links.append(str(req.link))
setup(
name='django-smsgateway',
version=__version__,
url='https://github.com/vikingco/smsgateway',
license='BSD',
description='SMS gateway for sending text messages',
long_description=open('README.rst', 'r').read(),
author='Unleashed NV',
author_email='operations@unleashed.be',
packages=find_packages('.'),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
setup_requires=['pytest-runner', ],
tests_require=tests_require,
dependency_links=dependency_links,
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python',
'Operating System :: OS Independent',
'Environment :: Web Environment',
'Framework :: Django',
],
)
| 33.735849
| 122
| 0.73434
| 212
| 1,788
| 5.985849
| 0.448113
| 0.070922
| 0.037825
| 0.042553
| 0.340426
| 0.340426
| 0.340426
| 0.193853
| 0.193853
| 0.193853
| 0
| 0
| 0.157718
| 1,788
| 52
| 123
| 34.384615
| 0.842629
| 0.125839
| 0
| 0.095238
| 0
| 0
| 0.231559
| 0.028223
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.119048
| 0
| 0.119048
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e603924130bfa4f0b3913ce47ce5a8e5befed2ec
| 2,394
|
py
|
Python
|
querier/queries/filtr.py
|
Techtonique/querier
|
47288fc78273f248199fc67b50e96eaa7dd5441a
|
[
"BSD-3-Clause-Clear"
] | 2
|
2020-09-18T14:58:28.000Z
|
2021-07-14T11:57:14.000Z
|
querier/queries/filtr.py
|
Techtonique/querier
|
47288fc78273f248199fc67b50e96eaa7dd5441a
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
querier/queries/filtr.py
|
Techtonique/querier
|
47288fc78273f248199fc67b50e96eaa7dd5441a
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
# Authors: Thierry Moudiki
#
# License: BSD 3
import numpy as np
from ..utils import parse_request
from ..utils import memoize
# filtr(df, 'tip > 5')
# req = "(time == 'Dinner') & (day == 'Sun') & (tip>1.5)"
# filtr(df, req, limit=3, random=False)
# filtr(df, req, limit=4, random=True)
#
# req = "(tip>1.5)"
# filtr(df, req, limit=7, random=False)
# filtr(df, req, limit=5, random=True)
#
# req = "(tip > 5) & (size > 3)"
# filtr(df, req, limit=5, random=False)
# filtr(df, req, limit=8, random=True)
#
# req = "(tip > 5) & (size > 3) & (sex == 'Male')"
# filtr(df, req, limit=7, random=False)
# filtr(df, req, limit=8, random=True)
@memoize
def filtr(df, req=None, limit=None, random=False, seed=123):
""" Filter rows, based on given criteria.
Args:
req: str
criteria for filtering the rows
limit: int
number of records to be retrieved
random: bool
`True` if we want a random set of records
seed: int
reproducibility seed for situations where `random == True`
Examples:
https://github.com/thierrymoudiki/querier/tree/master/querier/demo
"""
if req is None: # no criteria given; return the input unchanged
return df
# if request is not None:
n, p = df.shape
str_conds = parse_request(req)
df_res = df[eval(str_conds)]
if limit is not None:
assert int(limit) == limit, "limit must be an integer"
if not random:
try:
return df_res.head(limit)
except Exception:
raise ValueError(
"invalid request: check column names + contents (and parentheses for multiple conditions)"
)
# if random == True:
try:
np.random.seed(seed)
return df_res.iloc[
np.random.choice(
range(0, df_res.shape[0]), size=limit, replace=False
),
]
except Exception:
raise ValueError(
"invalid request: check column names + contents (and parentheses for multiple conditions)"
)
# if limit is None:
try:
return df_res
except Exception:
raise ValueError(
"invalid request: check column names + contents (and parentheses for multiple conditions)"
)
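A hedged usage sketch matching the commented examples at the top of the file. Seaborn and its "tips" dataset are assumptions used only for illustration (not querier dependencies), and the memoize decorator is assumed to accept DataFrame arguments:
# Hedged usage sketch for filtr(); all names outside filtr() are assumptions.
import seaborn as sns

df = sns.load_dataset("tips")

# First three Sunday dinner rows with a tip above 1.5
req = "(time == 'Dinner') & (day == 'Sun') & (tip > 1.5)"
print(filtr(df, req, limit=3, random=False))

# A reproducible random sample of five matching rows
print(filtr(df, req, limit=5, random=True, seed=123))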
| 25.2
| 110
| 0.543442
| 294
| 2,394
| 4.394558
| 0.37415
| 0.05418
| 0.069659
| 0.092879
| 0.402477
| 0.402477
| 0.357585
| 0.278638
| 0.278638
| 0.221362
| 0
| 0.014603
| 0.342105
| 2,394
| 94
| 111
| 25.468085
| 0.805714
| 0.430242
| 0
| 0.333333
| 0
| 0
| 0.228029
| 0
| 0
| 0
| 0
| 0
| 0.027778
| 1
| 0.027778
| false
| 0
| 0.083333
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e605177b0e5e0389026fb9fd1bf99e17303d4553
| 3,310
|
py
|
Python
|
apps/jobs/dota_parse_curated/insights/skill_insights.py
|
ajkdrag/Dota-2-Pyspark-EDA
|
aa64cd06248143703792ad668288518804679735
|
[
"MIT"
] | null | null | null |
apps/jobs/dota_parse_curated/insights/skill_insights.py
|
ajkdrag/Dota-2-Pyspark-EDA
|
aa64cd06248143703792ad668288518804679735
|
[
"MIT"
] | null | null | null |
apps/jobs/dota_parse_curated/insights/skill_insights.py
|
ajkdrag/Dota-2-Pyspark-EDA
|
aa64cd06248143703792ad668288518804679735
|
[
"MIT"
] | null | null | null |
import pyspark.sql.functions as F
def top_k_most_picked_skills(match_hero_names_df, ohe_heroes_df, k=5):
skills_df = match_hero_names_df.join(
ohe_heroes_df, on=[match_hero_names_df.hero == ohe_heroes_df.name]
).select(ohe_heroes_df.columns[3:])
skills = skills_df.columns
skills_df_agg = skills_df.select([F.sum(col).alias(col) for col in skills])
list_skill_picks = [
F.struct(F.lit(col).alias("skill"), F.col(col).alias("num_picks"))
for col in skills
]
return (
skills_df_agg.select(F.explode(F.array(list_skill_picks)).alias("exploded"))
.select("exploded.*")
.orderBy(F.desc("num_picks"))
.limit(k)
)
def top_k_skills_in_most_wins(
match_hero_names_df, match_details_df, ohe_heroes_df, k=5
):
return top_k_most_picked_skills(
match_hero_names_df.join(
match_details_df,
on=[
match_hero_names_df.match_id == match_details_df.match_id,
match_hero_names_df.team == match_details_df.winner,
],
),
ohe_heroes_df,
k=k,
)
def top_k_skills_with_highest_win_rates(
match_hero_names_df, match_details_df, ohe_heroes_df, k=10
):
merged_df = match_hero_names_df.join(
match_details_df, on=[match_hero_names_df.match_id == match_details_df.match_id]
).select(
[
match_hero_names_df.match_id,
match_hero_names_df.hero,
match_hero_names_df.team,
"winner",
]
)
hero_scores_df = merged_df.withColumn(
"score",
F.when(F.col("winner") == F.col("team"), F.lit(1)).otherwise(F.lit(0)),
)
skills = ohe_heroes_df.columns[3:]
skill_scores_df = hero_scores_df.join(
ohe_heroes_df, on=[hero_scores_df.hero == ohe_heroes_df.name]
)
list_skill_picks = [
F.struct(
F.lit(col).alias("skill"),
(F.col("score") * F.col(col)).alias("wins"),
(F.col(col)).alias("picks"),
)
for col in skills
]
return (
skill_scores_df.select(F.explode(F.array(list_skill_picks)).alias("exploded"))
.select("exploded.*")
.groupBy("skill")
.agg(
F.sum("picks").alias("total_picks"),
F.sum("wins").alias("total_wins"),
)
.withColumn("win_rate", (100 * F.col("total_wins")) / F.col("total_picks"))
.orderBy(F.desc("win_rate"))
.limit(k)
)
def get_all_skill_insights(entities):
match_hero_names_df = entities["match_hero_names"]
ohe_heroes_df = entities["ohe_heroes"]
match_details_df = entities["match_details"]
top_k_most_picked_skills_df = top_k_most_picked_skills(
match_hero_names_df, ohe_heroes_df
)
top_k_skills_in_most_wins_df = top_k_skills_in_most_wins(
match_hero_names_df, match_details_df, ohe_heroes_df
)
top_k_skills_with_highest_win_rates_df = top_k_skills_with_highest_win_rates(
match_hero_names_df, match_details_df, ohe_heroes_df
)
entities["insight_most_picked_skills"] = top_k_most_picked_skills_df
entities["insight_skills_in_most_wins"] = top_k_skills_in_most_wins_df
entities["insight_skills_with_highest_wr"] = top_k_skills_with_highest_win_rates_df
| 31.52381
| 88
| 0.652266
| 482
| 3,310
| 4.008299
| 0.149378
| 0.083851
| 0.130435
| 0.140787
| 0.614907
| 0.576605
| 0.468427
| 0.417184
| 0.385093
| 0.374741
| 0
| 0.004322
| 0.231118
| 3,310
| 104
| 89
| 31.826923
| 0.754813
| 0
| 0
| 0.159091
| 0
| 0
| 0.08852
| 0.025076
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.011364
| 0.011364
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6056330329bc68abb7e5b76c93b9c6288226754
| 9,217
|
py
|
Python
|
expert/layers/divisive_normalisation.py
|
alexhepburn/expert
|
546f7452ced2213ef91e5ce6e7456a1668dd9f95
|
[
"BSD-3-Clause"
] | 1
|
2021-04-10T11:34:22.000Z
|
2021-04-10T11:34:22.000Z
|
expert/layers/divisive_normalisation.py
|
alexhepburn/expert
|
546f7452ced2213ef91e5ce6e7456a1668dd9f95
|
[
"BSD-3-Clause"
] | null | null | null |
expert/layers/divisive_normalisation.py
|
alexhepburn/expert
|
546f7452ced2213ef91e5ce6e7456a1668dd9f95
|
[
"BSD-3-Clause"
] | null | null | null |
"""
The :mod:`expert.layers.divisive_normalisation` module holds classes of
layers for a network that use divisive normalisation. This includes
generalised divisive normalisation.
"""
# Author: Alex Hepburn <alex.hepburn@bristol.ac.uk>
# License: new BSD
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['GDN']
class GDN(nn.Module):
"""
Generalised Divisive Normalisation proposed in [BALLE2015GDN]_.
The activation function this layer implements when kernel size is 1 is
given by:
.. math::
y[i] = x[i] / sqrt(beta[i] + sum_j(gamma[j, i] * x[j]^2))
where `i` and `j` run over channels.
If the kernel_size is kept to the default value of 1, this represents the
true generalised divisive normalisation proposed in [BALLE2015GDN]_. If the
kernel size is larger than 1, the convolution acts not only channel wise
but also acts spatially. This is called spatial generalised divisive
normalisation.
.. [BALLE2015GDN] Ballé, Johannes, et al. Density Modeling of Images Using
a Generalized Normalization Transformation. Nov. 2015. arxiv.org,
https://arxiv.org/abs/1511.06281v4.
Parameters
----------
n_channels : int
Number of channels that the input to this layer will have.
kernel_size : int, optional (default=1)
Size of the kernel. A square kernel is always used and will have shape
[kernel_size, kernel_size]
stride : int, optional (default=1)
The stride of the convolution in the forward pass.
padding : int, optional (default=0)
The padding to be used in the convolution in the forward pass. In order
to get the output of the convolution to be the same size as the input,
to avoid having to interpolate, then the ``padding`` parameter should
be chosen carefully.
gamma_init : float, optional (default=0.1)
The value that the gamma matrix will be initialised with, it will be
the identity multiplied by this value.
reparam_offset : float, optional (default=2**-18)
Offset used to reparameterise gamma and beta away from zero.
beta_min : float, optional (default=1e-6)
The minimum value that the beta value can reach.
apply_independently : boolean, optional (default=False)
A boolean that determines whether the normalisation is applied to each
channel independently. If ``True``, each channel is divided using only its
own learnable parameters rather than a combination across channels.
Raises
------
TypeError
``n_channels`` parameter is not an integer larger than 0. ``stride``
parameter is not an integer larger than 0. ``padding`` parameter is not
an integer larger or equal to 0. ``gamma_init`` parameter is not a
positive float. ``reparam_offset`` parameter is not a positive float.
``beta_min`` parameter is not a positive float. ``apply_independently``
is not a boolean.
Attributes
----------
reparam_offset : float
Reparameterisation offset used to keep gamma and beta away from zero, so
that their gradients do not vanish during backpropagation.
beta_reparam : float
Reparameterisation offset for the beta parameter specifically.
groups : int
Number of groups to use in the convolution operation. If
``apply_independently`` is ``True`` then this should be 1, otherwise
equal to ``n_channels``.
gamma : torch.Tensor
The torch tensor for the weights to be used in the convolution
operation.
beta : torch.Tensor
The torch tensor for the bias to be used in the convolution operation.
"""
def __init__(self,
n_channels: int,
kernel_size: int = 1,
stride: int = 1,
padding: int = 0,
gamma_init: float = .1,
reparam_offset: float = 2**-18,
beta_min: float = 1e-6,
apply_independently: bool = False) -> None:
"""
Constructs a ``GDN`` generalised divisive normalisation class.
"""
super(GDN, self).__init__()
assert self._validate_input(n_channels, kernel_size, stride, padding,
gamma_init, reparam_offset, beta_min,
apply_independently)
self.stride = stride
self.padding = padding
self.reparam_offset = reparam_offset
self.beta_reparam = (beta_min + self.reparam_offset**2)**0.5
if apply_independently:
self.groups = n_channels
else:
self.groups = 1
# Initialise the gamma and beta parameters
gamma_bound = self.reparam_offset
gamma = torch.eye(n_channels, dtype=torch.float)
gamma = gamma.view(n_channels, n_channels, 1, 1)
gamma = gamma.repeat(1, 1, kernel_size, kernel_size)
gamma = torch.sqrt(gamma_init*gamma + self.reparam_offset**2)
gamma = torch.mul(gamma, gamma)
if apply_independently:
gammas = [g[i, :, :] for i, g in enumerate(gamma)]
gamma = torch.stack(gammas).unsqueeze(1)
self.gamma = nn.Parameter(gamma)
beta = torch.ones((n_channels,))
beta = torch.sqrt(beta + self.reparam_offset**2)
self.beta = nn.Parameter(beta)
def _validate_input(self,
n_channels: int,
kernel_size: int,
stride: int,
padding: int,
gamma_init: float,
reparam_offset: float,
beta_min: float,
apply_independently: bool) -> bool:
"""
Validates input of the generalised divisive normalisation class.
For the description of the input parameters and exceptions raised by
this function, please see the documentation of the
:class:`expert.layers.divisive_normalisation.GDN` class.
Returns
-------
is_valid
``True`` if input is valid, ``False`` otherwise.
"""
is_valid = False
if not isinstance(n_channels, int) or n_channels <= 0:
raise TypeError('n_channels parameter must be an integer greater '
'than 0.')
if not isinstance(kernel_size, int) or kernel_size <= 0:
raise TypeError('kernel_size parameter must be an integer greater '
'than 0.')
if not isinstance(stride, int) or stride <= 0:
raise TypeError('stride parameter must be an integer greater than '
'0.')
if not isinstance(padding, int) or padding < 0:
raise TypeError('padding parameter must be a positive integer.')
if not isinstance(gamma_init, float) or gamma_init < 0:
raise TypeError('gamma_init parameter must be a positive float.')
if not isinstance(reparam_offset, float) or reparam_offset < 0:
raise TypeError('reparam_offset parameter must be a positive '
'float.')
if not isinstance(beta_min, float) or beta_min < 0:
raise TypeError('beta_min parameter must be a positive float.')
if not isinstance(apply_independently, bool):
raise TypeError('apply_independently parameter must be a boolean.')
is_valid = True
return is_valid
def clamp_parameters(self) -> None:
"""
Clamps the gamma and beta parameters that are used in the convolution.
The gamma and beta parameters are clamped, ignoring the gradient of
the clamping, to the ``reparam_offset`` and ``beta_reparam``
parameters.
"""
with torch.no_grad():
self.gamma = nn.Parameter(torch.clamp(self.gamma.data,
min=self.reparam_offset))
self.beta = nn.Parameter(torch.clamp(self.beta.data,
min=self.beta_reparam))
def forward(self,
x: torch.Tensor) -> torch.Tensor:
"""
Forward pass of the layer
Parameters
----------
x : torch.Tensor
The input to the layer. Must be of shape [batch_size, channels,
height, width].
Raises
------
TypeError:
Input parameter ``x`` is not of dtype torch.float.
Returns
-------
output : torch.Tensor
Output of the generalised divisive normalisation layer.
"""
if x.dtype != torch.float32:
raise TypeError('Input x must be of type torch.float32.')
self.clamp_parameters()
norm_pool = F.conv2d(torch.mul(x, x), self.gamma, bias=self.beta,
groups=self.groups, stride=self.stride,
padding=self.padding)
norm_pool = torch.sqrt(norm_pool)
_, _, height, width = x.size()
image_size = [int(height), int(width)]
norm_pool = F.interpolate(norm_pool, size=image_size)
output = x / norm_pool
return output
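A hedged usage sketch: one forward pass through a channel-wise GDN layer on a random float32 batch (the shapes are illustrative only):
# Hedged usage sketch; shapes are illustrative.
import torch

layer = GDN(n_channels=3)      # kernel_size=1: plain channel-wise GDN
x = torch.rand(8, 3, 32, 32)   # torch.rand defaults to float32
y = layer(x)
print(y.shape)                 # torch.Size([8, 3, 32, 32])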
| 39.728448
| 79
| 0.605946
| 1,127
| 9,217
| 4.853594
| 0.216504
| 0.040402
| 0.021938
| 0.014625
| 0.193053
| 0.143693
| 0.086472
| 0.064534
| 0.052102
| 0.027971
| 0
| 0.012658
| 0.314311
| 9,217
| 231
| 80
| 39.900433
| 0.852848
| 0.45926
| 0
| 0.065934
| 0
| 0
| 0.098309
| 0
| 0
| 0
| 0
| 0
| 0.010989
| 1
| 0.043956
| false
| 0
| 0.032967
| 0
| 0.10989
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e606b4f2c79395b331203078985352a2c129c2b0
| 3,231
|
py
|
Python
|
05/solution.py
|
Hegemege/advent-of-code-2019
|
01c2a84fb205069867453f6ba652813a0005fd88
|
[
"MIT"
] | null | null | null |
05/solution.py
|
Hegemege/advent-of-code-2019
|
01c2a84fb205069867453f6ba652813a0005fd88
|
[
"MIT"
] | null | null | null |
05/solution.py
|
Hegemege/advent-of-code-2019
|
01c2a84fb205069867453f6ba652813a0005fd88
|
[
"MIT"
] | null | null | null |
MAX_INST_PARAM_COUNT = 3
def run_program(memory, input_buffer):
pc = 0
while True:
result_code, pc_offset = execute_instruction(memory, pc, input_buffer)
if result_code == -1: # Halt instruction
return
if result_code == 0: # Non-jump instructions
pc += pc_offset
elif result_code == 1: # Jump instructions
pc = pc_offset
def execute_instruction(memory, position, input_buffer):
instruction_header = memory[position]
op_code = int(str(instruction_header)[-2:])
if op_code == 99:
return (-1, 1)
# Get parameter modes and pad the rest
parameter_modes_str = str(instruction_header)[:-2][::-1]
parameter_modes_str = parameter_modes_str.ljust(MAX_INST_PARAM_COUNT, '0')
parameter_modes = list(map(int, parameter_modes_str))
# Add and multiply
if op_code == 1 or op_code == 2:
operator = int.__add__ if op_code == 1 else int.__mul__
parameter1 = get_parameter(memory, position, 1, parameter_modes)
parameter2 = get_parameter(memory, position, 2, parameter_modes)
write_addr = memory[position + 3]
memory[write_addr] = operator(parameter1, parameter2)
return (0, 4)
# Input
if op_code == 3:
write_addr = memory[position + 1]
input_value = input_buffer.pop(0)
print("IN".ljust(6, ' ') + str(input_value))
memory[write_addr] = input_value
return (0, 2)
# Output
if op_code == 4:
output_value = get_parameter(memory, position, 1, parameter_modes)
print("OUT".ljust(6, ' ') + str(output_value))
return (0, 2)
# Jump-if-true && jump-if-false
if op_code == 5 or op_code == 6:
parameter1 = get_parameter(memory, position, 1, parameter_modes)
parameter2 = get_parameter(memory, position, 2, parameter_modes)
# A XNOR B
if (parameter1 == 0) == (op_code == 5):
return (0, 3)
return (1, parameter2)
# Less-than && equals
if op_code == 7 or op_code == 8:
operator = int.__lt__ if op_code == 7 else int.__eq__
parameter1 = get_parameter(memory, position, 1, parameter_modes)
parameter2 = get_parameter(memory, position, 2, parameter_modes)
write_addr = memory[position + 3]
memory[write_addr] = 1 if operator(parameter1, parameter2) else 0
return (0, 4)
print("OPCODE NOT IMPLEMENTED:", op_code)
def get_parameter(memory, position, offset, parameter_modes):
return memory[memory[position + offset]] if parameter_modes[offset - 1] == 0 else memory[position + offset]
def part1(part_input):
print("PART 1")
memory = parse_input_file(part_input)
input_buffer = [1]
run_program(memory, input_buffer)
def part2(part_input):
print("PART 2")
memory = parse_input_file(part_input)
input_buffer = [5]
run_program(memory, input_buffer)
def parse_input_file(input_file_contents):
return list(map(int, input_file_contents.split(",")))
if __name__ == '__main__':
with open('input', 'r') as input_file:
input_file_contents = input_file.readline()
part1(input_file_contents)
part2(input_file_contents)
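A hedged standalone sketch of the header decoding in execute_instruction: the instruction 1002 decodes to opcode 2 (multiply) with parameter modes [0, 1, 0], i.e. position, immediate, position:
# Hedged standalone sketch of the header decoding above.
header = 1002
op_code = int(str(header)[-2:])                # 2 (multiply)
modes = str(header)[:-2][::-1].ljust(3, '0')   # '010'
print(op_code, [int(m) for m in modes])        # 2 [0, 1, 0]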
| 31.368932
| 111
| 0.647478
| 427
| 3,231
| 4.604215
| 0.215457
| 0.106816
| 0.032553
| 0.105799
| 0.354018
| 0.287386
| 0.256867
| 0.236012
| 0.19532
| 0.19532
| 0
| 0.029943
| 0.245435
| 3,231
| 102
| 112
| 31.676471
| 0.776456
| 0.056329
| 0
| 0.231884
| 0
| 0
| 0.019092
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0
| 0.028986
| 0.231884
| 0.072464
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e609e4bd2d6607c75f66f23c28efe8a4fdb25c1b
| 1,080
|
py
|
Python
|
xxmodularsynth/midi/clock_external.py
|
xavierxeon-music/MusicTools
|
05c74218df18c4ee385895b721c7ad24ea0df552
|
[
"MIT"
] | null | null | null |
xxmodularsynth/midi/clock_external.py
|
xavierxeon-music/MusicTools
|
05c74218df18c4ee385895b721c7ad24ea0df552
|
[
"MIT"
] | null | null | null |
xxmodularsynth/midi/clock_external.py
|
xavierxeon-music/MusicTools
|
05c74218df18c4ee385895b721c7ad24ea0df552
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from rtmidi.midiconstants import SONG_POSITION_POINTER, TIMING_CLOCK, SONG_START, SONG_CONTINUE, SONG_STOP
from .clock_abstract import ClockAbstract
from .midi_input import MidiInput
class ClockExternal(ClockAbstract, MidiInput):
def __init__(self, name=None, port=None):
ClockAbstract.__init__(self)
MidiInput.__init__(self, name, port)
self.midiin.ignore_types(timing=False)
def _callback(self, event, _):
message, _ = event
midiEvent = message[0]
if midiEvent == SONG_POSITION_POINTER:
front = message[1]
back = message[2]
position = 1 + (128 * front) + back
self._setSongPosition(position)
elif midiEvent == TIMING_CLOCK:
self._clockTick()
elif midiEvent == SONG_START:
self._setState(ClockAbstract.State.Start)
elif midiEvent == SONG_CONTINUE:
self._setState(ClockAbstract.State.Continue)
elif midiEvent == SONG_STOP:
self._setState(ClockAbstract.State.Stop)
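A hedged sketch of the song-position arithmetic in _callback, following this file's formula (front = message[1], back = message[2]); the data bytes are illustrative:
# Hedged sketch of the song-position arithmetic; data bytes are illustrative.
front, back = 3, 10  # message[1], message[2]
position = 1 + (128 * front) + back
print(position)      # 395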
| 30.857143
| 106
| 0.659259
| 115
| 1,080
| 5.886957
| 0.426087
| 0.076809
| 0.075332
| 0.132939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009926
| 0.253704
| 1,080
| 34
| 107
| 31.764706
| 0.830025
| 0.019444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.125
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e60bf0fffa5d03c3b96086eba8b1615a72a54c3f
| 15,002
|
py
|
Python
|
fhir/resources/DSTU2/careplan.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 144
|
2019-05-08T14:24:43.000Z
|
2022-03-30T02:37:11.000Z
|
fhir/resources/DSTU2/careplan.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 82
|
2019-05-13T17:43:13.000Z
|
2022-03-30T16:45:17.000Z
|
fhir/resources/DSTU2/careplan.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 48
|
2019-04-04T14:14:53.000Z
|
2022-03-30T06:07:31.000Z
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/CarePlan
Release: DSTU2
Version: 1.0.2
Revision: 7202
"""
from typing import Any, Dict
from typing import List as ListType
from pydantic import Field, root_validator
from . import fhirtypes
from .backboneelement import BackboneElement
from .domainresource import DomainResource
class CarePlan(DomainResource):
"""Healthcare plan for patient or group.
Describes the intention of how one or more practitioners intend to deliver
care for a particular patient, group or community for a period of time,
possibly limited to care for a specific condition or set of conditions.
"""
resource_type = Field("CarePlan", const=True)
activity: ListType[fhirtypes.CarePlanActivityType] = Field(
None,
alias="activity",
title="List of `CarePlanActivity` items (represented as `dict` in JSON)",
description="Action to occur as part of plan",
)
addresses: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="addresses",
title=(
"List of `Reference` items referencing `Condition` (represented as "
"`dict` in JSON)"
),
description="Health issues this plan addresses",
)
author: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="author",
title=(
"List of `Reference` items referencing `Patient, Practitioner, "
"RelatedPerson, Organization, CareTeam` (represented as `dict` in JSON)"
),
description="Who is responsible for contents of the plan",
)
category: ListType[fhirtypes.CodeableConceptType] = Field(
None,
alias="category",
title="List of `CodeableConcept` items (represented as `dict` in JSON)",
description="Type of plan",
)
context: fhirtypes.ReferenceType = Field(
None,
alias="context",
title=(
"Type `Reference` referencing `Encounter, EpisodeOfCare` (represented "
"as `dict` in JSON)"
),
description="Created in context of",
)
description: fhirtypes.String = Field(
None,
alias="description",
title="Type `String` (represented as `dict` in JSON)",
description="Summary of nature of plan",
)
goal: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="goal",
title=(
"List of `Reference` items referencing `Goal` (represented as `dict` in"
" JSON)"
),
description="Desired outcome of plan",
)
identifier: ListType[fhirtypes.IdentifierType] = Field(
None,
alias="identifier",
title="List of `Identifier` items (represented as `dict` in JSON)",
description="External Ids for this plan",
)
note: fhirtypes.AnnotationType = Field(
None,
alias="note",
title="Type `Annotation` items (represented as `dict` in JSON)",
description="Comments about the plan",
)
period: fhirtypes.PeriodType = Field(
None,
alias="period",
title="Type `Period` (represented as `dict` in JSON)",
description="Time period plan covers",
)
status: fhirtypes.Code = Field(
...,
alias="status",
title="Type `Code` (represented as `dict` in JSON)",
description=(
"draft | active | suspended | completed | entered-in-error | cancelled "
"| unknown"
),
)
subject: fhirtypes.ReferenceType = Field(
None,
alias="subject",
title=(
"Type `Reference` referencing `Patient, Group` (represented as `dict` "
"in JSON)"
),
description="Who care plan is for",
)
support: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="support",
title=(
"List of `Reference` items referencing `Resource` (represented as "
"`dict` in JSON)"
),
description="Information considered as part of plan",
)
modified: fhirtypes.DateTime = Field(
None,
alias="title",
title="Type `DateTime` (represented as `dict` in JSON)",
description="When last updated",
)
participant: ListType[fhirtypes.CarePlanParticipantType] = Field(
None,
alias="participant",
title="List of `CarePlanParticipant` items (represented as `dict` in JSON).",
description="Who's involved in plan?.",
)
relatedPlan: ListType[fhirtypes.CarePlanRelatedPlanType] = Field(
None,
alias="relatedPlan",
title="Plans related to this one.",
description="List of `CarePlanRelatedPlan` items (represented as `dict` in JSON).",
)
class CarePlanActivity(BackboneElement):
"""Action to occur as part of plan.
Identifies a planned action to occur as part of the plan. For example, a
medication to be used, lab tests to perform, self-monitoring, education,
etc.
"""
resource_type = Field("CarePlanActivity", const=True)
actionResulting: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="actionResulting",
title=(
"List of `Reference` items referencing `Resource` "
"(represented as `dict` in JSON)."
),
description="Appointments, orders, etc..",
)
detail: fhirtypes.CarePlanActivityDetailType = Field(
None,
alias="detail",
title="Type `CarePlanActivityDetail` (represented as `dict` in JSON).",
description="In-line definition of activity.",
)
progress: ListType[fhirtypes.AnnotationType] = Field(
None,
alias="progress",
title="List of `Annotation` items (represented as `dict` in JSON).",
description="Comments about the activity status/progress.",
)
reference: fhirtypes.ReferenceType = Field(
None,
alias="reference",
title=(
"Type `Reference` referencing `Appointment, CommunicationRequest, "
"DeviceUseRequest, DiagnosticOrder, MedicationOrder, "
"NutritionOrder, Order, ProcedureRequest, ProcessRequest, "
"ReferralRequest, SupplyRequest, VisionPrescription` "
"(represented as `dict` in JSON)."
),
description="Activity details defined in specific resource.",
)
class CarePlanActivityDetail(BackboneElement):
"""In-line definition of activity.
A simple summary of a planned activity suitable for a general care plan
system (e.g. form driven) that doesn't know about specific resources such
as procedure etc.
"""
resource_type = Field("CarePlanActivityDetail", const=True)
category: fhirtypes.CodeableConceptType = Field(
None,
alias="category",
title="Type `CodeableConcept` (represented as `dict` in JSON).",
description="diet | drug | encounter | observation | procedure | supply | other.",
)
code: fhirtypes.CodeableConceptType = Field(
None,
alias="code",
title="Type `CodeableConcept` (represented as `dict` in JSON).",
description="Detail type of activity.",
)
dailyAmount: fhirtypes.QuantityType = Field(
None,
alias="dailyAmount",
title="Type `Quantity` referencing `SimpleQuantity` (represented as `dict` in JSON).",
description="How to consume/day?.",
)
description: fhirtypes.String = Field(
None,
alias="description",
title="Type `str`.",
description="Extra info describing activity to perform.",
)
goal: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="goal",
title="List of `Reference` items referencing `Goal` (represented as `dict` in JSON).",
description="Goals this activity relates to.",
)
location: fhirtypes.ReferenceType = Field(
None,
alias="location",
title="Type `Reference` referencing `Location` (represented as `dict` in JSON).",
description="Where it should happen.",
)
performer: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="performer",
title=(
"List of `Reference` items referencing `Practitioner,"
" Organization, RelatedPerson, Patient` "
"(represented as `dict` in JSON)."
),
description="Who will be responsible?.",
)
productCodeableConcept: fhirtypes.CodeableConceptType = Field(
None,
alias="productCodeableConcept",
title="Type `CodeableConcept` (represented as `dict` in JSON).",
description="What is to be administered/supplied.",
one_of_many="product", # Choice of Data Types. i.e value[x]
one_of_many_required=False,
)
productReference: fhirtypes.ReferenceType = Field(
None,
alias="productReference",
title=(
"Type `Reference` referencing `Medication, Substance`"
" (represented as `dict` in JSON)."
),
description="What is to be administered/supplied.",
one_of_many="product", # Choice of Data Types. i.e value[x]
one_of_many_required=False,
)
prohibited: fhirtypes.Boolean = Field(
None, alias="prohibited", title="Type `bool`.", description="Do NOT do."
)
quantity: fhirtypes.QuantityType = Field(
None,
alias="quantity",
title=(
"Type `Quantity` referencing `SimpleQuantity` "
"(represented as `dict` in JSON)."
),
description="How much to administer/supply/consume.",
)
reasonCode: ListType[fhirtypes.CodeableConceptType] = Field(
None,
alias="reasonCode",
title="List of `CodeableConcept` items (represented as `dict` in JSON).",
description="Why activity should be done.",
)
reasonReference: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="reasonReference",
title=(
"List of `Reference` items referencing `Condition` "
"(represented as `dict` in JSON)."
),
description="Condition triggering need for activity.",
)
scheduledPeriod: fhirtypes.PeriodType = Field(
None,
alias="scheduledPeriod",
title="Type `Period` (represented as `dict` in JSON).",
description="When activity is to occur.",
one_of_many="scheduled", # Choice of Data Types. i.e value[x]
one_of_many_required=False,
)
scheduledString: fhirtypes.String = Field(
None,
alias="scheduledString",
title="Type `str`.",
description="When activity is to occur.",
one_of_many="scheduled", # Choice of Data Types. i.e value[x]
one_of_many_required=False,
)
scheduledTiming: fhirtypes.TimingType = Field(
None,
alias="scheduledTiming",
title="Type `Timing` (represented as `dict` in JSON).",
description="When activity is to occur.",
one_of_many="scheduled", # Choice of Data Types. i.e value[x]
one_of_many_required=False,
)
status: fhirtypes.Code = Field(
None,
alias="status",
title="Type `str`.",
description="not-started | scheduled | in-progress | on-hold | completed | cancelled.",
)
statusReason: fhirtypes.CodeableConceptType = Field(
None,
alias="statusReason",
title="Type `CodeableConcept` (represented as `dict` in JSON).",
description="Reason for current status.",
)
@root_validator(pre=True)
def validate_one_of_many(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
"""
one_of_many_fields = {
"scheduled": ["scheduledPeriod", "scheduledString", "scheduledTiming"],
"product": ["productCodeableConcept", "productReference"],
}
for prefix, fields in one_of_many_fields.items():
assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
required = (
cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
is True
)
found = False
for field in fields:
if field in values and values[field] is not None:
if found is True:
raise ValueError(
"Any of one field value is expected from "
f"this list {fields}, but got multiple!"
)
else:
found = True
if required is True and found is False:
raise ValueError(f"Expect any of field value from this list {fields}.")
return values
class CarePlanParticipant(BackboneElement):
"""Who's involved in plan?.
Identifies all people and organizations who are expected to be involved in
the care envisioned by this plan.
"""
resource_type = Field("CarePlanParticipant", const=True)
member: fhirtypes.ReferenceType = Field(
None,
alias="member",
title=(
"Type `Reference` referencing `Practitioner, RelatedPerson,"
" Patient, Organization` (represented as `dict` in JSON)."
),
description="Who is involved.",
)
role: fhirtypes.CodeableConceptType = Field(
None,
alias="role",
title="Type `CodeableConcept` (represented as `dict` in JSON).",
description="Type of involvement.",
)
class CarePlanRelatedPlan(BackboneElement):
"""Plans related to this one.
Identifies CarePlans with some sort of formal relationship to the current
plan.
"""
resource_type = Field("CarePlanRelatedPlan", const=True)
code: fhirtypes.Code = Field(
None,
alias="code",
title="Type `str`.",
description="includes | replaces | fulfills.",
)
plan: fhirtypes.ReferenceType = Field(
...,
alias="plan",
title="Plan relationship exists with.",
description="Type `Reference` referencing `CarePlan` (represented as `dict` in JSON).",
)
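A hedged usage sketch: constructing a minimal DSTU2 CarePlan from the model above. The field names come from the class; the reference and description values are illustrative:
# Hedged usage sketch; the reference and description values are illustrative.
plan = CarePlan(
    status="active",
    subject={"reference": "Patient/123"},
    description="Post-op recovery plan",
)
print(plan.json(exclude_none=True))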
| 33.190265
| 95
| 0.611318
| 1,528
| 15,002
| 5.965314
| 0.22644
| 0.039495
| 0.061437
| 0.077126
| 0.444981
| 0.357323
| 0.281404
| 0.239495
| 0.229622
| 0.181349
| 0
| 0.001303
| 0.284029
| 15,002
| 451
| 96
| 33.263858
| 0.847314
| 0.121984
| 0
| 0.33526
| 0
| 0
| 0.375916
| 0.017192
| 0
| 0
| 0
| 0
| 0.00289
| 1
| 0.00289
| false
| 0
| 0.017341
| 0
| 0.17341
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e60f46b99d8c8cc0e5235381e1f3fff24068c1ab
| 19,993
|
py
|
Python
|
cogen/core/sockets.py
|
ionelmc/cogen
|
83b0edb88425eba6e5bfda9f1dcd34642517e2a8
|
[
"MIT"
] | 6
|
2016-01-22T09:42:45.000Z
|
2020-11-28T14:00:25.000Z
|
cogen/core/sockets.py
|
ionelmc/cogen
|
83b0edb88425eba6e5bfda9f1dcd34642517e2a8
|
[
"MIT"
] | 1
|
2017-02-16T15:20:11.000Z
|
2017-02-16T22:50:41.000Z
|
cogen/core/sockets.py
|
ionelmc/python-cogen
|
83b0edb88425eba6e5bfda9f1dcd34642517e2a8
|
[
"MIT"
] | null | null | null |
"""
Socket-only coroutine operations and `Socket` wrapper.
Really - the only thing you need to know for most stuff is
the :class:`~cogen.core.sockets.Socket` class.
"""
#TODO: how to deal with requests that have unicode params
__all__ = [
'getdefaulttimeout', 'setdefaulttimeout', 'Socket', 'SendFile', 'Recv',
'Send', 'SendAll','Accept','Connect',
'SocketOperation', 'SocketError', 'ConnectionClosed'
]
from socket import socket as stdsocket, AF_INET, SOCK_STREAM
import events
from coroutines import coro
_TIMEOUT = None
class SocketError(Exception):
"Raised when a socket has a error flag (in epoll or select)"
class ConnectionClosed(SocketError):
"Raised when the other peer has closed connection."
def getdefaulttimeout():
return _TIMEOUT
def setdefaulttimeout(timeout):
"""Set the default timeout used by the socket wrapper
(`Socket <cogen.core.sockets.Socket.html>`_ class)"""
global _TIMEOUT
_TIMEOUT = timeout
class Socket(object):
"""
A wrapper for socket objects, sets nonblocking mode and
adds some internal buffers and wrappers. Regular calls to the usual
socket methods return operations for use in a coroutine.
So you use this in a coroutine like:
.. sourcecode:: python
sock = Socket(family, type, proto) # just like the builtin socket module
yield sock.read(1024)
Constructor details:
.. sourcecode:: python
Socket([family[, type[, proto]]]) -> socket object
Open a socket of the given type. The family argument specifies the
address family; it defaults to AF_INET. The type argument specifies
whether this is a stream (SOCK_STREAM, this is the default)
or datagram (SOCK_DGRAM) socket. The protocol argument defaults to 0,
specifying the default protocol. Keyword arguments are accepted.
A socket object represents one endpoint of a network connection.
"""
__slots__ = ('_fd', '_timeout', '_proactor_added')
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0,
_timeout=None, _sock=None, _proactor_added=False):
self._fd = _sock or stdsocket(family, type, proto)
self._fd.setblocking(0)
self._timeout = _timeout or _TIMEOUT
self._proactor_added = _proactor_added
def recv(self, bufsize, **kws):
"""Receive data from the socket. The return value is a string
representing the data received. The amount of data may be less than the
amount specified by _bufsize_. """
return Recv(self, bufsize, timeout=self._timeout, **kws)
def makefile(self, mode='r', bufsize=-1):
"""
Returns a special fileobject that has coroutines instead of the usual
read/readline/write methods. Will work in the same manner though.
"""
return _fileobject(self, mode, bufsize)
def send(self, data, **kws):
"""Send data to the socket. The socket must be connected to a remote
socket. The amount sent may be less than the data provided.
return Send(self, data, timeout=self._timeout, **kws)
def sendall(self, data, **kws):
"""Send data to the socket. The socket must be connected to a remote
socket. All the data is guaranteed to be sent."""
return SendAll(self, data, timeout=self._timeout, **kws)
def accept(self, **kws):
"""Accept a connection. The socket must be bound to an address and
listening for connections. The return value is a pair (conn, address)
where conn is a new socket object usable to send and receive data on the
connection, and address is the address bound to the socket on the other
end of the connection.
Example:
{{{
conn, address = yield mysock.accept()
}}}
"""
return Accept(self, timeout=self._timeout, **kws)
def close(self):
"""Close the socket. All future operations on the socket object will
fail. The remote end will receive no more data (after queued data is
flushed). Sockets are automatically closed when they are garbage-collected.
"""
self._fd.close()
def bind(self, *args):
"""Bind the socket to _address_. The socket must not already be bound.
(The format of _address_ depends on the address family)
"""
return self._fd.bind(*args)
def connect(self, address, **kws):
"""Connect to a remote socket at _address_. """
return Connect(self, address, timeout=self._timeout, **kws)
def fileno(self):
"""Return the socket's file descriptor """
return self._fd.fileno()
def listen(self, backlog):
"""Listen for connections made to the socket. The _backlog_ argument
specifies the maximum number of queued connections and should be at
least 1; the maximum value is system-dependent (usually 5).
"""
return self._fd.listen(backlog)
def getpeername(self):
"""Return the remote address to which the socket is connected."""
return self._fd.getpeername()
def getsockname(self):
"""Return the socket's own address. """
return self._fd.getsockname()
def settimeout(self, to):
"""Set a timeout on blocking socket operations. The value argument can
be a nonnegative float expressing seconds, timedelta or None.
"""
self._timeout = to
def gettimeout(self):
"""Return the associated timeout value. """
return self._timeout
def shutdown(self, *args):
"""Shut down one or both halves of the connection. Same as the usual
socket method."""
return self._fd.shutdown(*args)
def setblocking(self, val):
if val:
raise RuntimeError("You can't.")
def setsockopt(self, *args):
"""Set the value of the given socket option. Same as the usual socket
method."""
self._fd.setsockopt(*args)
def sendfile(self, file_handle, offset=None, length=None, blocksize=4096, **kws):
return SendFile(file_handle, self, offset, length, blocksize, **kws)
def __repr__(self):
return '<socket at 0x%X>' % id(self)
def __str__(self):
return 'sock@0x%X' % id(self)
class SocketOperation(events.TimedOperation):
"""
This is a generic class for an operation that involves some socket call.
A socket operation should subclass WriteOperation or ReadOperation, define a
`run` method and call the __init__ method of the superclass.
"""
__slots__ = (
'sock', 'last_update', 'coro', 'flags'
)
def __init__(self, sock, **kws):
"""
All the socket operations have these generic properties that the
poller and scheduler interprets:
* timeout - the amount of time in seconds or timedelta, or the datetime
value till the poller should wait for this operation.
* weak_timeout - if this is True the timeout handling code will take
into account the time of last activity (that would be the time of last
`try_run` call)
* prio - a flag for the scheduler
"""
assert isinstance(sock, Socket)
super(SocketOperation, self).__init__(**kws)
self.sock = sock
def fileno(self):
return self.sock._fd.fileno()
def cleanup(self, sched, coro):
super(SocketOperation, self).cleanup(sched, coro)
return sched.proactor.remove_token(self)
class SendFile(SocketOperation):
"""
Uses the underlying OS sendfile (or equivalent) call, or a regular memory copy
operation if there is no sendfile.
You can use this as a WriteAll if you specify the length.
Usage::
yield sockets.SendFile(file_object, socket_object, 0)
# will send till send operations return 0
yield sockets.SendFile(file_object, socket_object, 0, blocksize=0)
# there will be only one send operation (if successful)
# that means the whole file will be read into memory if there is
# no sendfile
yield sockets.SendFile(file_object, socket_object, 0, file_size)
# this will hang if we can't read file_size bytes
#from the file
"""
__slots__ = (
'sent', 'file_handle', 'offset',
'position', 'length', 'blocksize'
)
def __init__(self, file_handle, sock, offset=None, length=None, blocksize=4096, **kws):
super(SendFile, self).__init__(sock, **kws)
self.file_handle = file_handle
self.offset = self.position = offset or file_handle.tell()
self.length = length
self.sent = 0
self.blocksize = blocksize
def process(self, sched, coro):
super(SendFile, self).process(sched, coro)
return sched.proactor.request_sendfile(self, coro)
def finalize(self, sched):
super(SendFile, self).finalize(sched)
return self.sent
class Recv(SocketOperation):
"""
Example usage:
.. sourcecode:: python
yield sockets.Read(socket_object, buffer_length)
`buffer_length` is the maximum read size; but if there are buffered bytes
left over from a ReadLine, those are returned first.
"""
__slots__ = ('buff', 'len')
def __init__(self, sock, len = 4096, **kws):
super(Recv, self).__init__(sock, **kws)
self.len = len
self.buff = None
def process(self, sched, coro):
super(Recv, self).process(sched, coro)
return sched.proactor.request_recv(self, coro)
def finalize(self, sched):
super(Recv, self).finalize(sched)
return self.buff
class Send(SocketOperation):
"""
Write the buffer to the socket and return the number of bytes written.
"""
__slots__ = ('sent', 'buff')
def __init__(self, sock, buff, **kws):
super(Send, self).__init__(sock, **kws)
self.buff = str(buff)
self.sent = 0
def process(self, sched, coro):
super(Send, self).process(sched, coro)
return sched.proactor.request_send(self, coro)
def finalize(self, sched):
super(Send, self).finalize(sched)
return self.sent
class SendAll(SocketOperation):
"""
Run this operation till all the bytes have been written.
"""
__slots__ = ('sent', 'buff')
def __init__(self, sock, buff, **kws):
super(SendAll, self).__init__(sock, **kws)
self.buff = str(buff)
self.sent = 0
def process(self, sched, coro):
super(SendAll, self).process(sched, coro)
return sched.proactor.request_sendall(self, coro)
def finalize(self, sched):
super(SendAll, self).finalize(sched)
return self.sent
class Accept(SocketOperation):
"""
Returns a (conn, addr) tuple when the operation completes.
"""
__slots__ = ('conn', 'addr', 'cbuff')
def __init__(self, sock, **kws):
super(Accept, self).__init__(sock, **kws)
self.conn = None
def process(self, sched, coro):
super(Accept, self).process(sched, coro)
return sched.proactor.request_accept(self, coro)
def finalize(self, sched):
super(Accept, self).finalize(sched)
return (self.conn, self.addr)
def __repr__(self):
return "<%s at 0x%X %s conn:%r to:%s>" % (
self.__class__.__name__,
id(self),
self.sock,
self.conn,
self.timeout
)
class Connect(SocketOperation):
"""
"""
__slots__ = ('addr', 'conn', 'connect_attempted')
def __init__(self, sock, addr, **kws):
"""
Connect to the given `addr` using `sock`.
"""
super(Connect, self).__init__(sock, **kws)
self.addr = addr
self.connect_attempted = False
def process(self, sched, coro):
super(Connect, self).process(sched, coro)
return sched.proactor.request_connect(self, coro)
def finalize(self, sched):
super(Connect, self).finalize(sched)
return self.sock
@coro
def RecvAll(sock, length, **k):
recvd = 0
data = []
while recvd < length:
chunk = (yield Recv(sock, length-recvd, **k))
recvd += len(chunk)
data.append(chunk)
assert recvd == length
raise StopIteration(''.join(data))
class _fileobject(object):
"""Faux file object attached to a socket object."""
default_bufsize = 8192
name = "<socket>"
__slots__ = ("mode", "bufsize", "softspace",
# "closed" is a property, see below
"_sock", "_rbufsize", "_wbufsize", "_rbuf", "_wbuf",
"_close")
def __init__(self, sock, mode='rb', bufsize=-1, close=False):
self._sock = sock
self.mode = mode # Not actually used in this version
if bufsize < 0:
bufsize = self.default_bufsize
self.bufsize = bufsize
self.softspace = False
if bufsize == 0:
self._rbufsize = 1
elif bufsize == 1:
self._rbufsize = self.default_bufsize
else:
self._rbufsize = bufsize
self._wbufsize = bufsize
self._rbuf = "" # A string
self._wbuf = [] # A list of strings
self._close = close
def _getclosed(self):
return self._sock is None
closed = property(_getclosed, doc="True if the file is closed")
@coro
def close(self, **kws):
try:
if self._sock:
yield self.flush(**kws)
finally:
if self._close:
self._sock.close()
self._sock = None
def __del__(self):
try:
self.close()
except:
# close() may fail if __init__ didn't complete
pass
@coro
def flush(self, **kws):
if self._wbuf:
buffer = "".join(self._wbuf)
self._wbuf = []
yield self._sock.sendall(buffer, **kws)
def fileno(self):
return self._sock.fileno()
@coro
def write(self, data, **kws):
data = str(data) # XXX Should really reject non-string non-buffers
if not data:
return
self._wbuf.append(data)
if (self._wbufsize == 0 or
self._wbufsize == 1 and '\n' in data or
self._get_wbuf_len() >= self._wbufsize):
yield self.flush(**kws)
@coro
def writelines(self, list, **kws):
# XXX We could do better here for very long lists
# XXX Should really reject non-string non-buffers
self._wbuf.extend(filter(None, map(str, list)))
if (self._wbufsize <= 1 or
self._get_wbuf_len() >= self._wbufsize):
yield self.flush(**kws)
def _get_wbuf_len(self):
buf_len = 0
for x in self._wbuf:
buf_len += len(x)
return buf_len
#~ from cogen.core.coroutines import debug_coro
#~ @debug_coro
@coro
def read(self, size=-1, **kws):
data = self._rbuf
if size < 0:
# Read until EOF
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
if self._rbufsize <= 1:
recv_size = self.default_bufsize
else:
recv_size = self._rbufsize
while True:
data = (yield self._sock.recv(recv_size, **kws))
if not data:
break
buffers.append(data)
raise StopIteration("".join(buffers))
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
raise StopIteration(data[:size])
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
left = size - buf_len
recv_size = max(self._rbufsize, left)
data = (yield self._sock.recv(recv_size, **kws))
if not data:
break
buffers.append(data)
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
raise StopIteration("".join(buffers))
#~ from coroutines import debug_coro
#~ @debug_coro
@coro
def readline(self, size=-1, **kws):
data = self._rbuf
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
assert data == ""
buffers = []
recv = self._sock.recv
while data != "\n":
data = (yield recv(1, **kws))
if not data:
break
buffers.append(data)
raise StopIteration("".join(buffers))
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
raise StopIteration(data[:nl])
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = (yield self._sock.recv(self._rbufsize, **kws))
if not data:
break
buffers.append(data)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
raise StopIteration("".join(buffers))
else:
# Read until size bytes or \n or EOF seen, whichever comes first
nl = data.find('\n', 0, size)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
raise StopIteration(data[:nl])
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
raise StopIteration(data[:size])
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = (yield self._sock.recv(self._rbufsize, **kws))
if not data:
break
buffers.append(data)
left = size - buf_len
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
raise StopIteration("".join(buffers))
@coro
def readlines(self, sizehint=0, **kws):
total = 0
list = []
while True:
line = (yield self.readline(**kws))
if not line:
break
list.append(line)
total += len(line)
if sizehint and total >= sizehint:
break
raise StopIteration(list)
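A hedged standalone sketch of the residual-buffer bookkeeping that read/readline implement above: scan the buffered data for '\n', hand back everything up to and including it, and keep the remainder for the next call:
# Hedged standalone sketch of the residual-buffer bookkeeping.
def split_line(rbuf):
    """Return (line, remainder) if a full line is buffered, else (None, rbuf)."""
    nl = rbuf.find('\n')
    if nl >= 0:
        nl += 1
        return rbuf[:nl], rbuf[nl:]
    return None, rbuf  # need more data from the socket

line, rest = split_line("GET / HTTP/1.0\r\nHost: x\r\n")
print(repr(line), repr(rest))  # 'GET / HTTP/1.0\r\n' 'Host: x\r\n'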
| 32.991749
| 92
| 0.553644
| 2,294
| 19,993
| 4.687446
| 0.178727
| 0.017112
| 0.009207
| 0.009765
| 0.317865
| 0.274156
| 0.254069
| 0.206175
| 0.149912
| 0.149912
| 0
| 0.005374
| 0.348472
| 19,993
| 605
| 93
| 33.046281
| 0.820129
| 0.278447
| 0
| 0.415301
| 0
| 0
| 0.041717
| 0
| 0
| 0
| 0
| 0.001653
| 0.008197
| 1
| 0.15847
| false
| 0.002732
| 0.008197
| 0.021858
| 0.327869
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6170d2f693f08c4d21d708512f6fdcb2389cdda
| 1,183
|
py
|
Python
|
scripts/update_readme_metrics.py
|
ygormutti/executor-exporter
|
4b985fdf03cbf0515b912aa9631c3c8f0c81a461
|
[
"Apache-2.0"
] | 1
|
2022-02-24T02:21:14.000Z
|
2022-02-24T02:21:14.000Z
|
scripts/update_readme_metrics.py
|
ygormutti/executor-exporter
|
4b985fdf03cbf0515b912aa9631c3c8f0c81a461
|
[
"Apache-2.0"
] | null | null | null |
scripts/update_readme_metrics.py
|
ygormutti/executor-exporter
|
4b985fdf03cbf0515b912aa9631c3c8f0c81a461
|
[
"Apache-2.0"
] | null | null | null |
from sys import argv
from executor_exporter.exporter import metrics
def update_readme_metrics(readme_path):
columns = ("Name", "Type", "Labels", "Description")
sep = " | "
table_lines = [sep.join(columns), sep.join(["---"] * len(columns))]
for metric in metrics:
table_lines.append(
sep.join(
(
metric._name,
metric._type,
", ".join(metric._labelnames),
metric._documentation,
)
)
)
readme_lines = []
with open(readme_path) as readme_file:
for lineno, line in enumerate(readme_file.readlines()):
if "metrics:begin" in line:
begin = lineno
elif "metrics:end" in line:
end = lineno
readme_lines.append(line)
readme_lines = [
*readme_lines[: begin + 1],
"\n".join(table_lines) + "\n",
*readme_lines[end:],
]
with open(readme_path, "w") as readme_file:
readme_file.writelines(readme_lines)
if __name__ == "__main__":
readme_path = argv[1]
update_readme_metrics(readme_path)
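A hedged standalone sketch of the splice the script performs: keep everything up to and including the begin marker, insert the generated table, and resume at the end marker. The marker spellings are assumptions kept consistent with the script:
# Hedged standalone sketch of the marker splice.
lines = [
    "## Metrics\n",
    "<!-- metrics:begin -->\n",
    "stale table row\n",
    "<!-- metrics:end -->\n",
]
begin = next(i for i, l in enumerate(lines) if "metrics:begin" in l)
end = next(i for i, l in enumerate(lines) if "metrics:end" in l)
lines = [*lines[: begin + 1], "Name | Type\n--- | ---\n", *lines[end:]]
print("".join(lines), end="")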
| 26.288889
| 71
| 0.540997
| 123
| 1,183
| 4.918699
| 0.357724
| 0.109091
| 0.06281
| 0.082645
| 0.095868
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002581
| 0.344886
| 1,183
| 44
| 72
| 26.886364
| 0.778065
| 0
| 0
| 0
| 0
| 0
| 0.059172
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0
| 0.057143
| 0
| 0.085714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e61732e97bfa8fdbdf35bca08255c56c7737afe4
| 1,717
|
py
|
Python
|
accounts/migrations/0002_auto_20160226_1548.py
|
adrienlachaize/dezede
|
584ec30cedab95152e2f95595b7691a04e6736e2
|
[
"BSD-3-Clause"
] | 15
|
2015-02-10T21:16:31.000Z
|
2021-03-25T16:46:20.000Z
|
accounts/migrations/0002_auto_20160226_1548.py
|
adrienlachaize/dezede
|
584ec30cedab95152e2f95595b7691a04e6736e2
|
[
"BSD-3-Clause"
] | 4
|
2021-02-10T15:42:08.000Z
|
2022-03-11T23:20:38.000Z
|
accounts/migrations/0002_auto_20160226_1548.py
|
adrienlachaize/dezede
|
584ec30cedab95152e2f95595b7691a04e6736e2
|
[
"BSD-3-Clause"
] | 6
|
2016-07-10T14:20:48.000Z
|
2022-01-19T18:34:02.000Z
|
import accounts.models
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.AlterModelManagers(
name='hierarchicuser',
managers=[
('objects', accounts.models.HierarchicUserManager()),
],
),
migrations.AlterField(
model_name='hierarchicuser',
name='email',
field=models.EmailField(blank=True, max_length=254, verbose_name='email address'),
),
migrations.AlterField(
model_name='hierarchicuser',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
),
migrations.AlterField(
model_name='hierarchicuser',
name='last_login',
field=models.DateTimeField(blank=True, null=True, verbose_name='last login'),
),
migrations.AlterField(
model_name='hierarchicuser',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username'),
),
]
| 42.925
| 409
| 0.630169
| 173
| 1,717
| 6.132948
| 0.49711
| 0.084826
| 0.094251
| 0.109331
| 0.177191
| 0.177191
| 0
| 0
| 0
| 0
| 0
| 0.008449
| 0.241701
| 1,717
| 39
| 410
| 44.025641
| 0.806452
| 0
| 0
| 0.371429
| 0
| 0
| 0.293535
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.085714
| 0
| 0.171429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e618960b87b4b729b558ed9c9d5f90f4d0854b6a
| 8,575
|
py
|
Python
|
danceschool/private_lessons/forms.py
|
django-danceschool/django-danceschool
|
65ae09ffdcb0821e82df0e1f634fe13c0384a525
|
[
"BSD-3-Clause"
] | 32
|
2017-09-12T04:25:25.000Z
|
2022-03-21T10:48:07.000Z
|
danceschool/private_lessons/forms.py
|
django-danceschool/django-danceschool
|
65ae09ffdcb0821e82df0e1f634fe13c0384a525
|
[
"BSD-3-Clause"
] | 97
|
2017-09-01T02:43:08.000Z
|
2022-01-03T18:20:34.000Z
|
danceschool/private_lessons/forms.py
|
django-danceschool/django-danceschool
|
65ae09ffdcb0821e82df0e1f634fe13c0384a525
|
[
"BSD-3-Clause"
] | 19
|
2017-09-26T13:34:46.000Z
|
2022-03-21T10:48:10.000Z
|
from django import forms
from django.utils.translation import gettext_lazy as _
from django.conf import settings
from django.core.exceptions import ValidationError
from datetime import datetime, timedelta
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Submit
from danceschool.core.constants import getConstant
from danceschool.core.models import DanceRole, Location, Room, Instructor, PricingTier
from danceschool.core.utils.timezone import ensure_localtime
from danceschool.core.forms import LocationWithDataWidget
from .models import InstructorAvailabilitySlot
def get_duration_choices():
return [(x, x) for x in range(
getConstant('privateLessons__minimumLessonLength'),
getConstant('privateLessons__maximumLessonLength') + 1,
getConstant('privateLessons__lessonLengthInterval'),
)]
def get_default_duration():
return getConstant('privateLessons__defaultLessonLength')
class SlotBookingForm(forms.Form):
slotId = forms.IntegerField(required=True, widget=forms.HiddenInput)
duration = forms.ChoiceField(
label=_('Duration'), choices=get_duration_choices,
initial=get_default_duration
)
role = forms.ModelChoiceField(label=_('Dance role'), queryset=DanceRole.objects.all())
participants = forms.IntegerField(
label=_('Expected # Participants'), initial=1, min_value=1,
help_text=_('Be advised that group lessons may be charged a different rate.')
)
comments = forms.CharField(
label=_('Comments/Notes'), required=False,
help_text=_(
'Please enter any comments or notes that you would like to be ' +
'provided to the instructor before the lesson, such as the topics ' +
'on which you may want to focus.'
)
)
def __init__(self, *args, **kwargs):
user = kwargs.pop('user', None)
# Initialize the default form
super().__init__(*args, **kwargs)
# Allow users with appropriate permissions to process door registrations.
if user and user.has_perm('core.accept_door_payments'):
self.fields['payAtDoor'] = forms.BooleanField(
required=False, label=_('Door/Invoice Registration')
)
class SlotCreationForm(forms.Form):
instructorId = forms.ModelChoiceField(
label=_('Instructor'), queryset=Instructor.objects.all(),
widget=forms.HiddenInput, required=True
)
startDate = forms.DateField(label=_('Start date'), required=True, widget=forms.HiddenInput)
endDate = forms.DateField(label=_('End date'), required=True, widget=forms.HiddenInput)
startTime = forms.TimeField(
label=_('Start time'), required=True,
input_formats=(
getattr(settings, 'TIME_INPUT_FORMATS', []) +
['%I:%M %p', '%-I:%M %p', '%I:%M%p', '%-I:%M%p']
)
)
endTime = forms.TimeField(
label=_('End time'), required=True,
input_formats=(
getattr(settings, 'TIME_INPUT_FORMATS', []) +
['%I:%M %p', '%-I:%M %p', '%I:%M%p', '%-I:%M%p']
),
)
location = forms.ModelChoiceField(
label=_('Location'),
queryset=Location.objects.exclude(status=Location.StatusChoices.former),
required=False, widget=LocationWithDataWidget
)
room = forms.ModelChoiceField(
label=_('Room'),
queryset=Room.objects.exclude(location__status=Location.StatusChoices.former),
required=False
)
pricingTier = forms.ModelChoiceField(
label=_('Pricing Tier'), queryset=PricingTier.objects.filter(expired=False),
required=False,
help_text=_(
'A pricing tier is required for online registration and payment. ' +
'If your school handles scheduling, but not payment for lessons, ' +
'then leave this blank.'
)
)
status = forms.ChoiceField(
label=_('Initial Status'), required=True,
choices=InstructorAvailabilitySlot.SlotStatus.choices,
initial=InstructorAvailabilitySlot.SlotStatus.available
)
def clean(self):
'''
Only allow submission if there are not already slots in the submitted window,
and only allow rooms associated with the chosen location.
'''
super().clean()
startDate = self.cleaned_data.get('startDate')
endDate = self.cleaned_data.get('endDate')
startTime = self.cleaned_data.get('startTime')
endTime = self.cleaned_data.get('endTime')
instructor = self.cleaned_data.get('instructorId')
existingSlots = InstructorAvailabilitySlot.objects.filter(
instructor=instructor,
startTime__gt=(
ensure_localtime(datetime.combine(startDate, startTime)) -
timedelta(minutes=getConstant('privateLessons__lessonLengthInterval'))
),
startTime__lt=ensure_localtime(datetime.combine(endDate, endTime)),
)
if existingSlots.exists():
raise ValidationError(
_('Newly created slots cannot overlap existing slots for this instructor.'),
code='invalid'
)
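# A minimal sketch of the overlap window computed in clean() above
# (assumed values): with lessonLengthInterval = 30 and a requested window
# of 14:00-16:00, the filter matches any existing slot whose startTime
# lies in the open interval (13:30, 16:00), i.e. slots starting up to one
# interval before the new window opens are treated as conflicts.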
class SlotUpdateForm(forms.Form):
slotIds = forms.ModelMultipleChoiceField(
required=True, widget=forms.MultipleHiddenInput,
queryset=InstructorAvailabilitySlot.objects.all()
)
updateStatus = forms.ChoiceField(
label=_('Update Status'), required=True,
choices=InstructorAvailabilitySlot.SlotStatus.choices,
initial=InstructorAvailabilitySlot.SlotStatus.available
)
updateLocation = forms.ModelChoiceField(
label=_('Update Location'),
queryset=Location.objects.exclude(status=Location.StatusChoices.former),
required=False, widget=LocationWithDataWidget
)
updateRoom = forms.ModelChoiceField(
label=_('Room'),
queryset=Room.objects.exclude(location__status=Location.StatusChoices.former),
required=False
)
updatePricing = forms.ModelChoiceField(
label=_('Update pricing'), queryset=PricingTier.objects.filter(expired=False), required=False,
help_text=_(
'A pricing tier is required for online registration and payment. ' +
'If your school handles scheduling, but not payment for lessons, ' +
'then leave this blank.'
)
)
deleteSlot = forms.BooleanField(
label=_('Delete slot'), initial=False,
help_text=_('Note that only slots with no current bookings may be deleted at this time.'),
required=False
)
class PrivateLessonStudentInfoForm(forms.Form):
'''
This is the form customers use to fill out their contact info
for private lessons that do not involve online payment.
'''
firstName = forms.CharField(label=_('First Name'))
lastName = forms.CharField(label=_('Last Name'))
email = forms.EmailField()
phone = forms.CharField(
required=False, label=_('Telephone (optional)'),
help_text=_('We may use this to notify you in event of a cancellation.')
)
agreeToPolicies = forms.BooleanField(
required=True,
label=_('<strong>I agree to all policies (required)</strong>'),
help_text=_('By checking, you agree to abide by all policies.')
)
def __init__(self, *args, **kwargs):
self._request = kwargs.pop('request', None)
user = getattr(self._request, 'user', None)
payAtDoor = kwargs.pop('payAtDoor', False)
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.form_tag = False  # Our template must explicitly include the <form> tag
if user and hasattr(user, 'customer') and user.customer and not payAtDoor:
# Input existing info for users who are logged in and have signed up before
self.fields['firstName'].initial = user.customer.first_name or user.first_name
self.fields['lastName'].initial = user.customer.last_name or user.last_name
self.fields['email'].initial = user.customer.email or user.email
self.fields['phone'].initial = user.customer.phone
self.helper.layout = Layout(
Div('firstName', 'lastName', 'email', css_class='form-inline'),
Div('phone', css_class='form-inline'),
Div('agreeToPolicies', css_class='card card-body bg-light'),
Submit('submit', _('Complete Registration'))
)
| 38.977273
| 102
| 0.661574
| 901
| 8,575
| 6.169811
| 0.310766
| 0.021587
| 0.037417
| 0.004317
| 0.251124
| 0.229897
| 0.216226
| 0.216226
| 0.216226
| 0.216226
| 0
| 0.000455
| 0.23137
| 8,575
| 219
| 103
| 39.155251
| 0.842968
| 0.056443
| 0
| 0.204545
| 0
| 0
| 0.201319
| 0.025134
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028409
| false
| 0
| 0.068182
| 0.011364
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e61a46397ce546d99911d246529ba90ca1cf69a8
| 32,292
|
py
|
Python
|
engine/modules.py
|
scofield7419/HeSyFu
|
cc06a644918d65aa898f65348077f3d9a3e5252b
|
[
"Apache-2.0"
] | 1
|
2021-11-04T02:31:39.000Z
|
2021-11-04T02:31:39.000Z
|
engine/modules.py
|
scofield7419/HeSyFu
|
cc06a644918d65aa898f65348077f3d9a3e5252b
|
[
"Apache-2.0"
] | null | null | null |
engine/modules.py
|
scofield7419/HeSyFu
|
cc06a644918d65aa898f65348077f3d9a3e5252b
|
[
"Apache-2.0"
] | 2
|
2021-12-28T03:38:37.000Z
|
2021-12-29T12:56:41.000Z
|
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from typing import List, Tuple, Dict
import numpy as np
from torch.autograd import Variable
class DepGCN(nn.Module):
"""
Label-aware Dependency Convolutional Neural Network Layer
"""
def __init__(self, dep_num, dep_dim, in_features, out_features):
super(DepGCN, self).__init__()
self.dep_dim = dep_dim
self.in_features = in_features
self.out_features = out_features
self.dep_embedding = nn.Embedding(dep_num, dep_dim, padding_idx=0)
self.dep_attn = nn.Linear(dep_dim + in_features, out_features)
self.dep_fc = nn.Linear(dep_dim, out_features)
self.relu = nn.ReLU()
def forward(self, text, dep_mat, dep_labels):
dep_label_embed = self.dep_embedding(dep_labels)
batch_size, seq_len, feat_dim = text.shape
val_us = text.unsqueeze(dim=2)
val_us = val_us.repeat(1, 1, seq_len, 1)
val_sum = torch.cat([val_us, dep_label_embed], dim=-1)
r = self.dep_attn(val_sum)
p = torch.sum(r, dim=-1)
mask = (dep_mat == 0).float() * (-1e30)
p = p + mask
p = torch.softmax(p, dim=2)
p_us = p.unsqueeze(3).repeat(1, 1, 1, feat_dim)
output = val_us + self.dep_fc(dep_label_embed)
output = torch.mul(p_us, output)
output_sum = torch.sum(output, dim=2)
output_sum = self.relu(output_sum)
return output_sum
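# Minimal shape check for DepGCN (illustrative values). Note that
# out_features must equal in_features for the addition
# val_us + self.dep_fc(dep_label_embed) in forward() to broadcast.
# model = DepGCN(dep_num=40, dep_dim=16, in_features=32, out_features=32)
# text = torch.randn(2, 10, 32)                   # (batch, seq, feat)
# dep_mat = torch.randint(0, 2, (2, 10, 10))      # dependency adjacency
# dep_labels = torch.randint(0, 40, (2, 10, 10))  # dependency label ids
# out = model(text, dep_mat, dep_labels)          # -> (2, 10, 32)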
class ConstGCN(nn.Module):
"""
Label-aware Constituency Convolutional Neural Network Layer
"""
def __init__(
self,
num_inputs,
num_units,
num_labels,
dropout=0.0,
in_arcs=True,
out_arcs=True,
batch_first=False,
use_gates=True,
residual=False,
no_loop=False,
non_linearity="relu",
edge_dropout=0.0,
):
super(ConstGCN, self).__init__()
self.in_arcs = in_arcs
self.out_arcs = out_arcs
self.no_loop = no_loop
self.retain = 1.0 - edge_dropout
self.num_inputs = num_inputs
self.num_units = num_units
self.num_labels = num_labels
self.batch_first = batch_first
# non_linearity arrives as a string (e.g. "relu"); resolve it to a callable
# here so that it can be applied directly in forward().
self.non_linearity = getattr(torch, non_linearity)
self.sigmoid = nn.Sigmoid()
self.use_gates = use_gates
self.residual = residual
self.dropout = nn.Dropout(p=dropout)
self.layernorm = nn.LayerNorm(num_units)
if in_arcs:
self.V_in = Parameter(torch.Tensor(self.num_inputs, self.num_units))
nn.init.xavier_normal_(self.V_in)
self.b_in = Parameter(torch.Tensor(num_labels, self.num_units))
nn.init.constant_(self.b_in, 0)
if self.use_gates:
self.V_in_gate = Parameter(torch.Tensor(self.num_inputs, 1))
nn.init.xavier_normal_(self.V_in_gate)
self.b_in_gate = Parameter(torch.Tensor(num_labels, 1))
nn.init.constant_(self.b_in_gate, 1)
if out_arcs:
self.V_out = Parameter(torch.Tensor(self.num_inputs, self.num_units))
nn.init.xavier_normal_(self.V_out)
self.b_out = Parameter(torch.Tensor(num_labels, self.num_units))
nn.init.constant_(self.b_out, 0)
if self.use_gates:
self.V_out_gate = Parameter(torch.Tensor(self.num_inputs, 1))
nn.init.xavier_normal_(self.V_out_gate)
self.b_out_gate = Parameter(torch.Tensor(num_labels, 1))
nn.init.constant_(self.b_out_gate, 1)
if not self.no_loop:
self.W_self_loop = Parameter(torch.Tensor(self.num_inputs, self.num_units))
nn.init.xavier_normal_(self.W_self_loop)
if self.use_gates:
self.W_self_loop_gate = Parameter(torch.Tensor(self.num_inputs, 1))
nn.init.xavier_normal_(self.W_self_loop_gate)
def forward(
self,
src,
arc_tensor_in=None,
arc_tensor_out=None,
label_tensor_in=None,
label_tensor_out=None,
mask_in=None,
mask_out=None,
mask_loop=None,
sent_mask=None,
):
if not self.batch_first:
encoder_outputs = src.permute(1, 0, 2).contiguous()
else:
encoder_outputs = src.contiguous()
batch_size = encoder_outputs.size()[0]
seq_len = encoder_outputs.size()[1]
max_degree = 1
input_ = encoder_outputs.view(
(batch_size * seq_len, self.num_inputs)
) # [b* t, h]
input_ = self.dropout(input_)
if self.in_arcs:
input_in = torch.mm(input_, self.V_in) # [b* t, h] * [h,h] = [b*t, h]
first_in = input_in.index_select(
0, arc_tensor_in[0] * seq_len + arc_tensor_in[1]
) # [b* t* degr, h]
second_in = self.b_in.index_select(0, label_tensor_in[0]) # [b* t* degr, h]
in_ = first_in + second_in
degr = int(first_in.size()[0] / batch_size // seq_len)
in_ = in_.view((batch_size, seq_len, degr, self.num_units))
if self.use_gates:
# compute gate weights
input_in_gate = torch.mm(
input_, self.V_in_gate
) # [b* t, h] * [h,h] = [b*t, h]
first_in_gate = input_in_gate.index_select(
0, arc_tensor_in[0] * seq_len + arc_tensor_in[1]
) # [b* t* mxdeg, h]
second_in_gate = self.b_in_gate.index_select(0, label_tensor_in[0])
in_gate = (first_in_gate + second_in_gate).view(
(batch_size, seq_len, degr)
)
max_degree += degr
if self.out_arcs:
input_out = torch.mm(input_, self.V_out) # [b* t, h] * [h,h] = [b* t, h]
first_out = input_out.index_select(
0, arc_tensor_out[0] * seq_len + arc_tensor_out[1]
) # [b* t* mxdeg, h]
second_out = self.b_out.index_select(0, label_tensor_out[0])
degr = int(first_out.size()[0] / batch_size // seq_len)
max_degree += degr
out_ = (first_out + second_out).view(
(batch_size, seq_len, degr, self.num_units)
)
if self.use_gates:
# compute gate weights
input_out_gate = torch.mm(
input_, self.V_out_gate
) # [b* t, h] * [h,h] = [b* t, h]
first_out_gate = input_out_gate.index_select(
0, arc_tensor_out[0] * seq_len + arc_tensor_out[1]
) # [b* t* mxdeg, h]
second_out_gate = self.b_out_gate.index_select(0, label_tensor_out[0])
out_gate = (first_out_gate + second_out_gate).view(
(batch_size, seq_len, degr)
)
if self.no_loop:
if self.in_arcs and self.out_arcs:
potentials = torch.cat((in_, out_), dim=2) # [b, t, mxdeg, h]
if self.use_gates:
potentials_gate = torch.cat(
(in_gate, out_gate), dim=2
) # [b, t, mxdeg, h]
mask_soft = torch.cat((mask_in, mask_out), dim=1) # [b* t, mxdeg]
elif self.out_arcs:
potentials = out_ # [b, t, 2*mxdeg+1, h]
if self.use_gates:
potentials_gate = out_gate # [b, t, mxdeg, h]
mask_soft = mask_out # [b* t, mxdeg]
elif self.in_arcs:
potentials = in_ # [b, t, 2*mxdeg+1, h]
if self.use_gates:
potentials_gate = in_gate # [b, t, mxdeg, h]
mask_soft = mask_in # [b* t, mxdeg]
max_degree -= 1
else:
same_input = torch.mm(input_, self.W_self_loop).view(
encoder_outputs.size(0), encoder_outputs.size(1), -1
)
same_input = same_input.view(
encoder_outputs.size(0),
encoder_outputs.size(1),
1,
self.W_self_loop.size(1),
)
if self.use_gates:
same_input_gate = torch.mm(input_, self.W_self_loop_gate).view(
encoder_outputs.size(0), encoder_outputs.size(1), -1
)
if self.in_arcs and self.out_arcs:
potentials = torch.cat(
(in_, out_, same_input), dim=2
) # [b, t, mxdeg, h]
if self.use_gates:
potentials_gate = torch.cat(
(in_gate, out_gate, same_input_gate), dim=2
) # [b, t, mxdeg, h]
mask_soft = torch.cat(
(mask_in, mask_out, mask_loop), dim=1
) # [b* t, mxdeg]
elif self.out_arcs:
potentials = torch.cat(
(out_, same_input), dim=2
) # [b, t, 2*mxdeg+1, h]
if self.use_gates:
potentials_gate = torch.cat(
(out_gate, same_input_gate), dim=2
) # [b, t, mxdeg, h]
mask_soft = torch.cat((mask_out, mask_loop), dim=1) # [b* t, mxdeg]
elif self.in_arcs:
potentials = torch.cat(
(in_, same_input), dim=2
) # [b, t, 2*mxdeg+1, h]
if self.use_gates:
potentials_gate = torch.cat(
(in_gate, same_input_gate), dim=2
) # [b, t, mxdeg, h]
mask_soft = torch.cat((mask_in, mask_loop), dim=1) # [b* t, mxdeg]
else:
potentials = same_input # [b, t, 2*mxdeg+1, h]
if self.use_gates:
potentials_gate = same_input_gate # [b, t, mxdeg, h]
mask_soft = mask_loop # [b* t, mxdeg]
potentials_resh = potentials.view(
(batch_size * seq_len, max_degree, self.num_units)
) # [h, b * t, mxdeg]
if self.use_gates:
potentials_r = potentials_gate.view(
(batch_size * seq_len, max_degree)
) # [b * t, mxdeg]
probs_det_ = (self.sigmoid(potentials_r) * mask_soft).unsqueeze(
2
) # [b * t, mxdeg]
potentials_masked = potentials_resh * probs_det_ # [b * t, mxdeg,h]
else:
# NO Gates
potentials_masked = potentials_resh * mask_soft.unsqueeze(2)
if self.retain == 1 or not self.training:
pass
else:
mat_1 = torch.Tensor(mask_soft.data.size()).uniform_(0, 1)
ret = torch.Tensor([self.retain])
mat_2 = (mat_1 < ret).float()
drop_mask = Variable(mat_2, requires_grad=False)
if potentials_resh.is_cuda:
drop_mask = drop_mask.cuda()
potentials_masked *= drop_mask.unsqueeze(2)
potentials_masked_ = potentials_masked.sum(dim=1) # [b * t, h]
potentials_masked_ = self.layernorm(potentials_masked_) * sent_mask.view(
batch_size * seq_len
).unsqueeze(1)
potentials_masked_ = self.non_linearity(potentials_masked_) # [b * t, h]
result_ = potentials_masked_.view(
(batch_size, seq_len, self.num_units)
) # [ b, t, h]
result_ = result_ * sent_mask.unsqueeze(2) # [b, t, h]
memory_bank = result_ # [t, b, h]
if self.residual:
memory_bank += src
return memory_bank
class BilinearScorer(nn.Module):
def __init__(self, hidden_dim, role_vocab_size, dropout=0.0, gpu_id=-1):
super(BilinearScorer, self).__init__()
if gpu_id > -1:
self.use_gpu = True
else:
self.use_gpu = False
self.hidden_dim = hidden_dim
self.role_vocab_size = role_vocab_size
self.dropout = nn.Dropout(p=dropout)
self.U = Parameter(
torch.Tensor(self.hidden_dim, self.role_vocab_size, self.hidden_dim)
)
nn.init.orthogonal_(self.U)
self.bias1 = Parameter(torch.Tensor(1, self.hidden_dim * self.role_vocab_size))
nn.init.constant_(self.bias1, 0)
self.bias2 = Parameter(torch.Tensor(1, self.role_vocab_size))
nn.init.constant_(self.bias2, 0)
def forward(self, pred_input, args_input):
b, t, h = pred_input.data.shape
pred_input = self.dropout(pred_input)
args_input = self.dropout(args_input)
first = (
torch.mm(pred_input.view(-1, h), self.U.view(h, -1)) + self.bias1
) # [b*t, h] * [h,r*h] = [b*t,r*h]
out = torch.bmm(
first.view(-1, self.role_vocab_size, h), args_input.view(-1, h).unsqueeze(2)
) # [b*t,r,h] [b*t, h, 1] = [b*t, r]
out = out.squeeze(2) + self.bias2
return out
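# Illustrative usage of BilinearScorer (arbitrary dimensions):
# scorer = BilinearScorer(hidden_dim=16, role_vocab_size=5)
# pred = torch.randn(2, 7, 16)    # predicate representations
# args = torch.randn(2, 7, 16)    # argument representations
# scores = scorer(pred, args)     # -> (2 * 7, 5), one score per role label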
class ScaledDotProductAttention(nn.Module):
def __init__(self, d_k):
super(ScaledDotProductAttention, self).__init__()
self.d_k = d_k
def forward(self, q, k, v, attn_mask):
attn_score = torch.matmul(q, k.transpose(-1, -2)) / np.sqrt(self.d_k)
attn_score.masked_fill_(attn_mask, -1e9)
attn_weights = nn.Softmax(dim=-1)(attn_score)
output = torch.matmul(attn_weights, v)
return output, attn_weights
class MultiHeadAttention(nn.Module):
def __init__(self, d_model, n_heads):
super(MultiHeadAttention, self).__init__()
self.n_heads = n_heads
self.d_k = self.d_v = d_model // n_heads
self.WQ = nn.Linear(d_model, d_model)
self.WK = nn.Linear(d_model, d_model)
self.WV = nn.Linear(d_model, d_model)
self.scaled_dot_product_attn = ScaledDotProductAttention(self.d_k)
self.linear = nn.Linear(n_heads * self.d_v, d_model)
def forward(self, Q, K, V, attn_mask):
batch_size = Q.size(0)
q_heads = self.WQ(Q).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)
k_heads = self.WK(K).view(batch_size, -1, self.n_heads, self.d_k).transpose(1, 2)
v_heads = self.WV(V).view(batch_size, -1, self.n_heads, self.d_v).transpose(1, 2)
attn_mask = attn_mask.unsqueeze(1).repeat(1, self.n_heads, 1, 1)
attn, attn_weights = self.scaled_dot_product_attn(q_heads, k_heads, v_heads, attn_mask)
attn = attn.transpose(1, 2).contiguous().view(batch_size, -1, self.n_heads * self.d_v)
output = self.linear(attn)
return output, attn_weights
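# Illustrative usage of MultiHeadAttention (arbitrary dimensions):
# mha = MultiHeadAttention(d_model=64, n_heads=8)
# x = torch.randn(2, 10, 64)
# pad_mask = torch.zeros(2, 10, 10, dtype=torch.bool)  # True marks masked positions
# out, weights = mha(x, x, x, pad_mask)  # out: (2, 10, 64), weights: (2, 8, 10, 10)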
class PositionWiseFeedForwardNetwork(nn.Module):
def __init__(self, d_model, d_ff):
super(PositionWiseFeedForwardNetwork, self).__init__()
self.linear1 = nn.Linear(d_model, d_ff)
self.linear2 = nn.Linear(d_ff, d_model)
self.relu = nn.ReLU()
def forward(self, inputs):
output = self.relu(self.linear1(inputs))
output = self.linear2(output)
return output
class EncoderLayer(nn.Module):
def __init__(self, d_model, n_heads, p_drop, d_ff):
super(EncoderLayer, self).__init__()
self.mha = MultiHeadAttention(d_model, n_heads)
self.dropout1 = nn.Dropout(p_drop)
self.layernorm1 = nn.LayerNorm(d_model, eps=1e-6)
self.ffn = PositionWiseFeedForwardNetwork(d_model, d_ff)
self.dropout2 = nn.Dropout(p_drop)
self.layernorm2 = nn.LayerNorm(d_model, eps=1e-6)
def forward(self, inputs, attn_mask):
attn_outputs, attn_weights = self.mha(inputs, inputs, inputs, attn_mask)
attn_outputs = self.dropout1(attn_outputs)
attn_outputs = self.layernorm1(inputs + attn_outputs)
ffn_outputs = self.ffn(attn_outputs)
ffn_outputs = self.dropout2(ffn_outputs)
ffn_outputs = self.layernorm2(attn_outputs + ffn_outputs)
return ffn_outputs, attn_weights
class TransformerEncoder(nn.Module):
def __init__(self, vocab_size, seq_len=300, d_model=768, n_layers=3, n_heads=8, p_drop=0.1, d_ff=500, pad_id=0):
super(TransformerEncoder, self).__init__()
self.pad_id = pad_id
self.sinusoid_table = self.get_sinusoid_table(seq_len + 1, d_model) # (seq_len+1, d_model)
self.embedding = nn.Embedding(vocab_size, d_model)
self.pos_embedding = nn.Embedding.from_pretrained(self.sinusoid_table, freeze=True)
self.layers = nn.ModuleList([EncoderLayer(d_model, n_heads, p_drop, d_ff) for _ in range(n_layers)])
def forward(self, inputs):
positions = torch.arange(inputs.size(1), device=inputs.device, dtype=inputs.dtype).repeat(inputs.size(0), 1) + 1
position_pad_mask = inputs.eq(self.pad_id)
positions.masked_fill_(position_pad_mask, 0)
outputs = self.embedding(inputs) + self.pos_embedding(positions)
attn_pad_mask = self.get_attention_padding_mask(inputs, inputs, self.pad_id)
for layer in self.layers:
outputs, attn_weights = layer(outputs, attn_pad_mask)
return outputs
def get_attention_padding_mask(self, q, k, pad_id):
attn_pad_mask = k.eq(pad_id).unsqueeze(1).repeat(1, q.size(1), 1)
return attn_pad_mask
def get_sinusoid_table(self, seq_len, d_model):
def get_angle(pos, i, d_model):
return pos / np.power(10000, (2 * (i // 2)) / d_model)
sinusoid_table = np.zeros((seq_len, d_model))
for pos in range(seq_len):
for i in range(d_model):
if i % 2 == 0:
sinusoid_table[pos, i] = np.sin(get_angle(pos, i, d_model))
else:
sinusoid_table[pos, i] = np.cos(get_angle(pos, i, d_model))
return torch.FloatTensor(sinusoid_table)
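# Illustrative usage of TransformerEncoder (small hypothetical sizes):
# enc = TransformerEncoder(vocab_size=100, seq_len=50, d_model=16,
#                          n_layers=1, n_heads=2, d_ff=32)
# tokens = torch.randint(1, 100, (2, 12))  # pad_id 0 is reserved for padding
# out = enc(tokens)                        # -> (2, 12, 16)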
def allowed_transitions(constraint_type: str, labels: Dict[int, str]) -> List[Tuple[int, int]]:
"""
Given labels and a constraint type, returns the allowed transitions. It will
additionally include transitions for the start and end states, which are used
by the conditional random field.
Parameters
----------
constraint_type : ``str``, required
Indicates which constraint to apply. Current choices are
"BIO", "IOB1", "BIOUL", and "BMES".
labels : ``Dict[int, str]``, required
A mapping {label_id -> label}. Most commonly this would be the value from
Vocabulary.get_index_to_token_vocabulary()
Returns
-------
``List[Tuple[int, int]]``
The allowed transitions (from_label_id, to_label_id).
"""
num_labels = len(labels)
start_tag = num_labels
end_tag = num_labels + 1
labels_with_boundaries = list(labels.items()) + [(start_tag, "START"), (end_tag, "END")]
allowed = []
for from_label_index, from_label in labels_with_boundaries:
if from_label in ("START", "END"):
from_tag = from_label
from_entity = ""
else:
from_tag = from_label[0]
from_entity = from_label[1:]
for to_label_index, to_label in labels_with_boundaries:
if to_label in ("START", "END"):
to_tag = to_label
to_entity = ""
else:
to_tag = to_label[0]
to_entity = to_label[1:]
if is_transition_allowed(constraint_type, from_tag, from_entity,
to_tag, to_entity):
allowed.append((from_label_index, to_label_index))
return allowed
def is_transition_allowed(constraint_type: str,
from_tag: str,
from_entity: str,
to_tag: str,
to_entity: str):
"""
Given a constraint type and strings ``from_tag`` and ``to_tag`` that
represent the origin and destination of the transition, return whether
the transition is allowed under the given constraint type.
Parameters
----------
constraint_type : ``str``, required
Indicates which constraint to apply. Current choices are
"BIO", "IOB1", "BIOUL", and "BMES".
from_tag : ``str``, required
The tag that the transition originates from. For example, if the
label is ``I-PER``, the ``from_tag`` is ``I``.
from_entity: ``str``, required
The entity corresponding to the ``from_tag``. For example, if the
label is ``I-PER``, the ``from_entity`` is ``PER``.
to_tag : ``str``, required
The tag that the transition leads to. For example, if the
label is ``I-PER``, the ``to_tag`` is ``I``.
to_entity: ``str``, required
The entity corresponding to the ``to_tag``. For example, if the
label is ``I-PER``, the ``to_entity`` is ``PER``.
Returns
-------
``bool``
Whether the transition is allowed under the given ``constraint_type``.
"""
# pylint: disable=too-many-return-statements
if to_tag == "START" or from_tag == "END":
return False
if constraint_type == "BIOUL":
if from_tag == "START":
return to_tag in ('O', 'B', 'U')
if to_tag == "END":
return from_tag in ('O', 'L', 'U')
return any([
from_tag in ('O', 'L', 'U') and to_tag in ('O', 'B', 'U'),
from_tag in ('B', 'I') and to_tag in ('I', 'L') and from_entity == to_entity
])
elif constraint_type == "BIO":
if from_tag == "START":
return to_tag in ('O', 'B')
if to_tag == "END":
return from_tag in ('O', 'B', 'I')
return any([
to_tag in ('O', 'B'),
to_tag == 'I' and from_tag in ('B', 'I') and from_entity == to_entity
])
elif constraint_type == "IOB1":
if from_tag == "START":
return to_tag in ('O', 'I')
if to_tag == "END":
return from_tag in ('O', 'B', 'I')
return any([
to_tag in ('O', 'I'),
to_tag == 'B' and from_tag in ('B', 'I') and from_entity == to_entity
])
elif constraint_type == "BMES":
if from_tag == "START":
return to_tag in ('B', 'S')
if to_tag == "END":
return from_tag in ('E', 'S')
return any([
to_tag in ('B', 'S') and from_tag in ('E', 'S'),
to_tag == 'M' and from_tag == 'B' and from_entity == to_entity,
to_tag == 'E' and from_tag in ('B', 'M') and from_entity == to_entity,
])
else:
raise IOError("Unknown constraint type: {constraint_type}")
class CRF(torch.nn.Module):
def __init__(self,
num_tags: int,
constraints: List[Tuple[int, int]] = None,
include_start_end_transitions: bool = True) -> None:
super().__init__()
self.num_tags = num_tags
self.transitions = torch.nn.Parameter(torch.Tensor(num_tags, num_tags))
if constraints is None:
constraint_mask = torch.Tensor(num_tags + 2, num_tags + 2).fill_(1.)
else:
constraint_mask = torch.Tensor(num_tags + 2, num_tags + 2).fill_(0.)
for i, j in constraints:
constraint_mask[i, j] = 1.
self._constraint_mask = torch.nn.Parameter(constraint_mask, requires_grad=False)
self.include_start_end_transitions = include_start_end_transitions
if include_start_end_transitions:
self.start_transitions = torch.nn.Parameter(torch.Tensor(num_tags))
self.end_transitions = torch.nn.Parameter(torch.Tensor(num_tags))
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_normal_(self.transitions)
if self.include_start_end_transitions:
torch.nn.init.normal_(self.start_transitions)
torch.nn.init.normal_(self.end_transitions)
def _input_likelihood(self, logits: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
"""
Computes the (batch_size,) denominator term for the log-likelihood, which is the
sum of the likelihoods across all possible state sequences.
"""
batch_size, sequence_length, num_tags = logits.size()
mask = mask.float().transpose(0, 1).contiguous()
logits = logits.transpose(0, 1).contiguous()
if self.include_start_end_transitions:
alpha = self.start_transitions.view(1, num_tags) + logits[0]
else:
alpha = logits[0]
for i in range(1, sequence_length):
emit_scores = logits[i].view(batch_size, 1, num_tags)
transition_scores = self.transitions.view(1, num_tags, num_tags)
broadcast_alpha = alpha.view(batch_size, num_tags, 1)
inner = broadcast_alpha + emit_scores + transition_scores
alpha = (logsumexp(inner, 1) * mask[i].view(batch_size, 1) +
alpha * (1 - mask[i]).view(batch_size, 1))
if self.include_start_end_transitions:
stops = alpha + self.end_transitions.view(1, num_tags)
else:
stops = alpha
return logsumexp(stops)
def _joint_likelihood(self,
logits: torch.Tensor,
tags: torch.Tensor,
mask: torch.LongTensor) -> torch.Tensor:
"""
Computes the numerator term for the log-likelihood, which is just score(inputs, tags)
"""
batch_size, sequence_length, _ = logits.data.shape
logits = logits.transpose(0, 1).contiguous()
mask = mask.float().transpose(0, 1).contiguous()
tags = tags.transpose(0, 1).contiguous()
if self.include_start_end_transitions:
score = self.start_transitions.index_select(0, tags[0])
else:
score = 0.0
for i in range(sequence_length - 1):
current_tag, next_tag = tags[i], tags[i + 1]
transition_score = self.transitions[current_tag.view(-1), next_tag.view(-1)]
emit_score = logits[i].gather(1, current_tag.view(batch_size, 1)).squeeze(1)
score = score + transition_score * mask[i + 1] + emit_score * mask[i]
last_tag_index = mask.sum(0).long() - 1
last_tags = tags.gather(0, last_tag_index.view(1, batch_size)).squeeze(0)
if self.include_start_end_transitions:
last_transition_score = self.end_transitions.index_select(0, last_tags)
else:
last_transition_score = 0.0
last_inputs = logits[-1] # (batch_size, num_tags)
last_input_score = last_inputs.gather(1, last_tags.view(-1, 1)) # (batch_size, 1)
last_input_score = last_input_score.squeeze() # (batch_size,)
score = score + last_transition_score + last_input_score * mask[-1]
return score
def forward(self,
inputs: torch.Tensor,
tags: torch.Tensor,
mask: torch.ByteTensor = None) -> torch.Tensor:
"""
Computes the log likelihood.
"""
if mask is None:
mask = torch.ones(*tags.size(), dtype=torch.long, device=tags.device)
log_denominator = self._input_likelihood(inputs, mask)
log_numerator = self._joint_likelihood(inputs, tags, mask)
return torch.sum(log_numerator - log_denominator)
def viterbi_tags(self,
logits: torch.Tensor,
mask: torch.Tensor) -> List[Tuple[List[int], float]]:
_, max_seq_length, num_tags = logits.size()
logits, mask = logits.data, mask.data
start_tag = num_tags
end_tag = num_tags + 1
transitions = torch.Tensor(num_tags + 2, num_tags + 2).fill_(-10000.)
constrained_transitions = (
self.transitions * self._constraint_mask[:num_tags, :num_tags] +
-10000.0 * (1 - self._constraint_mask[:num_tags, :num_tags])
)
transitions[:num_tags, :num_tags] = constrained_transitions.data
if self.include_start_end_transitions:
transitions[start_tag, :num_tags] = (
self.start_transitions.detach() * self._constraint_mask[start_tag, :num_tags].data +
-10000.0 * (1 - self._constraint_mask[start_tag, :num_tags].detach())
)
transitions[:num_tags, end_tag] = (
self.end_transitions.detach() * self._constraint_mask[:num_tags, end_tag].data +
-10000.0 * (1 - self._constraint_mask[:num_tags, end_tag].detach())
)
else:
transitions[start_tag, :num_tags] = (-10000.0 *
(1 - self._constraint_mask[start_tag, :num_tags].detach()))
transitions[:num_tags, end_tag] = -10000.0 * (1 - self._constraint_mask[:num_tags, end_tag].detach())
best_paths = []
tag_sequence = torch.Tensor(max_seq_length + 2, num_tags + 2)
for prediction, prediction_mask in zip(logits, mask):
sequence_length = (torch.sum(prediction_mask)).int()
tag_sequence.fill_(-10000.)
tag_sequence[0, start_tag] = 0.
tag_sequence[1:(sequence_length + 1), :num_tags] = prediction[:sequence_length]
tag_sequence[sequence_length + 1, end_tag] = 0.
viterbi_path, viterbi_score = viterbi_decode(tag_sequence[:(sequence_length + 2)], transitions)
viterbi_path = viterbi_path[1:-1]
best_paths.append((viterbi_path, viterbi_score.item()))
return best_paths
def logsumexp(tensor: torch.Tensor,
dim: int = -1,
keepdim: bool = False) -> torch.Tensor:
max_score, _ = tensor.max(dim, keepdim=keepdim)
if keepdim:
stable_vec = tensor - max_score
else:
stable_vec = tensor - max_score.unsqueeze(dim)
return max_score + (stable_vec.exp().sum(dim, keepdim=keepdim)).log()
def viterbi_decode(tag_sequence: torch.Tensor,
transition_matrix: torch.Tensor,
tag_observations=None):
sequence_length, num_tags = list(tag_sequence.size())
if tag_observations:
if len(tag_observations) != sequence_length:
raise IOError("Observations were provided, but they were not the same length "
"as the sequence. Found sequence of length: {} and evidence: {}"
.format(sequence_length, tag_observations))
else:
tag_observations = [-1 for _ in range(sequence_length)]
path_scores = []
path_indices = []
if tag_observations[0] != -1:
one_hot = torch.zeros(num_tags)
one_hot[tag_observations[0]] = 100000.
path_scores.append(one_hot)
else:
path_scores.append(tag_sequence[0, :])
for timestep in range(1, sequence_length):
summed_potentials = path_scores[timestep - 1].unsqueeze(-1) + transition_matrix
scores, paths = torch.max(summed_potentials, 0)
observation = tag_observations[timestep]
if tag_observations[timestep - 1] != -1:
if transition_matrix[tag_observations[timestep - 1], observation] < -10000:
print("The pairwise potential between tags you have passed as "
"observations is extremely unlikely. Double check your evidence "
"or transition potentials!")
if observation != -1:
one_hot = torch.zeros(num_tags)
one_hot[observation] = 100000.
path_scores.append(one_hot)
else:
path_scores.append(tag_sequence[timestep, :] + scores.squeeze())
path_indices.append(paths.squeeze())
viterbi_score, best_path = torch.max(path_scores[-1], 0)
viterbi_path = [int(best_path.numpy())]
for backward_timestep in reversed(path_indices):
viterbi_path.append(int(backward_timestep[viterbi_path[-1]]))
viterbi_path.reverse()
return viterbi_path, viterbi_score
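# A minimal end-to-end sketch of the CRF layer (assumed shapes):
# crf = CRF(num_tags=3)
# logits = torch.randn(2, 5, 3)             # (batch, seq, num_tags)
# tags = torch.randint(0, 3, (2, 5))
# mask = torch.ones(2, 5, dtype=torch.long)
# log_likelihood = crf(logits, tags, mask)  # training objective (maximize)
# best = crf.viterbi_tags(logits, mask)     # [(path, score), ...] per sequence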
| 38.488677
| 120
| 0.580515
| 4,207
| 32,292
| 4.180414
| 0.091752
| 0.005572
| 0.009154
| 0.011145
| 0.428328
| 0.352419
| 0.312674
| 0.241769
| 0.206004
| 0.188662
| 0
| 0.015848
| 0.306299
| 32,292
| 838
| 121
| 38.534606
| 0.769251
| 0.097021
| 0
| 0.170178
| 0
| 0
| 0.015336
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047002
| false
| 0.003241
| 0.009724
| 0.001621
| 0.121556
| 0.001621
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e61d028876011792c07b86f917e40c7cc75e894b
| 2,293
|
py
|
Python
|
src/ScreenCapLibrary/utils.py
|
davesliu/ScreenCapLibrary
|
b5537c44c740e0f43e424fb0028dbcfd0e5b0557
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ScreenCapLibrary/utils.py
|
davesliu/ScreenCapLibrary
|
b5537c44c740e0f43e424fb0028dbcfd0e5b0557
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ScreenCapLibrary/utils.py
|
davesliu/ScreenCapLibrary
|
b5537c44c740e0f43e424fb0028dbcfd0e5b0557
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def _norm_path(path):
if not path:
return path
return os.path.normpath(path.replace('/', os.sep))
def _compression_value_conversion(value):
"""
PNG compression values fall within the range [0, 9], so the incoming
[0, 100] quality value must be mapped onto that interval.
"""
try:
if int(value) < 0 or int(value) > 100:
raise RuntimeError("Quality argument must be of between 0 and 100.")
return 0 if int(value) == 100 else int(9 - (int(value) / 11))
except ValueError:
raise RuntimeError("Quality argument must be of type integer.")
def _pil_quality_conversion(value):
"""
Pillow accepts quality values in the range [1, 95], so the incoming
[0, 100] quality value must be clamped into that range.
"""
try:
if int(value) < 0 or int(value) > 100:
raise RuntimeError("Quality argument must be of between 0 and 100.")
if int(value) < 1:
return 1
elif int(value) >= 95:
return 95
return int(value)
except ValueError:
raise RuntimeError("The image quality argument must be of type integer.")
class suppress_stderr(object):
def __init__(self):
# Open a null file
self.null_fd = os.open(os.devnull, os.O_RDWR)
# Save the actual stderr (2) file descriptor.
self.save_fd = os.dup(2)
def __enter__(self):
# Assign the null pointer to stderr.
os.dup2(self.null_fd, 2)
def __exit__(self, *_):
# Re-assign the real stderr back to (2)
os.dup2(self.save_fd, 2)
# Close all file descriptors
os.close(self.null_fd)
os.close(self.save_fd)
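# Worked examples of the conversions above:
# _compression_value_conversion(100) -> 0  (best quality, least compression)
# _compression_value_conversion(0)   -> 9  (maximum compression)
# _compression_value_conversion(50)  -> 4
# _pil_quality_conversion(0) -> 1 and _pil_quality_conversion(100) -> 95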
| 31.410959
| 81
| 0.647187
| 331
| 2,293
| 4.39577
| 0.416918
| 0.049485
| 0.027491
| 0.057732
| 0.190378
| 0.190378
| 0.190378
| 0.131959
| 0.131959
| 0.131959
| 0
| 0.038439
| 0.262538
| 2,293
| 72
| 82
| 31.847222
| 0.821999
| 0.418229
| 0
| 0.242424
| 0
| 0
| 0.144871
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.030303
| 0
| 0.424242
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6230fc750b387220c642ab851a058153bb25082
| 7,206
|
py
|
Python
|
pybilt/common/gaussian.py
|
blakeaw/ORBILT
|
ed402dd496534dccd00f3e75b57007d944c58c1d
|
[
"MIT"
] | 11
|
2019-07-29T16:21:53.000Z
|
2022-02-02T11:44:57.000Z
|
pybilt/common/gaussian.py
|
blakeaw/ORBILT
|
ed402dd496534dccd00f3e75b57007d944c58c1d
|
[
"MIT"
] | 11
|
2019-05-15T09:30:05.000Z
|
2021-07-19T16:49:59.000Z
|
pybilt/common/gaussian.py
|
blakeaw/ORBILT
|
ed402dd496534dccd00f3e75b57007d944c58c1d
|
[
"MIT"
] | 9
|
2019-08-12T11:14:45.000Z
|
2020-12-22T18:22:55.000Z
|
"""Define Gaussian function objects.
This module defines the Gaussian class and the GaussianRange class.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from builtins import object
import numpy as np
from six.moves import range
class Gaussian(object):
"""A Gaussian function object.
Attributes:
mean (float): The mean of the Gaussian.
std (float): The standard deviation of the Gaussian.
"""
def __init__(self, mean,std):
"""Initialize a Gaussian function object.
Args:
mean (float): Set the mean of the Gaussian.
std (float): Set the standard deviation of the Gaussian.
"""
stdinv = 1.0/std
# Standard PDF normalization 1/(sigma * sqrt(2*pi)); the original
# 1/(sigma * sqrt(pi)) did not match the -(x - mean)**2 / (2*sigma**2)
# exponent used in eval().
normalc = stdinv*(1.0/np.sqrt(2.0*np.pi))
self.sigma = std
self.mean = mean
self._normconst = normalc
return
def eval(self,x_in):
"""Return the Gaussian function evaluated at the input x value.
Args:
x_in (float): The x value to evaluate the function at.
Returns:
float: The function evaluation for the Gaussian.
"""
stdinv = 1.0/self.sigma
stdinvsq = stdinv**2
normalc = self._normconst
expon = -(x_in - self.mean)**2 * (0.5*stdinvsq)
y = normalc * np.exp(expon)
return y
def reset_mean(self,new_mean):
"""Change the mean of the Gaussian function.
Args:
new_mean (float): The new mean of the Gaussian function.
"""
self.mean = new_mean
return
class GaussianRange(object):
"""Define a Gaussian function over a range.
This object is used to define a Gaussian function over a defined
finite range and store its values as evaluated at points evenly spaced
over the range. The points can then for example be used for integrating
the Gaussian function over the range using numerical quadrature.
Attributes:
mean (float): The mean of the Gaussian.
std (float): The standard deviation of the Gaussian.
upper (float): The upper boundary of the range.
lower (float): The lower boundary of the range.
npoints (int): The number of points to evaluate in the range.
"""
def __init__(self,in_range,mean,std,npoints=200):
"""Initialize the GaussianRange object.
The GaussianRange stores the values of Gaussian function with the
input mean and standard deviation evaluated at evenly spaced points
in the specified x-value range.
Args:
in_range (tuple, list): Specify the endpoints for range, e.g.
(x_start, x_end).
mean (float): The mean of the Gaussian function.
std (float): The standard deviation of the Gaussian function.
npoints (Optional[int]): The number of x-value points to
evaluate the Gaussian function for in the specified range (i.e.
in_range).
"""
x_p = np.linspace(in_range[0],in_range[1],npoints,endpoint=True)
y_p = np.zeros(npoints)
yc = 0
stdinv = 1.0/std
stdinvsq = stdinv**2
# As in Gaussian.__init__: use the PDF normalization 1/(sigma * sqrt(2*pi))
# to match the -(x - mean)**2 / (2*sigma**2) exponent below.
normalc = stdinv*(1.0/np.sqrt(2.0*np.pi))
for x in x_p:
expon = -(x - mean)**2 * (0.5*stdinvsq)
y = normalc * np.exp(expon)
y_p[yc]=y
yc+=1
self.x = x_p
self.y = y_p
self.sigma = std
self.mean = mean
self._normconst = normalc
self.upper = in_range[1]
self.lower = in_range[0]
self._dx = x_p[1]-x_p[0]
self.npoints = npoints
return
def get_values(self):
"""Return the x and y values for the Gaussian range function.
Returns:
tuple: The x and y values for the function, returned as (
x_values, y_values).
"""
return (self.x,self.y)
def eval(self,x_in):
"""Return the Gaussian function evaluated at the input x value.
Args:
x_in (float): The x value to evaluate the function at.
Returns:
float: The function evaluation for the Gaussian.
"""
stdinv = 1.0/self.sigma
stdinvsq = stdinv**2
normalc = self._normconst
expon = -(x_in - self.mean)**2 * (0.5*stdinvsq)
y = normalc * np.exp(expon)
return y
def integrate_range(self, lower, upper):
"""Returns the numerical integration of the Gaussian range.
This function does a simple quadrature for the Gaussian function as
evaluated on the range (or subset of the range) specified at
initialization.
Args:
lower (float): The lower boundary for the integration.
upper (float): The upper boundary for the integration.
Returns:
float: The numerical value of the Gaussian range integrated from
lower to upper.
Notes:
This function does not thoroughly check the bounds, so if upper
is less than lower the function will break.
"""
if upper>self.upper:
upper=self.upper
if lower<self.lower:
lower = self.lower
i_l = int(np.floor((lower-self.lower)/self._dx))
i_u = int(np.floor((upper-self.lower)/self._dx))
total = 0.0
for i in range(i_l,i_u):
total+= self.y[i]*self._dx
return total
def sum_range(self, lower, upper):
"""Returns the over the Gaussian range.
This function sums the Gaussian function at the points that were
evaluated on the range (or subset of the range) specified at
initialization.
Args:
lower (float): The lower boundary for the sum.
upper (float): The upper boundary for the sum.
Returns:
float: The numerical value of the Gaussian range as summed from
lower to upper.
Notes:
This function does not thoroughly check the bounds, so if upper
is less than lower the function will break.
"""
if upper>self.upper:
upper=self.upper
if lower<self.lower:
lower = self.lower
i_l = int(np.floor((lower-self.lower)/self._dx))
i_u = int(np.floor((upper-self.lower)/self._dx))
total = 0.0
for i in range(i_l,i_u):
total+= self.y[i]
return total
def normalize(self):
"""Normalizes (by area) the Gaussian function values over the range."""
total = 0.0
for i in range(0,self.npoints):
total+=self.y[i]*self._dx
for i in range(0,self.npoints):
self.y[i]/=total
return
def reset_mean(self,new_mean):
"""Change the mean of the Gaussian function.
Args:
new_mean (float): The new mean of the Gaussian function.
Notes:
This function does not re-evaluate the Gaussian range and
therefore only affects the output of the eval function.
"""
self.mean = new_mean
return
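# Illustrative usage: with the PDF normalization used above, numerically
# integrating a unit Gaussian over a wide range gives a value close to 1.
# g = GaussianRange((-4.0, 4.0), mean=0.0, std=1.0, npoints=400)
# area = g.integrate_range(-4.0, 4.0)  # approximately 1.0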
| 30.927039
| 79
| 0.592007
| 972
| 7,206
| 4.305556
| 0.159465
| 0.073596
| 0.046595
| 0.032497
| 0.598088
| 0.564636
| 0.510155
| 0.45902
| 0.437276
| 0.393787
| 0
| 0.008871
| 0.327366
| 7,206
| 232
| 80
| 31.060345
| 0.854549
| 0.503469
| 0
| 0.659091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113636
| false
| 0
| 0.068182
| 0
| 0.318182
| 0.011364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6257ae02757a301799ad06a407d32569d49a6d5
| 1,352
|
py
|
Python
|
django_forms/forms_project/forms_project/urls.py
|
joyliao07/django_review
|
e4311d2ccbb96646a6867e5fc426ca67a122d7ed
|
[
"MIT"
] | null | null | null |
django_forms/forms_project/forms_project/urls.py
|
joyliao07/django_review
|
e4311d2ccbb96646a6867e5fc426ca67a122d7ed
|
[
"MIT"
] | 8
|
2020-02-12T00:30:10.000Z
|
2021-06-10T18:16:37.000Z
|
django_forms/forms_project/forms_project/urls.py
|
joyliao07/django_review
|
e4311d2ccbb96646a6867e5fc426ca67a122d7ed
|
[
"MIT"
] | null | null | null |
"""forms_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from forms_app import views
urlpatterns = [
path('', views.home_view, name="home"),
path('cbv', views.CBView.as_view()),
path('cbvt', views.CBVTemplate.as_view()),
path('forms_app/', include('forms_app.urls', namespace='forms_app')),
path('showtopic', views.show_topic, name="show topic"),
path('testforms', views.testform_view, name="test forms"),
path('userprofile', views.userprofile_view, name="user profile"),
path('register', views.register, name="register"),
path('login', views.login_view, name="login"),
path('logout', views.logout_view, name="logout"),
path('admin/', admin.site.urls),
]
| 39.764706
| 77
| 0.697485
| 193
| 1,352
| 4.803109
| 0.352332
| 0.05178
| 0.016181
| 0.02589
| 0.126214
| 0.126214
| 0.080906
| 0
| 0
| 0
| 0
| 0.006987
| 0.153107
| 1,352
| 33
| 78
| 40.969697
| 0.80262
| 0.465237
| 0
| 0
| 0
| 0
| 0.208101
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1875
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e625df758f3e2fdaeb576f8377536aeeebd5b8b3
| 635
|
py
|
Python
|
extractors/folha_news_extractor.py
|
LorhanSohaky/POOA
|
c604f03f9b7bbfccecb75a982cc76fe428c36433
|
[
"MIT"
] | 1
|
2020-12-05T21:01:10.000Z
|
2020-12-05T21:01:10.000Z
|
extractors/folha_news_extractor.py
|
LorhanSohaky/POOA
|
c604f03f9b7bbfccecb75a982cc76fe428c36433
|
[
"MIT"
] | null | null | null |
extractors/folha_news_extractor.py
|
LorhanSohaky/POOA
|
c604f03f9b7bbfccecb75a982cc76fe428c36433
|
[
"MIT"
] | null | null | null |
import requests
from bs4 import BeautifulSoup
from news import News
from .abstract_news_extractor import AbstractNewsExtractor
class FolhaNewsExtractor(AbstractNewsExtractor):
def __init__(self):
super().__init__('https://www.folha.uol.com.br')
def extract_news(self):
news = []
html_text = requests.get(self.url).text
soup = BeautifulSoup(html_text, 'html.parser')
for item in soup.find_all('ul','c-tools-share__list'):
title = item.get('data-sharebar-text')
url = item.get('data-sharebar-url')
news.append(News(title,url))
return news
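# Illustrative usage (assumes News simply stores the title/url passed in):
# extractor = FolhaNewsExtractor()
# for article in extractor.extract_news():
#     print(article)  # one News(title, url) per share widget found on the page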
| 27.608696
| 62
| 0.661417
| 78
| 635
| 5.179487
| 0.551282
| 0.039604
| 0.054455
| 0.094059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002028
| 0.223622
| 635
| 23
| 63
| 27.608696
| 0.817444
| 0
| 0
| 0
| 0
| 0
| 0.149371
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|