max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
processNodeModule.py | taxusyew/Hash | 0 | 6621351 | # coding=utf-8
import os
def renameDir(curDir):
# 获得当前目录下的所有文件、文件夹
curDirs = os.listdir(curDir)
print os.listdir(curDir)
index = 0
newDirs = []
newDir = ''
# 切换为当前文件夹
os.chdir(curDir)
# 遍历当前目录下的所有文件
for single_dir in curDirs:
# 只处理文件夹
if os.path.isdir(single_dir):
newDir = 'a'+str(index)
# python list 没有 push,用 append 代替
newDirs.append(newDir)
os.rename(single_dir, newDir)
index += 1
# 递归处理子文件夹
for new_single_dir in newDirs:
# 如果当前
child_dir = os.path.join(curDir, new_single_dir)
renameDir(child_dir)
return
renameDir('D:\\code\\fountain-new\\node_modules1')
| # coding=utf-8
import os
def renameDir(curDir):
# 获得当前目录下的所有文件、文件夹
curDirs = os.listdir(curDir)
print os.listdir(curDir)
index = 0
newDirs = []
newDir = ''
# 切换为当前文件夹
os.chdir(curDir)
# 遍历当前目录下的所有文件
for single_dir in curDirs:
# 只处理文件夹
if os.path.isdir(single_dir):
newDir = 'a'+str(index)
# python list 没有 push,用 append 代替
newDirs.append(newDir)
os.rename(single_dir, newDir)
index += 1
# 递归处理子文件夹
for new_single_dir in newDirs:
# 如果当前
child_dir = os.path.join(curDir, new_single_dir)
renameDir(child_dir)
return
renameDir('D:\\code\\fountain-new\\node_modules1')
| zh | 0.960839 | # coding=utf-8 # 获得当前目录下的所有文件、文件夹 # 切换为当前文件夹 # 遍历当前目录下的所有文件 # 只处理文件夹 # python list 没有 push,用 append 代替 # 递归处理子文件夹 # 如果当前 | 2.931539 | 3 |
center_tools/raspberry.py | likicese/center-tools | 0 | 6621352 | <reponame>likicese/center-tools
from django.http import HttpResponse
from django.shortcuts import render_to_response
import os
def raspberry(request):
return render_to_response("raspberry.html")
def operation(request):
request.encoding="utf-8"
message = "啥也没有"
if request.GET["op"] == "重启":
# message = os.system("sudo reboot")
message = os.system("date")
message = "我要重启啦\n" + message
if request.GET["op"] == "关机":
# message = os.system("sudo shutdown now")
message = os.system("echo nonono")
message = "我要关机啦\n" + message
return HttpResponse(message) | from django.http import HttpResponse
from django.shortcuts import render_to_response
import os
def raspberry(request):
return render_to_response("raspberry.html")
def operation(request):
request.encoding="utf-8"
message = "啥也没有"
if request.GET["op"] == "重启":
# message = os.system("sudo reboot")
message = os.system("date")
message = "我要重启啦\n" + message
if request.GET["op"] == "关机":
# message = os.system("sudo shutdown now")
message = os.system("echo nonono")
message = "我要关机啦\n" + message
return HttpResponse(message) | en | 0.086933 | # message = os.system("sudo reboot") # message = os.system("sudo shutdown now") | 2.366019 | 2 |
src/gtcl-for-python/input.py | konekato/gtcl | 3 | 6621353 | <filename>src/gtcl-for-python/input.py
import validate
import config
def dow_input():
while True:
dow = input()
if validate.is_dow(dow):
return dow
else:
print('月〜土で、 火 のように入力してください。')
def period_input():
while True:
period = input()
if validate.is_period(period):
return period
else:
print('1〜5で、 2 のように入力してください。')
def url_input():
while True:
url = input()
if not url:
return
elif validate.is_url(url):
return url
else:
config.PRINT_UPL()
print('で始まるようにURLを登録してください。')
print('enterキーでスキップできます。\n')
| <filename>src/gtcl-for-python/input.py
import validate
import config
def dow_input():
while True:
dow = input()
if validate.is_dow(dow):
return dow
else:
print('月〜土で、 火 のように入力してください。')
def period_input():
while True:
period = input()
if validate.is_period(period):
return period
else:
print('1〜5で、 2 のように入力してください。')
def url_input():
while True:
url = input()
if not url:
return
elif validate.is_url(url):
return url
else:
config.PRINT_UPL()
print('で始まるようにURLを登録してください。')
print('enterキーでスキップできます。\n')
| none | 1 | 3.387628 | 3 | |
8.py | flpcan/project_euler | 0 | 6621354 | <reponame>flpcan/project_euler<gh_stars>0
# Online Python - IDE, Editor, Compiler, Interpreter
code = """73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450"""
lst = list(map(lambda y: int(y), code.replace("\n","")))
number = int()
count = int()
def extra(k):
return lst[count + k]
for i in lst:
count = count + 1
num = int(i) * extra(0) * extra(1) * extra(2)* extra(3)* extra(4)* extra(5)* extra(6)* extra(7)* extra(8)* extra(9)* extra(10)* extra(11)
if num > number:
number = num
print(f"Numero max es = {number} y lo forman: ", int(i),extra(0) , extra(1) ,extra(2), extra(3), extra(4), extra(5),extra(6),extra(7), extra(8),extra(9), extra(10),extra(11))
# extra(3)* extra(4)* extra(5)* extra(6)* extra(7)* extra(8)* extra(9)* extra(10)* extra(11)
| # Online Python - IDE, Editor, Compiler, Interpreter
code = """73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450"""
lst = list(map(lambda y: int(y), code.replace("\n","")))
number = int()
count = int()
def extra(k):
return lst[count + k]
for i in lst:
count = count + 1
num = int(i) * extra(0) * extra(1) * extra(2)* extra(3)* extra(4)* extra(5)* extra(6)* extra(7)* extra(8)* extra(9)* extra(10)* extra(11)
if num > number:
number = num
print(f"Numero max es = {number} y lo forman: ", int(i),extra(0) , extra(1) ,extra(2), extra(3), extra(4), extra(5),extra(6),extra(7), extra(8),extra(9), extra(10),extra(11))
# extra(3)* extra(4)* extra(5)* extra(6)* extra(7)* extra(8)* extra(9)* extra(10)* extra(11) | ru | 0.347641 | # Online Python - IDE, Editor, Compiler, Interpreter 73167176531330624919225119674426574742355349194934 96983520312774506326239578318016984801869478851843 85861560789112949495459501737958331952853208805511 12540698747158523863050715693290963295227443043557 66896648950445244523161731856403098711121722383113 62229893423380308135336276614282806444486645238749 30358907296290491560440772390713810515859307960866 70172427121883998797908792274921901699720888093776 65727333001053367881220235421809751254540594752243 52584907711670556013604839586446706324415722155397 53697817977846174064955149290862569321978468622482 83972241375657056057490261407972968652414535100474 82166370484403199890008895243450658541227588666881 16427171479924442928230863465674813919123162824586 17866458359124566529476545682848912883142607690042 24219022671055626321111109370544217506941658960408 07198403850962455444362981230987879927244284909188 84580156166097919133875499200524063689912560717606 05886116467109405077541002256983155200055935729725 71636269561882670428252483600823257530420752963450 # extra(3)* extra(4)* extra(5)* extra(6)* extra(7)* extra(8)* extra(9)* extra(10)* extra(11) | 1.537408 | 2 |
knget/dirtycode.py | urain39/KngetPy | 4 | 6621355 | """It's works, but i think it is dirty.
See also: https://github.com/benjaminp/six/issues/142
"""
try:
''.format_map({})
except AttributeError: # Python < 3.2
import string
def format_map(format_string, mapping, _format=string.Formatter().vformat):
return _format(format_string, None, mapping)
del string
#XXX works on CPython 2.6
# http://stackoverflow.com/questions/2444680/how-do-i-add-my-own-custom-attributes-to-existing-built-in-python-types-like-a/2450942#2450942
import ctypes as c
class PyObject_HEAD(c.Structure):
_fields_ = [
('HEAD', c.c_ubyte * (object.__basicsize__ - c.sizeof(c.c_void_p))),
('ob_type', c.c_void_p)
]
_get_dict = c.pythonapi._PyObject_GetDictPtr
_get_dict.restype = c.POINTER(c.py_object)
_get_dict.argtypes = [c.py_object]
def get_dict(object):
return _get_dict(object).contents.value
get_dict(str)['format_map'] = format_map
else: # Python 3.2+
def format_map(format_string, mapping):
return format_string.format_map(mapping)
| """It's works, but i think it is dirty.
See also: https://github.com/benjaminp/six/issues/142
"""
try:
''.format_map({})
except AttributeError: # Python < 3.2
import string
def format_map(format_string, mapping, _format=string.Formatter().vformat):
return _format(format_string, None, mapping)
del string
#XXX works on CPython 2.6
# http://stackoverflow.com/questions/2444680/how-do-i-add-my-own-custom-attributes-to-existing-built-in-python-types-like-a/2450942#2450942
import ctypes as c
class PyObject_HEAD(c.Structure):
_fields_ = [
('HEAD', c.c_ubyte * (object.__basicsize__ - c.sizeof(c.c_void_p))),
('ob_type', c.c_void_p)
]
_get_dict = c.pythonapi._PyObject_GetDictPtr
_get_dict.restype = c.POINTER(c.py_object)
_get_dict.argtypes = [c.py_object]
def get_dict(object):
return _get_dict(object).contents.value
get_dict(str)['format_map'] = format_map
else: # Python 3.2+
def format_map(format_string, mapping):
return format_string.format_map(mapping)
| en | 0.778498 | It's works, but i think it is dirty. See also: https://github.com/benjaminp/six/issues/142 # Python < 3.2 #XXX works on CPython 2.6 # http://stackoverflow.com/questions/2444680/how-do-i-add-my-own-custom-attributes-to-existing-built-in-python-types-like-a/2450942#2450942 # Python 3.2+ | 2.404113 | 2 |
lib/add_relation_rbn_to_fn_lu.py | cltl/Dutch_FrameNet_Lexicon | 0 | 6621356 | """
Exploit translation relation between monosemous Dutch and English lemmas
Usage:
add_relation_rbn_to_fn_lu.py --config_path=<config_path> --input_folder=<input_folder> --use_wn_polysemy=<use_wn_polysemy> --pos=<pos> --verbose=<verbose>
Options:
--config_path=<config_path>
--input_folder=<input_folder> should contain combined.p and graph.p
--use_wn_polysemy=<use_wn_polysemy> if 'True', we only include English lemma,pos combinations that are monosemous in WordNet
--pos=<pos> RBN pos to use, e.g., 'noun-verb-adjective'
--verbose=<verbose> 0 --> no stdout 1 --> general stdout 2 --> detailed stdout
Example:
python add_relation_rbn_to_fn_lu.py --config_path="../input/config_files/v0.json" --input_folder="../output/dfn_objects" --use_wn_polysemy="True" --pos="noun-verb-adjective" --verbose=1
"""
import json
import os
import pickle
import sys
from docopt import docopt
from collections import defaultdict
import networkx as nx
from nltk.corpus import wordnet as wn
from datetime import datetime
from dfn_classes import fn_pos2wn_pos
import load_utils
odwn_classes = load_utils.load_python_module(module_path='../resources/ODWN_Reader',
module_name='odwn_classes',
verbose=1)
utils = load_utils.load_python_module(module_path='../resources/ODWN_Reader',
module_name='utils',
verbose=1)
# load arguments
arguments = docopt(__doc__)
print()
print('PROVIDED ARGUMENTS')
print(arguments)
print()
config_path = arguments['--config_path']
rbn_pos = set(arguments['--pos'].split('-'))
fn_pos = {pos[0].upper()
for pos in rbn_pos}
use_wn_filter = arguments['--use_wn_polysemy'] == 'True'
verbose = int(arguments['--verbose'])
cdate = datetime.utcnow().strftime("%m/%d/%Y %H:%M:%S UTC %a")
assert os.path.exists(config_path), f'{config_path} does not exist'
configuration = json.load(open(config_path))
rbn_senseid2le_obj = pickle.load(open(configuration['path_rbn_objs'], 'rb'))
synset_id2synset_obj = pickle.load(open(configuration['path_synset_objs'], 'rb'))
pol_info_df, \
pol_df, \
lemma_pos2le_ids = utils.load_polysemy_info(rbn_senseid2le_obj,
pos=rbn_pos)
frame_objs_path = os.path.join(arguments['--input_folder'], 'combined.p')
updated_frame_objs_path = os.path.join(arguments['--input_folder'], 'combined_v1.p')
graph_path = os.path.join(arguments['--input_folder'], 'graph.p')
updated_graph_path = os.path.join(arguments['--input_folder'], 'graph_v1.p')
for path in [frame_objs_path, graph_path]:
assert os.path.exists(path), f'path {path} does not exist'
fn_obj = pickle.load(open(frame_objs_path, 'rb'))
polysemy_folder = os.path.join(arguments['--input_folder'], 'polysemy_profiles')
lemma_and_pos_en2lemma_pos_nl = defaultdict(set)
for pos in fn_pos:
polysemy_profiles = pickle.load(open(f'{polysemy_folder}/{pos}.p', 'rb'))
for (lemma_nl, pos_nl), (lemma_en, pos_en) in polysemy_profiles['m2m']:
lemma_and_pos_en2lemma_pos_nl[(lemma_en, pos_en)].add((lemma_nl, pos_nl))
# for every frame
sense_id2lu_ids = defaultdict(set)
lu_id2sense_ids = defaultdict(set)
senseid_and_luid2provenance = dict()
for frame_label, frame_obj in fn_obj.framelabel2frame_obj.items():
# for every lemma obj
for lemma_obj in frame_obj.lemma_objs:
# make sure it comes from Wiktionary and the lemma is Dutch
if all([lemma_obj.provenance == 'wiktionary',
lemma_obj.language == 'Dutch']):
# retrieve the FrameNet LU object
lu_id = lemma_obj.lu_id
lu_obj = frame_obj.lu_id2lu_obj[lu_id]
key = (lu_obj.lexeme, lu_obj.pos)
# here we will add the RBN ids to will be linked to this FrameNet Lexical Unit
to_add = set()
# if the english lemma and pos are part of the chosen polysemy profile
if key in lemma_and_pos_en2lemma_pos_nl:
dutch_lemma_pos = lemma_and_pos_en2lemma_pos_nl[key]
if use_wn_filter:
wn_pos = fn_pos2wn_pos(lu_obj.pos)
assert wn_pos != 'UNMAPPABLE', f'could not map {lu_obj.pos} to WordNet'
synsets = wn.synsets(lu_obj.lexeme, wn_pos)
if len(synsets) >= 2:
if verbose >= 2:
print(f'skipping {lu_obj.lexeme} {lu_obj.pos} because wn polysemy of {len(synsets)}')
continue
# if the dutch lemma and pos are part of the chosen polysemy profile
if (lemma_obj.lemma, lemma_obj.pos) in dutch_lemma_pos:
# what are possible RBN senses?
sense_ids = lemma_pos2le_ids[(lemma_obj.lemma, lemma_obj.pos)]
# add synonyms?
for sense_id in sense_ids:
rbn_obj = rbn_senseid2le_obj[sense_id]
sense_id2lu_ids[sense_id].add(lu_id)
lu_id2sense_ids[lu_id].add(sense_id)
senseid_and_luid2provenance[(sense_id, lu_id)] = 'Iteration-1'
#rbn_obj = rbn_senseid2le_obj[sense_id]
#if rbn_obj.synset_id:
# synset_obj = synset_id2synset_obj[rbn_obj.synset_id]
# synonyms = {le_obj.sense_id
# for le_obj in synset_obj.synonyms
# if all(['cdb2.2_Manual' in le_obj.provenance_set,
# le_obj.sense_id != sense_id])
# }
# for synonym in synonyms:
# sense_id2lu_ids[sense_id].add(lu_id)
# lu_id2sense_ids[lu_id].add(sense_id)
# senseid_and_luid2provenance[(sense_id, lu_id)] = 'TRANSLATION:Wiktionary;METHOD:ODWN-synonym-of-monosemy-RBN-FN-WN'
# update graph
g = nx.read_gpickle(graph_path)
old_num_edges = len(g.edges())
added = 0
for frame_label, frame_obj in fn_obj.framelabel2frame_obj.items():
for lu_id, lu_obj in frame_obj.lu_id2lu_obj.items():
for sense_id in lu_id2sense_ids[lu_id]:
if len(sense_id2lu_ids[sense_id]) == 1:
le_obj = rbn_senseid2le_obj[sense_id]
g.add_edge(lu_id, le_obj.short_rdf_uri)
added += 1
provenance = senseid_and_luid2provenance[(sense_id, lu_id)]
key = (le_obj.lemma, le_obj.fn_pos)
assert key in fn_obj.dutch_lemma_pos2id, f'{key} has no Dutch lemma pos id'
lemma_pos_id = fn_obj.dutch_lemma_pos2id[key]
information = {
'provenance' : provenance,
'rbn_obj' : le_obj,
'cDate': cdate,
'status' : 'Created',
'lemmaID' : lemma_pos_id
}
lu_obj.rbn_senses.append(information)
if verbose >= 2:
print(lu_id, sense_id)
new_num_edges = len(g.edges())
assert (old_num_edges + added) == new_num_edges
if verbose:
print(f'number of new edges: {added}')
print(f'written to {updated_graph_path}')
nx.write_gpickle(g, updated_graph_path)
# save to file
with open(updated_frame_objs_path, 'wb') as outfile:
pickle.dump(fn_obj, outfile)
print(f'written to {updated_frame_objs_path}')
| """
Exploit translation relation between monosemous Dutch and English lemmas
Usage:
add_relation_rbn_to_fn_lu.py --config_path=<config_path> --input_folder=<input_folder> --use_wn_polysemy=<use_wn_polysemy> --pos=<pos> --verbose=<verbose>
Options:
--config_path=<config_path>
--input_folder=<input_folder> should contain combined.p and graph.p
--use_wn_polysemy=<use_wn_polysemy> if 'True', we only include English lemma,pos combinations that are monosemous in WordNet
--pos=<pos> RBN pos to use, e.g., 'noun-verb-adjective'
--verbose=<verbose> 0 --> no stdout 1 --> general stdout 2 --> detailed stdout
Example:
python add_relation_rbn_to_fn_lu.py --config_path="../input/config_files/v0.json" --input_folder="../output/dfn_objects" --use_wn_polysemy="True" --pos="noun-verb-adjective" --verbose=1
"""
import json
import os
import pickle
import sys
from docopt import docopt
from collections import defaultdict
import networkx as nx
from nltk.corpus import wordnet as wn
from datetime import datetime
from dfn_classes import fn_pos2wn_pos
import load_utils
odwn_classes = load_utils.load_python_module(module_path='../resources/ODWN_Reader',
module_name='odwn_classes',
verbose=1)
utils = load_utils.load_python_module(module_path='../resources/ODWN_Reader',
module_name='utils',
verbose=1)
# load arguments
arguments = docopt(__doc__)
print()
print('PROVIDED ARGUMENTS')
print(arguments)
print()
config_path = arguments['--config_path']
rbn_pos = set(arguments['--pos'].split('-'))
fn_pos = {pos[0].upper()
for pos in rbn_pos}
use_wn_filter = arguments['--use_wn_polysemy'] == 'True'
verbose = int(arguments['--verbose'])
cdate = datetime.utcnow().strftime("%m/%d/%Y %H:%M:%S UTC %a")
assert os.path.exists(config_path), f'{config_path} does not exist'
configuration = json.load(open(config_path))
rbn_senseid2le_obj = pickle.load(open(configuration['path_rbn_objs'], 'rb'))
synset_id2synset_obj = pickle.load(open(configuration['path_synset_objs'], 'rb'))
pol_info_df, \
pol_df, \
lemma_pos2le_ids = utils.load_polysemy_info(rbn_senseid2le_obj,
pos=rbn_pos)
frame_objs_path = os.path.join(arguments['--input_folder'], 'combined.p')
updated_frame_objs_path = os.path.join(arguments['--input_folder'], 'combined_v1.p')
graph_path = os.path.join(arguments['--input_folder'], 'graph.p')
updated_graph_path = os.path.join(arguments['--input_folder'], 'graph_v1.p')
for path in [frame_objs_path, graph_path]:
assert os.path.exists(path), f'path {path} does not exist'
fn_obj = pickle.load(open(frame_objs_path, 'rb'))
polysemy_folder = os.path.join(arguments['--input_folder'], 'polysemy_profiles')
lemma_and_pos_en2lemma_pos_nl = defaultdict(set)
for pos in fn_pos:
polysemy_profiles = pickle.load(open(f'{polysemy_folder}/{pos}.p', 'rb'))
for (lemma_nl, pos_nl), (lemma_en, pos_en) in polysemy_profiles['m2m']:
lemma_and_pos_en2lemma_pos_nl[(lemma_en, pos_en)].add((lemma_nl, pos_nl))
# for every frame
sense_id2lu_ids = defaultdict(set)
lu_id2sense_ids = defaultdict(set)
senseid_and_luid2provenance = dict()
for frame_label, frame_obj in fn_obj.framelabel2frame_obj.items():
# for every lemma obj
for lemma_obj in frame_obj.lemma_objs:
# make sure it comes from Wiktionary and the lemma is Dutch
if all([lemma_obj.provenance == 'wiktionary',
lemma_obj.language == 'Dutch']):
# retrieve the FrameNet LU object
lu_id = lemma_obj.lu_id
lu_obj = frame_obj.lu_id2lu_obj[lu_id]
key = (lu_obj.lexeme, lu_obj.pos)
# here we will add the RBN ids to will be linked to this FrameNet Lexical Unit
to_add = set()
# if the english lemma and pos are part of the chosen polysemy profile
if key in lemma_and_pos_en2lemma_pos_nl:
dutch_lemma_pos = lemma_and_pos_en2lemma_pos_nl[key]
if use_wn_filter:
wn_pos = fn_pos2wn_pos(lu_obj.pos)
assert wn_pos != 'UNMAPPABLE', f'could not map {lu_obj.pos} to WordNet'
synsets = wn.synsets(lu_obj.lexeme, wn_pos)
if len(synsets) >= 2:
if verbose >= 2:
print(f'skipping {lu_obj.lexeme} {lu_obj.pos} because wn polysemy of {len(synsets)}')
continue
# if the dutch lemma and pos are part of the chosen polysemy profile
if (lemma_obj.lemma, lemma_obj.pos) in dutch_lemma_pos:
# what are possible RBN senses?
sense_ids = lemma_pos2le_ids[(lemma_obj.lemma, lemma_obj.pos)]
# add synonyms?
for sense_id in sense_ids:
rbn_obj = rbn_senseid2le_obj[sense_id]
sense_id2lu_ids[sense_id].add(lu_id)
lu_id2sense_ids[lu_id].add(sense_id)
senseid_and_luid2provenance[(sense_id, lu_id)] = 'Iteration-1'
#rbn_obj = rbn_senseid2le_obj[sense_id]
#if rbn_obj.synset_id:
# synset_obj = synset_id2synset_obj[rbn_obj.synset_id]
# synonyms = {le_obj.sense_id
# for le_obj in synset_obj.synonyms
# if all(['cdb2.2_Manual' in le_obj.provenance_set,
# le_obj.sense_id != sense_id])
# }
# for synonym in synonyms:
# sense_id2lu_ids[sense_id].add(lu_id)
# lu_id2sense_ids[lu_id].add(sense_id)
# senseid_and_luid2provenance[(sense_id, lu_id)] = 'TRANSLATION:Wiktionary;METHOD:ODWN-synonym-of-monosemy-RBN-FN-WN'
# update graph
g = nx.read_gpickle(graph_path)
old_num_edges = len(g.edges())
added = 0
for frame_label, frame_obj in fn_obj.framelabel2frame_obj.items():
for lu_id, lu_obj in frame_obj.lu_id2lu_obj.items():
for sense_id in lu_id2sense_ids[lu_id]:
if len(sense_id2lu_ids[sense_id]) == 1:
le_obj = rbn_senseid2le_obj[sense_id]
g.add_edge(lu_id, le_obj.short_rdf_uri)
added += 1
provenance = senseid_and_luid2provenance[(sense_id, lu_id)]
key = (le_obj.lemma, le_obj.fn_pos)
assert key in fn_obj.dutch_lemma_pos2id, f'{key} has no Dutch lemma pos id'
lemma_pos_id = fn_obj.dutch_lemma_pos2id[key]
information = {
'provenance' : provenance,
'rbn_obj' : le_obj,
'cDate': cdate,
'status' : 'Created',
'lemmaID' : lemma_pos_id
}
lu_obj.rbn_senses.append(information)
if verbose >= 2:
print(lu_id, sense_id)
new_num_edges = len(g.edges())
assert (old_num_edges + added) == new_num_edges
if verbose:
print(f'number of new edges: {added}')
print(f'written to {updated_graph_path}')
nx.write_gpickle(g, updated_graph_path)
# save to file
with open(updated_frame_objs_path, 'wb') as outfile:
pickle.dump(fn_obj, outfile)
print(f'written to {updated_frame_objs_path}')
| en | 0.434173 | Exploit translation relation between monosemous Dutch and English lemmas Usage: add_relation_rbn_to_fn_lu.py --config_path=<config_path> --input_folder=<input_folder> --use_wn_polysemy=<use_wn_polysemy> --pos=<pos> --verbose=<verbose> Options: --config_path=<config_path> --input_folder=<input_folder> should contain combined.p and graph.p --use_wn_polysemy=<use_wn_polysemy> if 'True', we only include English lemma,pos combinations that are monosemous in WordNet --pos=<pos> RBN pos to use, e.g., 'noun-verb-adjective' --verbose=<verbose> 0 --> no stdout 1 --> general stdout 2 --> detailed stdout Example: python add_relation_rbn_to_fn_lu.py --config_path="../input/config_files/v0.json" --input_folder="../output/dfn_objects" --use_wn_polysemy="True" --pos="noun-verb-adjective" --verbose=1 # load arguments # for every frame # for every lemma obj # make sure it comes from Wiktionary and the lemma is Dutch # retrieve the FrameNet LU object # here we will add the RBN ids to will be linked to this FrameNet Lexical Unit # if the english lemma and pos are part of the chosen polysemy profile # if the dutch lemma and pos are part of the chosen polysemy profile # what are possible RBN senses? # add synonyms? #rbn_obj = rbn_senseid2le_obj[sense_id] #if rbn_obj.synset_id: # synset_obj = synset_id2synset_obj[rbn_obj.synset_id] # synonyms = {le_obj.sense_id # for le_obj in synset_obj.synonyms # if all(['cdb2.2_Manual' in le_obj.provenance_set, # le_obj.sense_id != sense_id]) # } # for synonym in synonyms: # sense_id2lu_ids[sense_id].add(lu_id) # lu_id2sense_ids[lu_id].add(sense_id) # senseid_and_luid2provenance[(sense_id, lu_id)] = 'TRANSLATION:Wiktionary;METHOD:ODWN-synonym-of-monosemy-RBN-FN-WN' # update graph # save to file | 2.631911 | 3 |
segmentation/train.py | deltaautonomy/delta_perception | 1 | 6621357 | <reponame>deltaautonomy/delta_perception
r"""Main Training script for ICNet"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
from google.protobuf import text_format
from builders import model_builder
from builders import dataset_builder
from protos import pipeline_pb2
from libs.trainer import train_segmentation_model
tf.logging.set_verbosity(tf.logging.INFO)
flags = tf.app.flags
FLAGS = flags.FLAGS
# Distributed training settings
flags.DEFINE_integer('num_clones', 1,
'Number of model clones to deploy to each worker replica.'
'This should be greater than one if you want to use '
'multiple GPUs located on a single machine.')
flags.DEFINE_boolean('clone_on_cpu', False, 'Use CPUs to deploy clones.')
flags.DEFINE_integer('num_replicas', 1,
'Number of worker replicas. This typically corresponds '
'to the number of machines you are training on. Note '
'that the training will be done asynchronously.')
flags.DEFINE_integer('startup_delay_steps', 15,
'Number of training steps between replicas startup.')
flags.DEFINE_integer('num_ps_tasks', 0,
'The number of parameter servers. If the value is 0, then '
'the parameters are handled locally by the worker. It is '
'reccomended to use num_ps_tasks=num_replicas/2.')
flags.DEFINE_string('master', '', 'BNS name of the tensorflow server')
flags.DEFINE_integer('task', 0, 'The task ID. Should increment per worker '
'replica added to achieve between graph replication.')
# Training configuration settings
flags.DEFINE_string('config_path', '',
'Path to a pipeline_pb2.TrainEvalConfig config '
'file. If provided, other configs are ignored')
flags.mark_flag_as_required('config_path')
flags.DEFINE_string('logdir', '',
'Directory to save the checkpoints and training summaries.')
flags.mark_flag_as_required('logdir')
flags.DEFINE_integer('save_interval_secs', 600, # default to 5 min
'Time between successive saves of a checkpoint in secs.')
flags.DEFINE_integer('max_checkpoints_to_keep', 15, # might want to cut this down
'Number of checkpoints to keep in the `logdir`.')
# Debug flag
flags.DEFINE_boolean('image_summaries', False, '')
def main(_):
tf.gfile.MakeDirs(FLAGS.logdir)
pipeline_config = pipeline_pb2.PipelineConfig()
with tf.gfile.GFile(FLAGS.config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, pipeline_config)
model_config = pipeline_config.model
train_config = pipeline_config.train_config
input_config = pipeline_config.train_input_reader
create_model_fn = functools.partial(
model_builder.build,
model_config=model_config,
is_training=True)
create_input_fn = functools.partial(
dataset_builder.build,
input_reader_config=input_config)
is_chief = (FLAGS.task == 0)
train_segmentation_model(
create_model_fn,
create_input_fn,
train_config,
master=FLAGS.master,
task=FLAGS.task,
is_chief=is_chief,
startup_delay_steps=FLAGS.startup_delay_steps,
train_dir=FLAGS.logdir,
num_clones=FLAGS.num_clones,
num_worker_replicas=FLAGS.num_replicas,
clone_on_cpu=FLAGS.clone_on_cpu,
replica_id=FLAGS.task,
num_replicas=FLAGS.num_replicas,
num_ps_tasks=FLAGS.num_ps_tasks,
max_checkpoints_to_keep=FLAGS.max_checkpoints_to_keep,
save_interval_secs=FLAGS.save_interval_secs,
image_summaries=FLAGS.image_summaries)
if __name__ == '__main__':
tf.app.run()
| r"""Main Training script for ICNet"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
from google.protobuf import text_format
from builders import model_builder
from builders import dataset_builder
from protos import pipeline_pb2
from libs.trainer import train_segmentation_model
tf.logging.set_verbosity(tf.logging.INFO)
flags = tf.app.flags
FLAGS = flags.FLAGS
# Distributed training settings
flags.DEFINE_integer('num_clones', 1,
'Number of model clones to deploy to each worker replica.'
'This should be greater than one if you want to use '
'multiple GPUs located on a single machine.')
flags.DEFINE_boolean('clone_on_cpu', False, 'Use CPUs to deploy clones.')
flags.DEFINE_integer('num_replicas', 1,
'Number of worker replicas. This typically corresponds '
'to the number of machines you are training on. Note '
'that the training will be done asynchronously.')
flags.DEFINE_integer('startup_delay_steps', 15,
'Number of training steps between replicas startup.')
flags.DEFINE_integer('num_ps_tasks', 0,
'The number of parameter servers. If the value is 0, then '
'the parameters are handled locally by the worker. It is '
'reccomended to use num_ps_tasks=num_replicas/2.')
flags.DEFINE_string('master', '', 'BNS name of the tensorflow server')
flags.DEFINE_integer('task', 0, 'The task ID. Should increment per worker '
'replica added to achieve between graph replication.')
# Training configuration settings
flags.DEFINE_string('config_path', '',
'Path to a pipeline_pb2.TrainEvalConfig config '
'file. If provided, other configs are ignored')
flags.mark_flag_as_required('config_path')
flags.DEFINE_string('logdir', '',
'Directory to save the checkpoints and training summaries.')
flags.mark_flag_as_required('logdir')
flags.DEFINE_integer('save_interval_secs', 600, # default to 5 min
'Time between successive saves of a checkpoint in secs.')
flags.DEFINE_integer('max_checkpoints_to_keep', 15, # might want to cut this down
'Number of checkpoints to keep in the `logdir`.')
# Debug flag
flags.DEFINE_boolean('image_summaries', False, '')
def main(_):
tf.gfile.MakeDirs(FLAGS.logdir)
pipeline_config = pipeline_pb2.PipelineConfig()
with tf.gfile.GFile(FLAGS.config_path, "r") as f:
proto_str = f.read()
text_format.Merge(proto_str, pipeline_config)
model_config = pipeline_config.model
train_config = pipeline_config.train_config
input_config = pipeline_config.train_input_reader
create_model_fn = functools.partial(
model_builder.build,
model_config=model_config,
is_training=True)
create_input_fn = functools.partial(
dataset_builder.build,
input_reader_config=input_config)
is_chief = (FLAGS.task == 0)
train_segmentation_model(
create_model_fn,
create_input_fn,
train_config,
master=FLAGS.master,
task=FLAGS.task,
is_chief=is_chief,
startup_delay_steps=FLAGS.startup_delay_steps,
train_dir=FLAGS.logdir,
num_clones=FLAGS.num_clones,
num_worker_replicas=FLAGS.num_replicas,
clone_on_cpu=FLAGS.clone_on_cpu,
replica_id=FLAGS.task,
num_replicas=FLAGS.num_replicas,
num_ps_tasks=FLAGS.num_ps_tasks,
max_checkpoints_to_keep=FLAGS.max_checkpoints_to_keep,
save_interval_secs=FLAGS.save_interval_secs,
image_summaries=FLAGS.image_summaries)
if __name__ == '__main__':
tf.app.run() | en | 0.788069 | Main Training script for ICNet # Distributed training settings # Training configuration settings # default to 5 min # might want to cut this down # Debug flag | 2.133142 | 2 |
publications/htlatex-font-converter.py | nushio3/nushio3.github.io | 0 | 6621358 | <reponame>nushio3/nushio3.github.io<filename>publications/htlatex-font-converter.py
#!/usr/bin/env python
# run this script at path:
# /usr/share/texmf/tex4ht/ht-fonts/unicode/cjk/utf8
def convert(n):
try:
fni = 'utf8song{:02x}.htf'.format(n)
fno = 'udmj{:02x}.htf'.format(n)
with(open(fni,'r')) as fpi:
con = fpi.read().split('\n')
con[0] = 'udmj{:02x} 0 255'.format(n)
con[-3] = 'udmj{:02x} 0 255'.format(n)
with(open(fno,'w')) as fpo:
fpo.write('\n'.join(con))
except:
pass
map(convert,range(1,256))
| #!/usr/bin/env python
# run this script at path:
# /usr/share/texmf/tex4ht/ht-fonts/unicode/cjk/utf8
def convert(n):
try:
fni = 'utf8song{:02x}.htf'.format(n)
fno = 'udmj{:02x}.htf'.format(n)
with(open(fni,'r')) as fpi:
con = fpi.read().split('\n')
con[0] = 'udmj{:02x} 0 255'.format(n)
con[-3] = 'udmj{:02x} 0 255'.format(n)
with(open(fno,'w')) as fpo:
fpo.write('\n'.join(con))
except:
pass
map(convert,range(1,256)) | en | 0.449595 | #!/usr/bin/env python # run this script at path: # /usr/share/texmf/tex4ht/ht-fonts/unicode/cjk/utf8 | 2.916396 | 3 |
preprocessing/prep.py | SkTim/DRS | 0 | 6621359 | # lhy
# 2017.4
import re
import json
import cPickle
def str_rep(text, rep):
rep = dict((re.escape(k), v) for k, v in rep.iteritems())
pattern = re.compile("|".join(rep.keys()))
text = pattern.sub(lambda m: rep[re.escape(m.group(0))], text)
return text
def add_relation(text, d_words):
pattern = re.compile(r'\w+_\w+')
match = set(pattern.findall(text))
# print(len(match))
n = len(d_words)
for r in match:
d_words[r] = str(n)
n += 1
d_rela = {}
for r in match:
d_rela[r] = d_words[r]
return d_words, d_rela
def movie2dict(dict_r, movie_name, recs_idx, output):
words = [x.strip('\n') for x in open(dict_r, 'r').readlines()]
d_words = {}
for i, word in enumerate(words):
d_words[word] = str(i + 1)
movie_dict = json.load(open(movie_name, 'rb'))
movie_list = {}
for key, value in movie_dict.items():
movie_list[str(value)] = key.encode('utf-8').lower()
in_list = open(recs_idx).readlines()
recs_list = [x.strip('\n').split(' ') for x in in_list]
recs = []
for i, r in enumerate(recs_list):
try:
recs.append(' '.join([d_words[movie_list[x]] for x in r]))
except:
print(i, r)
continue
# recs = [' '.join([d_words[movie_list[x]] for x in y]) for y in recs_list]
open(output, 'w').write('\n'.join(recs))
class Prep:
def __init__(self, route):
self.dict_r = route + 'dictionary.txt'
self.enti_r = route + 'entities.txt'
self.kb_r = route + 'movie_kb.txt'
self.qa_r = route + 'task1_qa/'
self.recs_r = route + 'task2_recs/'
self.pattern = re.compile(r'\w+_\w+')
def load_dict(self):
in_list = open(self.dict_r, 'r').readlines()
words = [x.strip('\n') for x in in_list]
d_words = [(x, str(i + 1)) for i, x in enumerate(words)]
self.d_words = dict(d_words)
return self.d_words
def load_enti(self):
in_list = open(self.enti_r, 'r').readlines()
entities = [x.strip('\n') for x in in_list]
d_enti = [(x, str(i + 1)) for i, x in enumerate(entities)]
self.d_enti = dict(d_enti)
# self.d_words_all = self.d_words.copy()
# n_w = len(self.d_words)
# for enti in self.d_enti:
# self.d_words_all[enti] = 'entity%s' % str(n_w)
# n_w += 1
d_enti_dict = [(x.lower(), self.d_words[x.lower()]) for x in entities]
self.d_enti_dict = dict(d_enti_dict)
return self.d_enti, self.d_enti_dict
def index_line(self, line):
if line == '':
return ''
line = line[2:].lower()
l = line.split(' ')
def index_line_kb(self, line):
if line == '':
return ''
if 'plot' in line:
line = line[:-1]
# return ''
# print(len(line))
line = line[2:].lower()
# relation = self.pattern.search(line).group()
# print(relation)
words = line.split(' ')
# enti, ans = line.split(' %s ' % relation)
# ans_enti = ans.strip('\n').split(', ')
# try:
# enti_rela_index = [str(self.d_enti[enti]), str(self.d_rela[relation])]
# ans_index = [self.d_enti[x] for x in ans_enti]
# except:
# return ''
# return ' '.join(enti_rela_index + ans_index) + '\n'
word_idx = []
search_range = [10 - x for x in range(10)]
# search_range.reverse()
i = 0
while i < len(words):
for j in search_range:
query = ' '.join(words[i : i + j]).strip(',')
if query in self.d_words:
word_idx.append(self.d_words[query])
i = i + j - 1
break
i += 1
return '%s\n' % ' '.join(word_idx)
def index_line_qa(self, line):
if line == '':
return ''
line = line[2:]#.replace(',', '')
ques, ans = line.strip('\n').split('?\t')
q_words = ques.lower().split(' ')
q_cache = []
i = 0
search_range = range(10)
search_range.reverse()
while i < len(q_words):
word = q_words[i]
s = 0
for j in search_range:
if i + j >= len(q_words):
continue
query = ' '.join(q_words[i : i + j + 1]).strip(',')
# if 'Jane?' in line:
# print(query)
if query in self.d_enti_dict:
# print(query)
s = 1
i = i + j
break
i += 1
if s == 0:
q_cache.append(self.d_words[word.replace(',', '')])
else:
q_cache.append(self.d_enti_dict[query])
ques = ' '.join(q_cache)
ans_enti = ans.split(', ')
try:
ans_idx = ' '.join([self.d_enti[x] for x in ans_enti])
except:
return ''
text = '%s ? %s\n' % (ques, ans_idx)
return text
def index_line_recs(self, line, movies):
# print(line)
line = line[2:].strip('\n')
query, recs = line.split('?\t')
q_words = query.split(' ')
# query_idx = [self.d_enti[x] for x in query_enti]
# text = '%s %s' % (' '.join(query_idx), self.d_enti[recs])
search_range = range(10)
search_range.reverse()
i = 0
query_idx = []
while i < len(q_words):
for j in search_range:
if i + j >= len(q_words):
continue
query = ' '.join(q_words[i : i + j + 1]).strip(',')
if query in movies:
query_idx.append(str(movies[query.decode('utf-8')]))
i = i + j
break
i += 1
return '%s %s\n' % (' '.join(query_idx), str(movies[recs.decode('utf-8')]))
def load_kb(self):
text = open(self.kb_r, 'r').read()
in_list = text.split('\n')
# self.d_enti_rela, self.d_rela = add_relation(text, self.d_enti)
index_list = [self.index_line_kb(line) for line in in_list]
text = ''.join(index_list)
return text
def load_qa(self, data):
in_list = open(self.qa_r + data, 'r').readlines()
# text = self.index_line_qa(in_list[0])
text = ''.join([self.index_line_qa(line) for line in in_list])
return text
def load_recs_name(self, data):
movies = json.load(open('data/movie_name_dict.json', 'rb'))
in_list = open(self.recs_r + data, 'r').readlines()
text = ''.join([self.index_line_recs(line, movies) for line in in_list])
return text
def save_dict(self, config):
if config == 'json':
pkg = json
if config == 'cPickle':
pkg = cPickle
json.dump({'d_words' : self.d_words,
'd_enti' : self.d_enti,
'd_enti_rela' : self.d_enti_rela},
open('data/enti_dict.%s' % config, 'wb'))
print('Dictionary saved') | # lhy
# 2017.4
import re
import json
import cPickle
def str_rep(text, rep):
rep = dict((re.escape(k), v) for k, v in rep.iteritems())
pattern = re.compile("|".join(rep.keys()))
text = pattern.sub(lambda m: rep[re.escape(m.group(0))], text)
return text
def add_relation(text, d_words):
pattern = re.compile(r'\w+_\w+')
match = set(pattern.findall(text))
# print(len(match))
n = len(d_words)
for r in match:
d_words[r] = str(n)
n += 1
d_rela = {}
for r in match:
d_rela[r] = d_words[r]
return d_words, d_rela
def movie2dict(dict_r, movie_name, recs_idx, output):
words = [x.strip('\n') for x in open(dict_r, 'r').readlines()]
d_words = {}
for i, word in enumerate(words):
d_words[word] = str(i + 1)
movie_dict = json.load(open(movie_name, 'rb'))
movie_list = {}
for key, value in movie_dict.items():
movie_list[str(value)] = key.encode('utf-8').lower()
in_list = open(recs_idx).readlines()
recs_list = [x.strip('\n').split(' ') for x in in_list]
recs = []
for i, r in enumerate(recs_list):
try:
recs.append(' '.join([d_words[movie_list[x]] for x in r]))
except:
print(i, r)
continue
# recs = [' '.join([d_words[movie_list[x]] for x in y]) for y in recs_list]
open(output, 'w').write('\n'.join(recs))
class Prep:
def __init__(self, route):
self.dict_r = route + 'dictionary.txt'
self.enti_r = route + 'entities.txt'
self.kb_r = route + 'movie_kb.txt'
self.qa_r = route + 'task1_qa/'
self.recs_r = route + 'task2_recs/'
self.pattern = re.compile(r'\w+_\w+')
def load_dict(self):
in_list = open(self.dict_r, 'r').readlines()
words = [x.strip('\n') for x in in_list]
d_words = [(x, str(i + 1)) for i, x in enumerate(words)]
self.d_words = dict(d_words)
return self.d_words
def load_enti(self):
in_list = open(self.enti_r, 'r').readlines()
entities = [x.strip('\n') for x in in_list]
d_enti = [(x, str(i + 1)) for i, x in enumerate(entities)]
self.d_enti = dict(d_enti)
# self.d_words_all = self.d_words.copy()
# n_w = len(self.d_words)
# for enti in self.d_enti:
# self.d_words_all[enti] = 'entity%s' % str(n_w)
# n_w += 1
d_enti_dict = [(x.lower(), self.d_words[x.lower()]) for x in entities]
self.d_enti_dict = dict(d_enti_dict)
return self.d_enti, self.d_enti_dict
def index_line(self, line):
if line == '':
return ''
line = line[2:].lower()
l = line.split(' ')
def index_line_kb(self, line):
if line == '':
return ''
if 'plot' in line:
line = line[:-1]
# return ''
# print(len(line))
line = line[2:].lower()
# relation = self.pattern.search(line).group()
# print(relation)
words = line.split(' ')
# enti, ans = line.split(' %s ' % relation)
# ans_enti = ans.strip('\n').split(', ')
# try:
# enti_rela_index = [str(self.d_enti[enti]), str(self.d_rela[relation])]
# ans_index = [self.d_enti[x] for x in ans_enti]
# except:
# return ''
# return ' '.join(enti_rela_index + ans_index) + '\n'
word_idx = []
search_range = [10 - x for x in range(10)]
# search_range.reverse()
i = 0
while i < len(words):
for j in search_range:
query = ' '.join(words[i : i + j]).strip(',')
if query in self.d_words:
word_idx.append(self.d_words[query])
i = i + j - 1
break
i += 1
return '%s\n' % ' '.join(word_idx)
def index_line_qa(self, line):
if line == '':
return ''
line = line[2:]#.replace(',', '')
ques, ans = line.strip('\n').split('?\t')
q_words = ques.lower().split(' ')
q_cache = []
i = 0
search_range = range(10)
search_range.reverse()
while i < len(q_words):
word = q_words[i]
s = 0
for j in search_range:
if i + j >= len(q_words):
continue
query = ' '.join(q_words[i : i + j + 1]).strip(',')
# if 'Jane?' in line:
# print(query)
if query in self.d_enti_dict:
# print(query)
s = 1
i = i + j
break
i += 1
if s == 0:
q_cache.append(self.d_words[word.replace(',', '')])
else:
q_cache.append(self.d_enti_dict[query])
ques = ' '.join(q_cache)
ans_enti = ans.split(', ')
try:
ans_idx = ' '.join([self.d_enti[x] for x in ans_enti])
except:
return ''
text = '%s ? %s\n' % (ques, ans_idx)
return text
def index_line_recs(self, line, movies):
# print(line)
line = line[2:].strip('\n')
query, recs = line.split('?\t')
q_words = query.split(' ')
# query_idx = [self.d_enti[x] for x in query_enti]
# text = '%s %s' % (' '.join(query_idx), self.d_enti[recs])
search_range = range(10)
search_range.reverse()
i = 0
query_idx = []
while i < len(q_words):
for j in search_range:
if i + j >= len(q_words):
continue
query = ' '.join(q_words[i : i + j + 1]).strip(',')
if query in movies:
query_idx.append(str(movies[query.decode('utf-8')]))
i = i + j
break
i += 1
return '%s %s\n' % (' '.join(query_idx), str(movies[recs.decode('utf-8')]))
def load_kb(self):
text = open(self.kb_r, 'r').read()
in_list = text.split('\n')
# self.d_enti_rela, self.d_rela = add_relation(text, self.d_enti)
index_list = [self.index_line_kb(line) for line in in_list]
text = ''.join(index_list)
return text
def load_qa(self, data):
in_list = open(self.qa_r + data, 'r').readlines()
# text = self.index_line_qa(in_list[0])
text = ''.join([self.index_line_qa(line) for line in in_list])
return text
def load_recs_name(self, data):
movies = json.load(open('data/movie_name_dict.json', 'rb'))
in_list = open(self.recs_r + data, 'r').readlines()
text = ''.join([self.index_line_recs(line, movies) for line in in_list])
return text
def save_dict(self, config):
if config == 'json':
pkg = json
if config == 'cPickle':
pkg = cPickle
json.dump({'d_words' : self.d_words,
'd_enti' : self.d_enti,
'd_enti_rela' : self.d_enti_rela},
open('data/enti_dict.%s' % config, 'wb'))
print('Dictionary saved') | en | 0.292964 | # lhy # 2017.4 # print(len(match)) # recs = [' '.join([d_words[movie_list[x]] for x in y]) for y in recs_list] # self.d_words_all = self.d_words.copy() # n_w = len(self.d_words) # for enti in self.d_enti: # self.d_words_all[enti] = 'entity%s' % str(n_w) # n_w += 1 # return '' # print(len(line)) # relation = self.pattern.search(line).group() # print(relation) # enti, ans = line.split(' %s ' % relation) # ans_enti = ans.strip('\n').split(', ') # try: # enti_rela_index = [str(self.d_enti[enti]), str(self.d_rela[relation])] # ans_index = [self.d_enti[x] for x in ans_enti] # except: # return '' # return ' '.join(enti_rela_index + ans_index) + '\n' # search_range.reverse() #.replace(',', '') # if 'Jane?' in line: # print(query) # print(query) # print(line) # query_idx = [self.d_enti[x] for x in query_enti] # text = '%s %s' % (' '.join(query_idx), self.d_enti[recs]) # self.d_enti_rela, self.d_rela = add_relation(text, self.d_enti) # text = self.index_line_qa(in_list[0]) | 3.071045 | 3 |
tests/unit/models/ar.py | RaenonX/Jelly-Bot-API | 5 | 6621360 | from typing import Tuple, Dict, Any, Type, List
from datetime import datetime
from bson import ObjectId
from django.utils import timezone
from extutils.color import ColorFactory
from flags import AutoReplyContentType, ModelValidityCheckResult
from models import (
Model, AutoReplyModuleModel, AutoReplyContentModel, AutoReplyModuleExecodeModel, AutoReplyModuleTagModel
)
from models.exceptions import ModelConstructionError, InvalidModelError, InvalidModelFieldError
from tests.base import TestModel
__all__ = ["TestAutoReplyModuleModel", "TestAutoReplyContentModel", "TestAutoReplyModuleExecodeModel",
"TestAutoReplyModuleTagModel"]
class TestAutoReplyContentModel(TestModel.TestClass):
KEY_SKIP_CHECK_INVALID = {("t", "ContentType")}
@classmethod
def get_model_class(cls) -> Type[Model]:
return AutoReplyContentModel
@classmethod
def get_default(cls) -> Dict[Tuple[str, str], Tuple[Any, Any]]:
return {("t", "ContentType"): (AutoReplyContentType.TEXT, AutoReplyContentType.IMAGE)}
@classmethod
def get_required(cls) -> Dict[Tuple[str, str], Any]:
return {("c", "Content"): "https://i.imgur.com/p7qm0vx.jpg"}
@classmethod
def get_invalid(cls) -> List[Tuple[Dict[Tuple[str, str], Any], Type[ModelConstructionError]]]:
return [
(
{("c", "Content"): "87A", ("t", "ContentType"): AutoReplyContentType.LINE_STICKER},
InvalidModelError
),
(
{("c", "Content"): "https://google.com", ("t", "ContentType"): AutoReplyContentType.IMAGE},
InvalidModelError
),
(
{("c", "Content"): "", ("t", "ContentType"): AutoReplyContentType.TEXT},
InvalidModelFieldError
)
]
def test_validity_check(self):
data = (
(AutoReplyContentType.IMAGE, "https://i.imgur.com/o4vvhXy.jpg", "https://google.com",
ModelValidityCheckResult.X_AR_CONTENT_NOT_IMAGE),
(AutoReplyContentType.TEXT, "A", "", None),
(AutoReplyContentType.LINE_STICKER, "34404512", "87A",
ModelValidityCheckResult.X_AR_CONTENT_NOT_LINE_STICKER)
)
for content_type, content_init, content_new, validity_result in data:
with self.subTest(content_type=content_type, content_new=content_new):
mdl = self.get_constructed_model(c=content_init, t=content_type)
# Some exception may happen when setting the value, this is the expected behavior
try:
mdl.content = content_new
except Exception:
continue
# If the content can be set, it should invalidate the model
actual_result = mdl.perform_validity_check()
self.assertEqual(actual_result, validity_result, actual_result)
def test_content_html(self):
"""Only testing if the content can be outputted without exception."""
mdls = (
AutoReplyContentModel(Content="X", ContentType=AutoReplyContentType.TEXT),
AutoReplyContentModel(Content="https://i.imgur.com/o4vvhXy.jpg", ContentType=AutoReplyContentType.IMAGE),
AutoReplyContentModel(Content="34404512", ContentType=AutoReplyContentType.LINE_STICKER),
)
for mdl in mdls:
self.assertIsNotNone(mdl.content_html)
self.assertIsNotNone(str(mdl))
channel_oid = ObjectId()
creator_oid = ObjectId()
remover_oid = ObjectId()
refer_oid = ObjectId()
excluded_1 = ObjectId()
tag_1 = ObjectId()
last_used = datetime(2020, 5, 10, 10, 36, tzinfo=timezone.utc)
remove_at = datetime(2020, 5, 20, 10, 36, tzinfo=timezone.utc)
class TestAutoReplyModuleModel(TestModel.TestClass):
@classmethod
def get_model_class(cls) -> Type[Model]:
return AutoReplyModuleModel
@classmethod
def get_optional(cls) -> Dict[Tuple[str, str], Any]:
return {
("rid", "ReferTo"): refer_oid,
("rmv", "RemoverOid"): remover_oid
}
@classmethod
def get_default(cls) -> Dict[Tuple[str, str], Tuple[Any, Any]]:
return {
("at", "Active"): (True, False),
("p", "Pinned"): (False, True),
("pr", "Private"): (False, True),
("cd", "CooldownSec"): (0, 10),
("e", "ExcludedOids"): ([], [excluded_1]),
("t", "TagIds"): ([], [tag_1]),
("c", "CalledCount"): (0, 10),
("l", "LastUsed"): (None, last_used),
("rm", "RemovedAt"): (None, remove_at)
}
@classmethod
def get_required(cls) -> Dict[Tuple[str, str], Any]:
return {
("kw", "Keyword"): {"c": "ABC", "t": AutoReplyContentType.TEXT},
("rp", "Responses"): [{"c": "DEF", "t": AutoReplyContentType.TEXT}],
("ch", "ChannelOid"): channel_oid,
("cr", "CreatorOid"): creator_oid
}
def test_refer(self):
mdl = self.get_constructed_model()
oid = ObjectId()
mdl.refer_to = oid
self.assertTrue(mdl.is_reference)
self.assertEqual(mdl.refer_oid, oid)
def test_kw_repr(self):
mdl = self.get_constructed_model()
self.assertIsNotNone(mdl.keyword_repr)
def test_last_used_repr(self):
mdl = self.get_constructed_model()
self.assertIsNone(mdl.last_used)
mdl.last_used = last_used
self.assertEqual(mdl.last_used, last_used)
def test_remove_at_repr(self):
mdl = self.get_constructed_model()
self.assertIsNone(mdl.removed_at)
mdl.removed_at = remove_at
self.assertEqual(mdl.removed_at, remove_at)
class TestAutoReplyModuleExecodeModel(TestModel.TestClass):
@classmethod
def get_model_class(cls) -> Type[Model]:
return AutoReplyModuleExecodeModel
@classmethod
def get_required(cls) -> Dict[Tuple[str, str], Any]:
return {
("kw", "Keyword"): {"c": "ABC", "t": AutoReplyContentType.TEXT},
("rp", "Responses"): [{"c": "DEF", "t": AutoReplyContentType.TEXT}],
}
@classmethod
def get_default(cls) -> Dict[Tuple[str, str], Tuple[Any, Any]]:
return {
("p", "Pinned"): (False, True),
("pr", "Private"): (False, True),
("cd", "CooldownSec"): (0, 10),
("t", "TagIds"): ([], [tag_1]),
}
class TestAutoReplyModuleTagModel(TestModel.TestClass):
@classmethod
def get_model_class(cls) -> Type[Model]:
return AutoReplyModuleTagModel
@classmethod
def get_required(cls) -> Dict[Tuple[str, str], Any]:
return {("n", "Name"): "Tag"}
@classmethod
def get_default(cls) -> Dict[Tuple[str, str], Tuple[Any, Any]]:
return {("c", "Color"): (ColorFactory.DEFAULT, ColorFactory.WHITE)}
| from typing import Tuple, Dict, Any, Type, List
from datetime import datetime
from bson import ObjectId
from django.utils import timezone
from extutils.color import ColorFactory
from flags import AutoReplyContentType, ModelValidityCheckResult
from models import (
Model, AutoReplyModuleModel, AutoReplyContentModel, AutoReplyModuleExecodeModel, AutoReplyModuleTagModel
)
from models.exceptions import ModelConstructionError, InvalidModelError, InvalidModelFieldError
from tests.base import TestModel
__all__ = ["TestAutoReplyModuleModel", "TestAutoReplyContentModel", "TestAutoReplyModuleExecodeModel",
"TestAutoReplyModuleTagModel"]
class TestAutoReplyContentModel(TestModel.TestClass):
KEY_SKIP_CHECK_INVALID = {("t", "ContentType")}
@classmethod
def get_model_class(cls) -> Type[Model]:
return AutoReplyContentModel
@classmethod
def get_default(cls) -> Dict[Tuple[str, str], Tuple[Any, Any]]:
return {("t", "ContentType"): (AutoReplyContentType.TEXT, AutoReplyContentType.IMAGE)}
@classmethod
def get_required(cls) -> Dict[Tuple[str, str], Any]:
return {("c", "Content"): "https://i.imgur.com/p7qm0vx.jpg"}
@classmethod
def get_invalid(cls) -> List[Tuple[Dict[Tuple[str, str], Any], Type[ModelConstructionError]]]:
return [
(
{("c", "Content"): "87A", ("t", "ContentType"): AutoReplyContentType.LINE_STICKER},
InvalidModelError
),
(
{("c", "Content"): "https://google.com", ("t", "ContentType"): AutoReplyContentType.IMAGE},
InvalidModelError
),
(
{("c", "Content"): "", ("t", "ContentType"): AutoReplyContentType.TEXT},
InvalidModelFieldError
)
]
def test_validity_check(self):
data = (
(AutoReplyContentType.IMAGE, "https://i.imgur.com/o4vvhXy.jpg", "https://google.com",
ModelValidityCheckResult.X_AR_CONTENT_NOT_IMAGE),
(AutoReplyContentType.TEXT, "A", "", None),
(AutoReplyContentType.LINE_STICKER, "34404512", "87A",
ModelValidityCheckResult.X_AR_CONTENT_NOT_LINE_STICKER)
)
for content_type, content_init, content_new, validity_result in data:
with self.subTest(content_type=content_type, content_new=content_new):
mdl = self.get_constructed_model(c=content_init, t=content_type)
# Some exception may happen when setting the value, this is the expected behavior
try:
mdl.content = content_new
except Exception:
continue
# If the content can be set, it should invalidate the model
actual_result = mdl.perform_validity_check()
self.assertEqual(actual_result, validity_result, actual_result)
def test_content_html(self):
"""Only testing if the content can be outputted without exception."""
mdls = (
AutoReplyContentModel(Content="X", ContentType=AutoReplyContentType.TEXT),
AutoReplyContentModel(Content="https://i.imgur.com/o4vvhXy.jpg", ContentType=AutoReplyContentType.IMAGE),
AutoReplyContentModel(Content="34404512", ContentType=AutoReplyContentType.LINE_STICKER),
)
for mdl in mdls:
self.assertIsNotNone(mdl.content_html)
self.assertIsNotNone(str(mdl))
channel_oid = ObjectId()
creator_oid = ObjectId()
remover_oid = ObjectId()
refer_oid = ObjectId()
excluded_1 = ObjectId()
tag_1 = ObjectId()
last_used = datetime(2020, 5, 10, 10, 36, tzinfo=timezone.utc)
remove_at = datetime(2020, 5, 20, 10, 36, tzinfo=timezone.utc)
class TestAutoReplyModuleModel(TestModel.TestClass):
@classmethod
def get_model_class(cls) -> Type[Model]:
return AutoReplyModuleModel
@classmethod
def get_optional(cls) -> Dict[Tuple[str, str], Any]:
return {
("rid", "ReferTo"): refer_oid,
("rmv", "RemoverOid"): remover_oid
}
@classmethod
def get_default(cls) -> Dict[Tuple[str, str], Tuple[Any, Any]]:
return {
("at", "Active"): (True, False),
("p", "Pinned"): (False, True),
("pr", "Private"): (False, True),
("cd", "CooldownSec"): (0, 10),
("e", "ExcludedOids"): ([], [excluded_1]),
("t", "TagIds"): ([], [tag_1]),
("c", "CalledCount"): (0, 10),
("l", "LastUsed"): (None, last_used),
("rm", "RemovedAt"): (None, remove_at)
}
@classmethod
def get_required(cls) -> Dict[Tuple[str, str], Any]:
return {
("kw", "Keyword"): {"c": "ABC", "t": AutoReplyContentType.TEXT},
("rp", "Responses"): [{"c": "DEF", "t": AutoReplyContentType.TEXT}],
("ch", "ChannelOid"): channel_oid,
("cr", "CreatorOid"): creator_oid
}
def test_refer(self):
mdl = self.get_constructed_model()
oid = ObjectId()
mdl.refer_to = oid
self.assertTrue(mdl.is_reference)
self.assertEqual(mdl.refer_oid, oid)
def test_kw_repr(self):
mdl = self.get_constructed_model()
self.assertIsNotNone(mdl.keyword_repr)
def test_last_used_repr(self):
mdl = self.get_constructed_model()
self.assertIsNone(mdl.last_used)
mdl.last_used = last_used
self.assertEqual(mdl.last_used, last_used)
def test_remove_at_repr(self):
mdl = self.get_constructed_model()
self.assertIsNone(mdl.removed_at)
mdl.removed_at = remove_at
self.assertEqual(mdl.removed_at, remove_at)
class TestAutoReplyModuleExecodeModel(TestModel.TestClass):
@classmethod
def get_model_class(cls) -> Type[Model]:
return AutoReplyModuleExecodeModel
@classmethod
def get_required(cls) -> Dict[Tuple[str, str], Any]:
return {
("kw", "Keyword"): {"c": "ABC", "t": AutoReplyContentType.TEXT},
("rp", "Responses"): [{"c": "DEF", "t": AutoReplyContentType.TEXT}],
}
@classmethod
def get_default(cls) -> Dict[Tuple[str, str], Tuple[Any, Any]]:
return {
("p", "Pinned"): (False, True),
("pr", "Private"): (False, True),
("cd", "CooldownSec"): (0, 10),
("t", "TagIds"): ([], [tag_1]),
}
class TestAutoReplyModuleTagModel(TestModel.TestClass):
@classmethod
def get_model_class(cls) -> Type[Model]:
return AutoReplyModuleTagModel
@classmethod
def get_required(cls) -> Dict[Tuple[str, str], Any]:
return {("n", "Name"): "Tag"}
@classmethod
def get_default(cls) -> Dict[Tuple[str, str], Tuple[Any, Any]]:
return {("c", "Color"): (ColorFactory.DEFAULT, ColorFactory.WHITE)}
| en | 0.722807 | # Some exception may happen when setting the value, this is the expected behavior # If the content can be set, it should invalidate the model Only testing if the content can be outputted without exception. | 2.125171 | 2 |
ruledxml/tests/test_order.py | meisterluk/ruledxml | 0 | 6621361 | <reponame>meisterluk/ruledxml<filename>ruledxml/tests/test_order.py
#!/usr/bin/env python3
import io
import unittest
import ruledxml
from . import utils
class TestRuledXmlOrder(unittest.TestCase):
def test_030(self):
result = io.BytesIO()
with open(utils.data('030_source.xml')) as src:
ruledxml.run(src, utils.data('030_rules.py'), result)
with open(utils.data('030_target.xml'), 'rb') as target:
utils.xmlEquals(self, result.getvalue(), target.read())
def test_031(self):
result = io.BytesIO()
with open(utils.data('031_source.xml')) as src:
ruledxml.run(src, utils.data('031_rules.py'), result)
with open(utils.data('031_target.xml'), 'rb') as target:
utils.xmlEquals(self, result.getvalue(), target.read())
def run():
unittest.main()
if __name__ == '__main__':
run()
| #!/usr/bin/env python3
import io
import unittest
import ruledxml
from . import utils
class TestRuledXmlOrder(unittest.TestCase):
def test_030(self):
result = io.BytesIO()
with open(utils.data('030_source.xml')) as src:
ruledxml.run(src, utils.data('030_rules.py'), result)
with open(utils.data('030_target.xml'), 'rb') as target:
utils.xmlEquals(self, result.getvalue(), target.read())
def test_031(self):
result = io.BytesIO()
with open(utils.data('031_source.xml')) as src:
ruledxml.run(src, utils.data('031_rules.py'), result)
with open(utils.data('031_target.xml'), 'rb') as target:
utils.xmlEquals(self, result.getvalue(), target.read())
def run():
unittest.main()
if __name__ == '__main__':
run() | fr | 0.221828 | #!/usr/bin/env python3 | 2.376512 | 2 |
main.py | jh-jeong/selective-convolution | 23 | 6621362 | <reponame>jh-jeong/selective-convolution<filename>main.py
from __future__ import division
import sys
import json
import os
import time
import math
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim
import models
from datasets import get_dataset
from utils import Logger
from utils import AverageMeter
from utils import save_checkpoint
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
cudnn.benchmark = True
def update_learning_rate(optimizer, epoch, args, cur_batch, num_batches):
lr_init = args.get('lr_init', 0.1)
num_epochs = args['num_epochs']
T_total = num_epochs * num_batches
T_cur = (epoch % num_epochs) * num_batches + cur_batch
lr = 0.5 * lr_init * (1 + math.cos(math.pi * T_cur / T_total))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
def error_k(output, target, ks=(1,)):
"""Computes the precision@k for the specified values of k"""
max_k = max(ks)
batch_size = target.size(0)
_, pred = output.topk(max_k, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
results = []
for k in ks:
correct_k = correct[:k].view(-1).float().sum(0)
results.append(100.0 - correct_k.mul_(100.0 / batch_size))
return results
def train(epoch, model, criterion, optimizer, dataloader, logger, args):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
error_top1 = AverageMeter()
error_top5 = AverageMeter()
# Switch to train mode
model.train()
num_batches = len(dataloader)
check = time.time()
for n, (images, labels) in enumerate(dataloader):
images, labels = images.to(device), labels.to(device)
data_time.update(time.time() - check)
lr = update_learning_rate(optimizer, epoch, args, n, num_batches)
check = time.time()
outputs = model(images)
loss = criterion(outputs, labels)
# Compute gradient and do SGD step
model.zero_grad()
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Measure elapsed time
batch_time.update(time.time() - check)
# Measure accuracy and record loss
top1, top5 = error_k(outputs.data, labels, ks=(1, 5))
batch_size = images.size(0)
losses.update(loss.item(), batch_size)
error_top1.update(top1.item(), batch_size)
error_top5.update(top5.item(), batch_size)
if n % 10 == 0:
logger.log('[Epoch %3d; %3d] [Time %.3f] [Data %.3f] [Loss %f] [LR %.3f]' %
(epoch, n, batch_time.value, data_time.value, losses.value, lr))
check = time.time()
logger.log('[DONE] [Time %.3f] [Data %.3f] [Loss %f] [Train@1 %.3f] [Train@5 %.3f]' %
(batch_time.average, data_time.average, losses.average,
error_top1.average, error_top5.average))
logger.scalar_summary('loss', losses.average, epoch)
logger.scalar_summary('train_1', error_top1.average, epoch)
logger.scalar_summary('batch_time', batch_time.average, epoch)
def test(epoch, model, criterion, dataloader, logger=None):
batch_time = AverageMeter()
losses = AverageMeter()
error_top1 = AverageMeter()
error_top5 = AverageMeter()
# Switch to eval mode
model.eval()
with torch.no_grad():
for n, (images, labels) in enumerate(dataloader):
images, labels = images.to(device), labels.to(device)
check = time.time()
outputs = model(images)
loss = criterion(outputs, labels)
# Measure elapsed time
batch_time.update(time.time() - check)
# Measure accuracy and record loss
top1, top5 = error_k(outputs.data, labels, ks=(1, 5))
batch_size = images.size(0)
losses.update(loss.item(), batch_size)
error_top1.update(top1.item(), batch_size)
error_top5.update(top5.item(), batch_size)
if n % 10 == 0:
if logger:
logger.log('[Test %3d] [Time %.3f] [Loss %f] [Test@1 %.3f] [Test@5 %.3f]' %
(n, batch_time.value, losses.value, error_top1.value, error_top5.value))
else:
print('[Test %3d] [Time %.3f] [Loss %f] [Test@1 %.3f] [Test@5 %.3f]' %
(n, batch_time.value, losses.value, error_top1.value, error_top5.value))
if logger:
logger.log(' * [Error@1 %.3f] [Error@5 %.3f] [Loss %.3f]' %
(error_top1.average, error_top5.average, losses.average))
logger.scalar_summary('error_1', error_top1.average, epoch)
logger.scalar_summary('error_5', error_top5.average, epoch)
logger.scalar_summary('loss_test', losses.average, epoch)
return error_top1.average
def main(args, fn):
logger = Logger(fn)
hparams = args['model_hparams']
if args['dataset'] in ['cifar10', 'fmnist']:
hparams['n_classes'] = 10
elif args['dataset'] == 'cifar100':
hparams['n_classes'] = 100
elif args['dataset'] == 'tinyimg':
hparams['n_classes'] = 200
elif args['dataset'] == 'imagenet':
hparams['n_classes'] = 1000
logger.log(args)
hparams['dataset'] = args['dataset']
model = models.__dict__[args['model']](hparams)
logger.log(model)
if torch.cuda.is_available():
n_gpus = torch.cuda.device_count()
if n_gpus > 1:
logger.log('Multi-GPU mode: using %d GPUs for training.' % n_gpus)
model = nn.DataParallel(model).cuda()
else:
logger.log('Single-GPU mode.')
model = model.cuda()
else:
n_gpus = 0
# Configure parameters to optimize
pg_normal = []
pg_small = []
for p in model.parameters():
if not p.requires_grad:
continue
elif hasattr(p, 'wd_small') and p.wd_small:
pg_small.append(p)
else:
pg_normal.append(p)
params = [
{'params': pg_normal, 'weight_decay': 1e-4},
{'params': pg_small, 'weight_decay': 1e-5}
]
# Define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.SGD(params,
lr=args.get('lr_init', 0.1),
momentum=args.get('momentum', 0.9),
nesterov=True)
train_set, test_set = get_dataset(args['dataset'])
n_workers = max(8*n_gpus, 4)
train_loader = torch.utils.data.DataLoader(train_set,
shuffle=True,
pin_memory=True,
batch_size=args['batch_size'],
num_workers=n_workers)
test_loader = torch.utils.data.DataLoader(test_set,
shuffle=False,
pin_memory=True,
batch_size=args['batch_size'],
num_workers=n_workers)
best = 100.0
for epoch in range(args['num_epochs']):
train(epoch, model, criterion, optimizer, train_loader, logger, args)
error = test(epoch, model, criterion, test_loader, logger)
# Perform dealloc/realloc for SelectiveConv2d modules
for m in model.modules():
if type(m).__name__ in ['SelectiveConv2d']:
if epoch < 0.5 * args['num_epochs']:
m.dealloc()
m.realloc()
if isinstance(model, nn.DataParallel):
save_states = model.module.state_dict()
else:
save_states = model.state_dict()
is_best = (best > error)
if is_best:
best = error
save_checkpoint(epoch, args, best,
save_states, optimizer.state_dict(),
logger.logdir, is_best)
logger.scalar_summary('best', best, epoch)
logger.log('[Epoch %3d] [Test %5.2f] [Best %5.2f]' % (epoch, error, best))
if __name__ == '__main__':
config_path = sys.argv[1]
with open(config_path) as file:
config = file.read()
print(config)
args = json.loads(config)
config_fn = os.path.split(config_path)[-1].split('.')[0]
main(args, config_fn)
| from __future__ import division
import sys
import json
import os
import time
import math
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim
import models
from datasets import get_dataset
from utils import Logger
from utils import AverageMeter
from utils import save_checkpoint
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
cudnn.benchmark = True
def update_learning_rate(optimizer, epoch, args, cur_batch, num_batches):
lr_init = args.get('lr_init', 0.1)
num_epochs = args['num_epochs']
T_total = num_epochs * num_batches
T_cur = (epoch % num_epochs) * num_batches + cur_batch
lr = 0.5 * lr_init * (1 + math.cos(math.pi * T_cur / T_total))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
def error_k(output, target, ks=(1,)):
"""Computes the precision@k for the specified values of k"""
max_k = max(ks)
batch_size = target.size(0)
_, pred = output.topk(max_k, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
results = []
for k in ks:
correct_k = correct[:k].view(-1).float().sum(0)
results.append(100.0 - correct_k.mul_(100.0 / batch_size))
return results
def train(epoch, model, criterion, optimizer, dataloader, logger, args):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
error_top1 = AverageMeter()
error_top5 = AverageMeter()
# Switch to train mode
model.train()
num_batches = len(dataloader)
check = time.time()
for n, (images, labels) in enumerate(dataloader):
images, labels = images.to(device), labels.to(device)
data_time.update(time.time() - check)
lr = update_learning_rate(optimizer, epoch, args, n, num_batches)
check = time.time()
outputs = model(images)
loss = criterion(outputs, labels)
# Compute gradient and do SGD step
model.zero_grad()
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Measure elapsed time
batch_time.update(time.time() - check)
# Measure accuracy and record loss
top1, top5 = error_k(outputs.data, labels, ks=(1, 5))
batch_size = images.size(0)
losses.update(loss.item(), batch_size)
error_top1.update(top1.item(), batch_size)
error_top5.update(top5.item(), batch_size)
if n % 10 == 0:
logger.log('[Epoch %3d; %3d] [Time %.3f] [Data %.3f] [Loss %f] [LR %.3f]' %
(epoch, n, batch_time.value, data_time.value, losses.value, lr))
check = time.time()
logger.log('[DONE] [Time %.3f] [Data %.3f] [Loss %f] [Train@1 %.3f] [Train@5 %.3f]' %
(batch_time.average, data_time.average, losses.average,
error_top1.average, error_top5.average))
logger.scalar_summary('loss', losses.average, epoch)
logger.scalar_summary('train_1', error_top1.average, epoch)
logger.scalar_summary('batch_time', batch_time.average, epoch)
def test(epoch, model, criterion, dataloader, logger=None):
batch_time = AverageMeter()
losses = AverageMeter()
error_top1 = AverageMeter()
error_top5 = AverageMeter()
# Switch to eval mode
model.eval()
with torch.no_grad():
for n, (images, labels) in enumerate(dataloader):
images, labels = images.to(device), labels.to(device)
check = time.time()
outputs = model(images)
loss = criterion(outputs, labels)
# Measure elapsed time
batch_time.update(time.time() - check)
# Measure accuracy and record loss
top1, top5 = error_k(outputs.data, labels, ks=(1, 5))
batch_size = images.size(0)
losses.update(loss.item(), batch_size)
error_top1.update(top1.item(), batch_size)
error_top5.update(top5.item(), batch_size)
if n % 10 == 0:
if logger:
logger.log('[Test %3d] [Time %.3f] [Loss %f] [Test@1 %.3f] [Test@5 %.3f]' %
(n, batch_time.value, losses.value, error_top1.value, error_top5.value))
else:
print('[Test %3d] [Time %.3f] [Loss %f] [Test@1 %.3f] [Test@5 %.3f]' %
(n, batch_time.value, losses.value, error_top1.value, error_top5.value))
if logger:
logger.log(' * [Error@1 %.3f] [Error@5 %.3f] [Loss %.3f]' %
(error_top1.average, error_top5.average, losses.average))
logger.scalar_summary('error_1', error_top1.average, epoch)
logger.scalar_summary('error_5', error_top5.average, epoch)
logger.scalar_summary('loss_test', losses.average, epoch)
return error_top1.average
def main(args, fn):
logger = Logger(fn)
hparams = args['model_hparams']
if args['dataset'] in ['cifar10', 'fmnist']:
hparams['n_classes'] = 10
elif args['dataset'] == 'cifar100':
hparams['n_classes'] = 100
elif args['dataset'] == 'tinyimg':
hparams['n_classes'] = 200
elif args['dataset'] == 'imagenet':
hparams['n_classes'] = 1000
logger.log(args)
hparams['dataset'] = args['dataset']
model = models.__dict__[args['model']](hparams)
logger.log(model)
if torch.cuda.is_available():
n_gpus = torch.cuda.device_count()
if n_gpus > 1:
logger.log('Multi-GPU mode: using %d GPUs for training.' % n_gpus)
model = nn.DataParallel(model).cuda()
else:
logger.log('Single-GPU mode.')
model = model.cuda()
else:
n_gpus = 0
# Configure parameters to optimize
pg_normal = []
pg_small = []
for p in model.parameters():
if not p.requires_grad:
continue
elif hasattr(p, 'wd_small') and p.wd_small:
pg_small.append(p)
else:
pg_normal.append(p)
params = [
{'params': pg_normal, 'weight_decay': 1e-4},
{'params': pg_small, 'weight_decay': 1e-5}
]
# Define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.SGD(params,
lr=args.get('lr_init', 0.1),
momentum=args.get('momentum', 0.9),
nesterov=True)
train_set, test_set = get_dataset(args['dataset'])
n_workers = max(8*n_gpus, 4)
train_loader = torch.utils.data.DataLoader(train_set,
shuffle=True,
pin_memory=True,
batch_size=args['batch_size'],
num_workers=n_workers)
test_loader = torch.utils.data.DataLoader(test_set,
shuffle=False,
pin_memory=True,
batch_size=args['batch_size'],
num_workers=n_workers)
best = 100.0
for epoch in range(args['num_epochs']):
train(epoch, model, criterion, optimizer, train_loader, logger, args)
error = test(epoch, model, criterion, test_loader, logger)
# Perform dealloc/realloc for SelectiveConv2d modules
for m in model.modules():
if type(m).__name__ in ['SelectiveConv2d']:
if epoch < 0.5 * args['num_epochs']:
m.dealloc()
m.realloc()
if isinstance(model, nn.DataParallel):
save_states = model.module.state_dict()
else:
save_states = model.state_dict()
is_best = (best > error)
if is_best:
best = error
save_checkpoint(epoch, args, best,
save_states, optimizer.state_dict(),
logger.logdir, is_best)
logger.scalar_summary('best', best, epoch)
logger.log('[Epoch %3d] [Test %5.2f] [Best %5.2f]' % (epoch, error, best))
if __name__ == '__main__':
config_path = sys.argv[1]
with open(config_path) as file:
config = file.read()
print(config)
args = json.loads(config)
config_fn = os.path.split(config_path)[-1].split('.')[0]
main(args, config_fn) | en | 0.626182 | Computes the precision@k for the specified values of k # Switch to train mode # Compute gradient and do SGD step # Measure elapsed time # Measure accuracy and record loss # Switch to eval mode # Measure elapsed time # Measure accuracy and record loss # Configure parameters to optimize # Define loss function (criterion) and optimizer # Perform dealloc/realloc for SelectiveConv2d modules | 2.351619 | 2 |
commands/help.py | GAM-2-5-2021/GAMbot | 0 | 6621363 | <filename>commands/help.py
import os
import sys
import discord
from discord.ext import commands
class Help(commands.Cog, name="help"):
def __init__(self, bot):
self.bot = bot
@commands.command(name="help")
async def help(self, context):
"""
Ispisuje sve komande bota.
"""
prefix = '#'
if not isinstance(prefix, str):
prefix = prefix[0]
embed = discord.Embed(title="Pomoć", description="Popis dostupnih akcija:", color=0x42F56C)
for i in self.bot.cogs:
cog = self.bot.get_cog(i.lower())
commands = cog.get_commands()
command_list = [command.name for command in commands]
command_description = [command.help for command in commands]
help_text = '\n'.join(f'{prefix}{n} - {h}' for n, h in zip(command_list, command_description))
if help_text != '':
embed.add_field(name=i.capitalize(), value=f'```{help_text}```', inline=False)
await context.send(embed=embed)
# Default discord.py help komanda
def setup(bot):
bot.add_cog(Help(bot)) | <filename>commands/help.py
import os
import sys
import discord
from discord.ext import commands
class Help(commands.Cog, name="help"):
def __init__(self, bot):
self.bot = bot
@commands.command(name="help")
async def help(self, context):
"""
Ispisuje sve komande bota.
"""
prefix = '#'
if not isinstance(prefix, str):
prefix = prefix[0]
embed = discord.Embed(title="Pomoć", description="Popis dostupnih akcija:", color=0x42F56C)
for i in self.bot.cogs:
cog = self.bot.get_cog(i.lower())
commands = cog.get_commands()
command_list = [command.name for command in commands]
command_description = [command.help for command in commands]
help_text = '\n'.join(f'{prefix}{n} - {h}' for n, h in zip(command_list, command_description))
if help_text != '':
embed.add_field(name=i.capitalize(), value=f'```{help_text}```', inline=False)
await context.send(embed=embed)
# Default discord.py help komanda
def setup(bot):
bot.add_cog(Help(bot)) | sr | 0.205251 | Ispisuje sve komande bota. # Default discord.py help komanda | 2.689829 | 3 |
the_ends/EquationCollection.py | Kevincavender/The-Ends | 1 | 6621364 | from the_ends.EquationErrorCheck import EquationErrorCheck
import re
class EquationCollection:
"""
This Class is for interacting with metadata of the equations entered as a string
Example of equations and variables dictionaries
block_number : used for the solution order of the equations
error: used for giving specific error results to the user
variables and equations are cross listed to allow for storing resulting values in one place
"""
def __init__(self, equation_string=''):
self.equation_string = equation_string
# each equation dictionary within the self.equations dictionary
# will get it's own index number
# (starting at 1 because 0 is being used by the example listed here)
self.equations = {
0: dict(
equation='',
variables=[],
solved=False,
line_number=0,
error='',
block_number=0),
}
self.variables = {
0: dict(
variable_name='',
equations=[],
value=1.0,
initial_value=1.0,
solved=False
),
}
self.number_of_equations = 0
self.number_of_variables = 0
self.variable_list = []
self.equation_list = []
self.solved_variable_list = []
self.solved_equation_list = []
self.master = {}
# will populate the previous variables
self.parse_eqns_from_string(self.equation_string)
self.update_class()
def __repr__(self):
# this needs a detailed printout of what's in the string
return str(self.equations)
def update_class(self):
# this order is important
self.update_variable_dictionary()
self.update_equation_list()
self.update_variable_list()
self.update_number_of_equations()
self.update_number_of_variables()
self.update_solved_equation_list()
self.update_solved_variable_list()
self.update_master()
def parse_eqns_from_string(self, in_string):
"""
the purpose of this function is to take the list of equations,
- remove spaces
- uppercase all characters
- change certain characters for processing
^ -> **
example input
['x = y+ 1', 'y = 2^2', 'squ = 2', 'yo = 20']
example output
['X=Y+1', 'Y=2**2', 'SQU=2', 'YO=20']
*************************
# Current functionality of this code:
# takes in a string with line carriages
# splits it down to individual lines
# enumerates lines
# cleans up whitespace
# capitalizes all letters
"""
list_of_lines = in_string.split("\n")
list_of_lines = list(enumerate(list_of_lines))
print(list_of_lines)
for i in list_of_lines:
#
if i[1] == '':
list_of_lines.remove(i) # remove empty lines
# TODO elif: the string starts with comment symbol (#)
# TODO elif: the string starts with special character ($)
else:
j = list(i)
j[1] = j[1].replace(' ', '') # remove spaces
j[1] = j[1].replace('\t', '') # remove tabs
j[1] = j[1].replace("^", "**") # for python to understand exponential's
# j[1] = j[1].upper() # for upper casing the equations
self.add_equation_to_dictionary(j[1], j[0]+1)
def add_equation_to_dictionary(self, equation_string, line_number=0):
new_equation_number = max(list(self.equations.keys())) + 1
# need to check and parse equation string
# need to check and parse variables in equation
equation = equation_string # must be string
functions_in_equation = self.separate_functions(equation)
variables_in_equation = self.separate_variables(equation) # must be list of strings
self.equations[new_equation_number] = {
"equation": equation,
"variables": variables_in_equation,
"solved": False,
"line_number": line_number,
"error": '',
"block_number": 0,
"root_equation": '',
"functions": functions_in_equation
}
def update_master(self):
self.master = {
"Equations": self.equations,
"Variables": self.variables,
"Number of Equations": self.number_of_equations,
"Equation List": self.equation_list,
"Solved Equations": self.solved_equation_list,
"Number of Variables": self.number_of_variables,
"Variable List": self.variable_list,
"Solved Variable List": self.solved_variable_list
}
def update_variable_dictionary(self):
# looks at equation dictionary to update variable dictionary
# 1: {'equation': 'x=1', "variables": ['X'], 'solved': False, "line_number": 2, "error": ''},
# 1: {'variable_name': 'a', 'equations': ['a=1'], 'value': 1.0, 'initial_value': 1.0, 'solved': False},
# self.update_variable_list()
# self.update_equation_list()
#
# for i in self.equations:
# # i = 3 (int)
# # i is a dictionary of equation information
# current_variable_list = self.equations[i]['variables']
# current_equation = self.equations[i]['equation']
# for j in current_variable_list:
# # j = 'x' (string, variable)
# self.update_variable_list()
# self.update_equation_list()
# new_variables_number = max(list(self.variables.keys())) + 1
# if j not in self.variable_list:
# self.variables[new_variables_number] = {
# 'variable_name': j,
# 'equations': [current_equation],
# 'value': 1.0,
# 'initial_value': 1.0,
# 'solved': False
# }
# print([current_equation])
# print(self.variables[i]['equations'])
# for k in self.variables:
# if (current_equation not in self.variables[k]['equations']) and (k not in current_variable_list):
# self.variables[k]['equations'].append(current_equation)
for i in self.equations:
# Creates new variable dictionary if none exists
variable_list_in_equation = self.equations[i]['variables']
equation_in_equation = self.equations[i]['equation']
for var in variable_list_in_equation:
self.update_variable_list()
new_variables_number = max(list(self.variables.keys())) + 1
if var not in self.variable_list:
self.variables[new_variables_number] = {
'variable_name': var,
'equations': [],
'value': 1.0,
'initial_value': 1.0,
'solved': False
}
for j in self.variables:
variable_in_variables = self.variables[j]['variable_name']
equations_in_variables = self.variables[j]['equations']
check1 = equation_in_equation not in equations_in_variables
check2 = variable_in_variables in variable_list_in_equation
if check1 and check2:
self.variables[j]['equations'].append(equation_in_equation)
return
def parse_line(self, line_string, line_number):
line = EquationErrorCheck(line_string, line_number)
try:
line.checkline()
except SyntaxError:
print("Syntax Error Detected:\n\tLine error")
equations = line_string.split("\n")
equations = equations.replace(' ', '')
return equations
def update_number_of_equations(self):
self.number_of_equations = len(self.equations)
def update_number_of_variables(self):
self.number_of_variables = len(self.variables)
def update_variable_list(self):
self.variable_list = self.update_list_from_dictionary(self.variables, 'variable_name', self.variable_list)
def update_equation_list(self):
self.equation_list = self.update_list_from_dictionary(self.equations, 'equation', self.equation_list)
def update_solved_variable_list(self):
for i in self.variables:
if self.variables[i]['solved'] is True:
self.solved_variable_list.append(self.variables[i]['variable_name'])
self.solved_variable_list = list(set(self.solved_variable_list))
def update_solved_equation_list(self):
for i in self.equations:
if self.equations[i]['solved'] is True:
self.solved_equation_list.append(self.equations[i]['equation'])
self.solved_equation_list = list(set(self.solved_equation_list))
def update_list_from_dictionary(self, dictionary_name: dict, key_name: str, list_name: list) -> list:
# function to update a list taking a key from the variable or equation dictionary
# this is intended to ignore duplicates
for i in dictionary_name:
if dictionary_name[i][key_name] not in list_name:
list_name.append(dictionary_name[i][key_name])
list_name_output = list(set(list_name))
return list_name_output
def separate_functions(self, equation=False):
# TODO integrate function separation here
pass
def separate_variables(self, equation=False):
"""
split equations by the all known operators
store in list for processing
:param equation:
:return:
list of variables (without copies)
"""
variables = []
if equation is False:
for k in self.equations:
# regular expression for splitting strings with given characters
split_equations = re.split(r"[=+\-^*/\\()\[\]]", self.equations[k]['equation'])
for i in split_equations:
if self.is_variable(i) and i not in variables:
variables.append(i)
return variables
elif isinstance(equation, str):
equation = equation.replace(' ','')
split_equations = re.split(r"[=+\-^*/\\()\[\]]", equation)
for i in split_equations:
if self.is_variable(i) and i not in variables:
variables.append(i)
return variables
else:
raise TypeError
def is_variable(self, variable):
if variable is '':
return False
elif variable.isnumeric():
return False
elif self.is_float(variable):
return False
return True
def is_float(self, i):
'''
will determine if a string can be interpreted as a float
:param i: string
:return:
'''
try:
float(i)
return True
except ValueError:
return False
def equation_split(self, equation:str):
# split equations by the equals sign
equation_halves = []
split_equations = re.split(r"[=]", equation)
equation_halves.append(split_equations)
return equation_halves
def reorder_equation_string(self, equation: str):
equation_halves = self.equation_split(equation)
reordered = [equation_halves[0], '-', '(', equation_halves[1], ')']
reordered = ''.join(reordered)
return reordered
def debug_output(self):
from pprint import pprint
print("------------------------------------------")
print("--------------DEBUG-PRINTOUT--------------")
print("------------------------------------------")
pprint(self.master, depth=1)
print("\n\n")
pprint(self.master)
def check_matching_parenthesis(self, equation_string):
# TODO need to integrate into this class
# Use for Function finding/checking
count = 0
for i in equation_string:
if i == "(":
count += 1
elif i == ")":
count -= 1
if count < 0:
return False
return count == 0
if __name__ == "__main__":
EQ = EquationCollection("x=1\ny=2\n\na= x+y\nsqu=sqa(")
EQ.add_equation_to_dictionary("words=1", 4)
EQ.update_class()
EQ.debug_output()
| from the_ends.EquationErrorCheck import EquationErrorCheck
import re
class EquationCollection:
"""
This Class is for interacting with metadata of the equations entered as a string
Example of equations and variables dictionaries
block_number : used for the solution order of the equations
error: used for giving specific error results to the user
variables and equations are cross listed to allow for storing resulting values in one place
"""
def __init__(self, equation_string=''):
self.equation_string = equation_string
# each equation dictionary within the self.equations dictionary
# will get it's own index number
# (starting at 1 because 0 is being used by the example listed here)
self.equations = {
0: dict(
equation='',
variables=[],
solved=False,
line_number=0,
error='',
block_number=0),
}
self.variables = {
0: dict(
variable_name='',
equations=[],
value=1.0,
initial_value=1.0,
solved=False
),
}
self.number_of_equations = 0
self.number_of_variables = 0
self.variable_list = []
self.equation_list = []
self.solved_variable_list = []
self.solved_equation_list = []
self.master = {}
# will populate the previous variables
self.parse_eqns_from_string(self.equation_string)
self.update_class()
def __repr__(self):
# this needs a detailed printout of what's in the string
return str(self.equations)
def update_class(self):
# this order is important
self.update_variable_dictionary()
self.update_equation_list()
self.update_variable_list()
self.update_number_of_equations()
self.update_number_of_variables()
self.update_solved_equation_list()
self.update_solved_variable_list()
self.update_master()
def parse_eqns_from_string(self, in_string):
"""
the purpose of this function is to take the list of equations,
- remove spaces
- uppercase all characters
- change certain characters for processing
^ -> **
example input
['x = y+ 1', 'y = 2^2', 'squ = 2', 'yo = 20']
example output
['X=Y+1', 'Y=2**2', 'SQU=2', 'YO=20']
*************************
# Current functionality of this code:
# takes in a string with line carriages
# splits it down to individual lines
# enumerates lines
# cleans up whitespace
# capitalizes all letters
"""
list_of_lines = in_string.split("\n")
list_of_lines = list(enumerate(list_of_lines))
print(list_of_lines)
for i in list_of_lines:
#
if i[1] == '':
list_of_lines.remove(i) # remove empty lines
# TODO elif: the string starts with comment symbol (#)
# TODO elif: the string starts with special character ($)
else:
j = list(i)
j[1] = j[1].replace(' ', '') # remove spaces
j[1] = j[1].replace('\t', '') # remove tabs
j[1] = j[1].replace("^", "**") # for python to understand exponential's
# j[1] = j[1].upper() # for upper casing the equations
self.add_equation_to_dictionary(j[1], j[0]+1)
def add_equation_to_dictionary(self, equation_string, line_number=0):
new_equation_number = max(list(self.equations.keys())) + 1
# need to check and parse equation string
# need to check and parse variables in equation
equation = equation_string # must be string
functions_in_equation = self.separate_functions(equation)
variables_in_equation = self.separate_variables(equation) # must be list of strings
self.equations[new_equation_number] = {
"equation": equation,
"variables": variables_in_equation,
"solved": False,
"line_number": line_number,
"error": '',
"block_number": 0,
"root_equation": '',
"functions": functions_in_equation
}
def update_master(self):
self.master = {
"Equations": self.equations,
"Variables": self.variables,
"Number of Equations": self.number_of_equations,
"Equation List": self.equation_list,
"Solved Equations": self.solved_equation_list,
"Number of Variables": self.number_of_variables,
"Variable List": self.variable_list,
"Solved Variable List": self.solved_variable_list
}
def update_variable_dictionary(self):
# looks at equation dictionary to update variable dictionary
# 1: {'equation': 'x=1', "variables": ['X'], 'solved': False, "line_number": 2, "error": ''},
# 1: {'variable_name': 'a', 'equations': ['a=1'], 'value': 1.0, 'initial_value': 1.0, 'solved': False},
# self.update_variable_list()
# self.update_equation_list()
#
# for i in self.equations:
# # i = 3 (int)
# # i is a dictionary of equation information
# current_variable_list = self.equations[i]['variables']
# current_equation = self.equations[i]['equation']
# for j in current_variable_list:
# # j = 'x' (string, variable)
# self.update_variable_list()
# self.update_equation_list()
# new_variables_number = max(list(self.variables.keys())) + 1
# if j not in self.variable_list:
# self.variables[new_variables_number] = {
# 'variable_name': j,
# 'equations': [current_equation],
# 'value': 1.0,
# 'initial_value': 1.0,
# 'solved': False
# }
# print([current_equation])
# print(self.variables[i]['equations'])
# for k in self.variables:
# if (current_equation not in self.variables[k]['equations']) and (k not in current_variable_list):
# self.variables[k]['equations'].append(current_equation)
for i in self.equations:
# Creates new variable dictionary if none exists
variable_list_in_equation = self.equations[i]['variables']
equation_in_equation = self.equations[i]['equation']
for var in variable_list_in_equation:
self.update_variable_list()
new_variables_number = max(list(self.variables.keys())) + 1
if var not in self.variable_list:
self.variables[new_variables_number] = {
'variable_name': var,
'equations': [],
'value': 1.0,
'initial_value': 1.0,
'solved': False
}
for j in self.variables:
variable_in_variables = self.variables[j]['variable_name']
equations_in_variables = self.variables[j]['equations']
check1 = equation_in_equation not in equations_in_variables
check2 = variable_in_variables in variable_list_in_equation
if check1 and check2:
self.variables[j]['equations'].append(equation_in_equation)
return
def parse_line(self, line_string, line_number):
line = EquationErrorCheck(line_string, line_number)
try:
line.checkline()
except SyntaxError:
print("Syntax Error Detected:\n\tLine error")
equations = line_string.split("\n")
equations = equations.replace(' ', '')
return equations
def update_number_of_equations(self):
self.number_of_equations = len(self.equations)
def update_number_of_variables(self):
self.number_of_variables = len(self.variables)
def update_variable_list(self):
self.variable_list = self.update_list_from_dictionary(self.variables, 'variable_name', self.variable_list)
def update_equation_list(self):
self.equation_list = self.update_list_from_dictionary(self.equations, 'equation', self.equation_list)
def update_solved_variable_list(self):
for i in self.variables:
if self.variables[i]['solved'] is True:
self.solved_variable_list.append(self.variables[i]['variable_name'])
self.solved_variable_list = list(set(self.solved_variable_list))
def update_solved_equation_list(self):
for i in self.equations:
if self.equations[i]['solved'] is True:
self.solved_equation_list.append(self.equations[i]['equation'])
self.solved_equation_list = list(set(self.solved_equation_list))
def update_list_from_dictionary(self, dictionary_name: dict, key_name: str, list_name: list) -> list:
# function to update a list taking a key from the variable or equation dictionary
# this is intended to ignore duplicates
for i in dictionary_name:
if dictionary_name[i][key_name] not in list_name:
list_name.append(dictionary_name[i][key_name])
list_name_output = list(set(list_name))
return list_name_output
def separate_functions(self, equation=False):
# TODO integrate function separation here
pass
def separate_variables(self, equation=False):
"""
split equations by the all known operators
store in list for processing
:param equation:
:return:
list of variables (without copies)
"""
variables = []
if equation is False:
for k in self.equations:
# regular expression for splitting strings with given characters
split_equations = re.split(r"[=+\-^*/\\()\[\]]", self.equations[k]['equation'])
for i in split_equations:
if self.is_variable(i) and i not in variables:
variables.append(i)
return variables
elif isinstance(equation, str):
equation = equation.replace(' ','')
split_equations = re.split(r"[=+\-^*/\\()\[\]]", equation)
for i in split_equations:
if self.is_variable(i) and i not in variables:
variables.append(i)
return variables
else:
raise TypeError
def is_variable(self, variable):
if variable is '':
return False
elif variable.isnumeric():
return False
elif self.is_float(variable):
return False
return True
def is_float(self, i):
'''
will determine if a string can be interpreted as a float
:param i: string
:return:
'''
try:
float(i)
return True
except ValueError:
return False
def equation_split(self, equation:str):
# split equations by the equals sign
equation_halves = []
split_equations = re.split(r"[=]", equation)
equation_halves.append(split_equations)
return equation_halves
def reorder_equation_string(self, equation: str):
equation_halves = self.equation_split(equation)
reordered = [equation_halves[0], '-', '(', equation_halves[1], ')']
reordered = ''.join(reordered)
return reordered
def debug_output(self):
from pprint import pprint
print("------------------------------------------")
print("--------------DEBUG-PRINTOUT--------------")
print("------------------------------------------")
pprint(self.master, depth=1)
print("\n\n")
pprint(self.master)
def check_matching_parenthesis(self, equation_string):
# TODO need to integrate into this class
# Use for Function finding/checking
count = 0
for i in equation_string:
if i == "(":
count += 1
elif i == ")":
count -= 1
if count < 0:
return False
return count == 0
if __name__ == "__main__":
EQ = EquationCollection("x=1\ny=2\n\na= x+y\nsqu=sqa(")
EQ.add_equation_to_dictionary("words=1", 4)
EQ.update_class()
EQ.debug_output()
| en | 0.704283 | This Class is for interacting with metadata of the equations entered as a string Example of equations and variables dictionaries block_number : used for the solution order of the equations error: used for giving specific error results to the user variables and equations are cross listed to allow for storing resulting values in one place # each equation dictionary within the self.equations dictionary # will get it's own index number # (starting at 1 because 0 is being used by the example listed here) # will populate the previous variables # this needs a detailed printout of what's in the string # this order is important the purpose of this function is to take the list of equations, - remove spaces - uppercase all characters - change certain characters for processing ^ -> ** example input ['x = y+ 1', 'y = 2^2', 'squ = 2', 'yo = 20'] example output ['X=Y+1', 'Y=2**2', 'SQU=2', 'YO=20'] ************************* # Current functionality of this code: # takes in a string with line carriages # splits it down to individual lines # enumerates lines # cleans up whitespace # capitalizes all letters # # remove empty lines # TODO elif: the string starts with comment symbol (#) # TODO elif: the string starts with special character ($) # remove spaces # remove tabs # for python to understand exponential's # j[1] = j[1].upper() # for upper casing the equations # need to check and parse equation string # need to check and parse variables in equation # must be string # must be list of strings # looks at equation dictionary to update variable dictionary # 1: {'equation': 'x=1', "variables": ['X'], 'solved': False, "line_number": 2, "error": ''}, # 1: {'variable_name': 'a', 'equations': ['a=1'], 'value': 1.0, 'initial_value': 1.0, 'solved': False}, # self.update_variable_list() # self.update_equation_list() # # for i in self.equations: # # i = 3 (int) # # i is a dictionary of equation information # current_variable_list = self.equations[i]['variables'] # 
current_equation = self.equations[i]['equation'] # for j in current_variable_list: # # j = 'x' (string, variable) # self.update_variable_list() # self.update_equation_list() # new_variables_number = max(list(self.variables.keys())) + 1 # if j not in self.variable_list: # self.variables[new_variables_number] = { # 'variable_name': j, # 'equations': [current_equation], # 'value': 1.0, # 'initial_value': 1.0, # 'solved': False # } # print([current_equation]) # print(self.variables[i]['equations']) # for k in self.variables: # if (current_equation not in self.variables[k]['equations']) and (k not in current_variable_list): # self.variables[k]['equations'].append(current_equation) # Creates new variable dictionary if none exists # function to update a list taking a key from the variable or equation dictionary # this is intended to ignore duplicates # TODO integrate function separation here split equations by the all known operators store in list for processing :param equation: :return: list of variables (without copies) # regular expression for splitting strings with given characters will determine if a string can be interpreted as a float :param i: string :return: # split equations by the equals sign # TODO need to integrate into this class # Use for Function finding/checking | 3.975167 | 4 |
python_image_object_detection/scratch_research.py | shanedevane/python-image-object-detection | 1 | 6621365 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import matplotlib.pyplot as plt
import timeit
"""
- parse an image and extract as much object data as possible
- ie. instead of calling out to a API?
- how many edges? (high edges = high detail, city or crowd scene?)
- taken during day or night
- how much pixesl in foreground vs background
- color mean
"""
IMAGE_RESOURCE = '../Resources/dog.jpg'
img = cv2.imread(IMAGE_RESOURCE, cv2.IMREAD_GRAYSCALE)
# img = cv2.imread(IMAGE_RESOURCE, cv2.IMREAD_COLOR)
def avg_img_intensity_manual(image_path=IMAGE_RESOURCE):
    """Return the mean grayscale pixel intensity of *image_path*, computed with
    explicit Python loops (the slow reference implementation).

    Bug fix: the original read ``img.shape[0]`` for BOTH dimensions, so it
    averaged only the top-left rows x rows square of the image — which is why
    its printed result (119.712...) disagreed with the NumPy version
    (119.306...).  ``cols`` now correctly uses ``shape[1]``.
    """
    img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    rows = img.shape[0]
    cols = img.shape[1]  # fix: was shape[0]
    pixel_bgr = 0.0
    for row in range(rows):
        for col in range(cols):
            pixel_bgr += float(img[row, col])
    avg_pixel_bgr = pixel_bgr / float(rows * cols)
    return avg_pixel_bgr
def avg_img_intensity_numpy():
    """Mean grayscale intensity of the test image, computed by NumPy."""
    grayscale = cv2.imread(IMAGE_RESOURCE, cv2.IMREAD_GRAYSCALE)
    return np.average(grayscale)
def calc_mean():
    """Per-channel mean of the test image as returned by cv2.mean (4-tuple)."""
    bgr_image = cv2.imread(IMAGE_RESOURCE, cv2.IMREAD_COLOR)
    return cv2.mean(bgr_image)
def calc_good_features():
    """Detect up to 20 Shi-Tomasi corners and display them with matplotlib."""
    img = cv2.imread(IMAGE_RESOURCE, cv2.IMREAD_COLOR)
    grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # maxCorners=20, qualityLevel=0.5, minDistance=10px between corners
    corners = cv2.goodFeaturesToTrack(grey, 20, 0.5, 10)
    corners = np.int0(corners)
    for corner in corners:
        x, y = corner.ravel()
        # mark each detected corner with a small filled circle
        cv2.circle(img, (x, y), 3, 255, -1)
    plt.imshow(img)
    plt.show()
    # cv2.imshow('corners', img)
def calc_harris_corner_detection():
    """Run Harris corner detection and paint strong corners red in an OpenCV window."""
    img = cv2.imread(IMAGE_RESOURCE, cv2.IMREAD_COLOR)
    grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # blockSize=2, ksize=3 (Sobel aperture), k=0.04 (Harris detector free parameter)
    corners = cv2.cornerHarris(grey, 2, 3, 0.04)
    corners = cv2.dilate(corners, None)  # dilate so marked corners are easier to see
    img[corners > 0.01 * corners.max()] = [0, 0, 255]  # keep responses above 1% of the max
    cv2.imshow('corners', img)
def calc_surf_keypoints_and_descriptors():
    """Detect SURF keypoints (Hessian threshold 5000) and show them in an OpenCV window."""
    # SURF is a licensed algorithm!!
    img = cv2.imread(IMAGE_RESOURCE, cv2.IMREAD_GRAYSCALE)
    surf = cv2.xfeatures2d.SURF_create(5000)
    kp, des = surf.detectAndCompute(img, None)
    # print(kp)
    # print(des)
    # print(surf.descriptorSize())
    # NOTE(review): the third argument of drawKeypoints is the output image;
    # passing the descriptor array here presumably relies on it being
    # overwritten — confirm against the OpenCV docs.
    img2 = cv2.drawKeypoints(img, kp, des, (255, 0, 0), 4)
    cv2.imshow('surf', img2)
def calc_orbs_keypoints():
    """Detect ORB keypoints and display them (green) in an OpenCV window."""
    img = cv2.imread(IMAGE_RESOURCE, cv2.IMREAD_GRAYSCALE)
    # presumably disables the OpenCL path to avoid ORB issues on some builds — confirm
    cv2.ocl.setUseOpenCL(False)
    orb = cv2.ORB_create()
    # kp = orb.detect(img, None)
    kp, des = orb.detectAndCompute(img, None)
    img2 = cv2.drawKeypoints(img, kp, des, color=(0, 255, 0), flags=0)
    cv2.imshow('orbs', img2)
# DO FAST NEXT
# http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_feature2d/py_fast/py_fast.html#fast
# Manual test harness: flip exactly one of these flags to True to try a detector.
if True:
    calc_orbs_keypoints()
if False:
    calc_surf_keypoints_and_descriptors()
if False:
    calc_harris_corner_detection()
if False:
    calc_good_features()
if False:
    #(124.68195753094464, 121.6443755826813, 112.67007171242899, 0.0)
    print(calc_mean())
if False:
    # NOTE(review): the two averages below differ because
    # avg_img_intensity_manual reads img.shape[0] for both dimensions.
    # 119.71247862842034: avg "intensity" as the img was open as grayscale
    print(avg_img_intensity_manual())
    # 119.306965429
    print(avg_img_intensity_numpy())
# setup = '''
# from __main__ import avg_img_intensity_manual, avg_img_intensity_numpy
# '''
# print(min(timeit.Timer('avg_img_intensity_manual()', setup=setup).repeat(7, 1000)))
# print(timeit.timeit(avg_img_intensity_manual(), setup="from __main__ import avg_img_intensity_manual"))
# print(timeit.timeit(avg_img_intensity_numpy(), setup="from __main__ import avg_img_intensity_numpy"))
# print(timeit.timeit("avg_img_intensity_manual()", setup="from __main__ import avg_img_intensity_manual"))
#
# print(timeit.timeit("avg_img_intensity_numpy()", setup="from __main__ import avg_img_intensity_numpy"))
# lap = cv2.Laplacian(img, cv2.CV_64F) # edges
# read to greyscale
# output image onto color
# cv2.imshow('test', img)
# cv2.imshow('test', lap)
# Block until any key is pressed in an OpenCV window, then close all windows.
cv2.waitKey(0)
cv2.destroyAllWindows()
# plt.imshow(img, cmap='gray')
# plt.show()
#
# for root, dirs, filenames in os.walk(dir):
# for file in filenames:
# f = open(dir + file, 'rb')
#
# with ExtractorEngine(f) as extractor:
# extractor.run_command_line_exif_tool = True
# extractor.execute()
# extractor.bulk_debug_all_print()
#
#
# | # -*- coding: utf-8 -*-
import cv2
import numpy as np
import matplotlib.pyplot as plt
import timeit
"""
- parse an image and extract as much object data as possible
- ie. instead of calling out to a API?
- how many edges? (high edges = high detail, city or crowd scene?)
- taken during day or night
- how much pixesl in foreground vs background
- color mean
"""
IMAGE_RESOURCE = '../Resources/dog.jpg'
img = cv2.imread(IMAGE_RESOURCE, cv2.IMREAD_GRAYSCALE)
# img = cv2.imread(IMAGE_RESOURCE, cv2.IMREAD_COLOR)
def avg_img_intensity_manual():
img = cv2.imread(IMAGE_RESOURCE, cv2.IMREAD_GRAYSCALE)
rows = img.shape[0]
cols = img.shape[0]
pixel_bgr = 0.0
for row in range(rows):
for col in range(cols):
pixel_bgr += float(img[row, col])
avg_pixel_bgr = pixel_bgr / float(rows*cols)
return avg_pixel_bgr
def avg_img_intensity_numpy():
img = cv2.imread(IMAGE_RESOURCE, cv2.IMREAD_GRAYSCALE)
average = np.average(img)
return average
def calc_mean():
img = cv2.imread(IMAGE_RESOURCE, cv2.IMREAD_COLOR)
mean = cv2.mean(img)
return mean
def calc_good_features():
img = cv2.imread(IMAGE_RESOURCE, cv2.IMREAD_COLOR)
grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
corners = cv2.goodFeaturesToTrack(grey, 20, 0.5, 10)
corners = np.int0(corners)
for corner in corners:
x, y = corner.ravel()
cv2.circle(img, (x, y), 3, 255, -1)
plt.imshow(img)
plt.show()
# cv2.imshow('corners', img)
def calc_harris_corner_detection():
img = cv2.imread(IMAGE_RESOURCE, cv2.IMREAD_COLOR)
grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
corners = cv2.cornerHarris(grey, 2, 3, 0.04)
corners = cv2.dilate(corners, None)
img[corners > 0.01 * corners.max()] = [0, 0, 255]
cv2.imshow('corners', img)
def calc_surf_keypoints_and_descriptors():
# SURF is a licensed algorithm!!
img = cv2.imread(IMAGE_RESOURCE, cv2.IMREAD_GRAYSCALE)
surf = cv2.xfeatures2d.SURF_create(5000)
kp, des = surf.detectAndCompute(img, None)
# print(kp)
# print(des)
# print(surf.descriptorSize())
img2 = cv2.drawKeypoints(img, kp, des, (255, 0, 0), 4)
cv2.imshow('surf', img2)
def calc_orbs_keypoints():
img = cv2.imread(IMAGE_RESOURCE, cv2.IMREAD_GRAYSCALE)
cv2.ocl.setUseOpenCL(False)
orb = cv2.ORB_create()
# kp = orb.detect(img, None)
kp, des = orb.detectAndCompute(img, None)
img2 = cv2.drawKeypoints(img, kp, des, color=(0, 255, 0), flags=0)
cv2.imshow('orbs', img2)
# DO FAST NEXT
# http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_feature2d/py_fast/py_fast.html#fast
if True:
calc_orbs_keypoints()
if False:
calc_surf_keypoints_and_descriptors()
if False:
calc_harris_corner_detection()
if False:
calc_good_features()
if False:
#(124.68195753094464, 121.6443755826813, 112.67007171242899, 0.0)
print(calc_mean())
if False:
# 119.71247862842034: avg "intensity" as the img was open as grayscale
print(avg_img_intensity_manual())
# 119.306965429
print(avg_img_intensity_numpy())
# setup = '''
# from __main__ import avg_img_intensity_manual, avg_img_intensity_numpy
# '''
# print(min(timeit.Timer('avg_img_intensity_manual()', setup=setup).repeat(7, 1000)))
# print(timeit.timeit(avg_img_intensity_manual(), setup="from __main__ import avg_img_intensity_manual"))
# print(timeit.timeit(avg_img_intensity_numpy(), setup="from __main__ import avg_img_intensity_numpy"))
# print(timeit.timeit("avg_img_intensity_manual()", setup="from __main__ import avg_img_intensity_manual"))
#
# print(timeit.timeit("avg_img_intensity_numpy()", setup="from __main__ import avg_img_intensity_numpy"))
# lap = cv2.Laplacian(img, cv2.CV_64F) # edges
# read to greyscale
# output image onto color
# cv2.imshow('test', img)
# cv2.imshow('test', lap)
cv2.waitKey(0)
cv2.destroyAllWindows()
# plt.imshow(img, cmap='gray')
# plt.show()
#
# for root, dirs, filenames in os.walk(dir):
# for file in filenames:
# f = open(dir + file, 'rb')
#
# with ExtractorEngine(f) as extractor:
# extractor.run_command_line_exif_tool = True
# extractor.execute()
# extractor.bulk_debug_all_print()
#
#
# | en | 0.563212 | # -*- coding: utf-8 -*- - parse an image and extract as much object data as possible - ie. instead of calling out to a API? - how many edges? (high edges = high detail, city or crowd scene?) - taken during day or night - how much pixesl in foreground vs background - color mean # img = cv2.imread(IMAGE_RESOURCE, cv2.IMREAD_COLOR) # cv2.imshow('corners', img) # SURF is a licensed algorithm!! # print(kp) # print(des) # print(surf.descriptorSize()) # kp = orb.detect(img, None) # DO FAST NEXT # http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_feature2d/py_fast/py_fast.html#fast #(124.68195753094464, 121.6443755826813, 112.67007171242899, 0.0) # 119.71247862842034: avg "intensity" as the img was open as grayscale # 119.306965429 # setup = ''' # from __main__ import avg_img_intensity_manual, avg_img_intensity_numpy # ''' # print(min(timeit.Timer('avg_img_intensity_manual()', setup=setup).repeat(7, 1000))) # print(timeit.timeit(avg_img_intensity_manual(), setup="from __main__ import avg_img_intensity_manual")) # print(timeit.timeit(avg_img_intensity_numpy(), setup="from __main__ import avg_img_intensity_numpy")) # print(timeit.timeit("avg_img_intensity_manual()", setup="from __main__ import avg_img_intensity_manual")) # # print(timeit.timeit("avg_img_intensity_numpy()", setup="from __main__ import avg_img_intensity_numpy")) # lap = cv2.Laplacian(img, cv2.CV_64F) # edges # read to greyscale # output image onto color # cv2.imshow('test', img) # cv2.imshow('test', lap) # plt.imshow(img, cmap='gray') # plt.show() # # for root, dirs, filenames in os.walk(dir): # for file in filenames: # f = open(dir + file, 'rb') # # with ExtractorEngine(f) as extractor: # extractor.run_command_line_exif_tool = True # extractor.execute() # extractor.bulk_debug_all_print() # # # | 2.998214 | 3 |
dotaenv/dota_runner.py | niksaz/dota2-replay-recorder | 0 | 6621366 | <reponame>niksaz/dota2-replay-recorder
import time
import subprocess
import pyautogui as gui
def make_sure_dota_is_launched():
    """Launch Dota 2 through Steam (via macOS Spotlight) unless it is already running.

    Drives the desktop with pyautogui; every click coordinate below is
    hard-coded and assumes one specific screen resolution/window layout —
    TODO confirm before running on another machine.
    """
    if _is_dota_launched():
        # _bring_into_focus()
        return
    # bring up spotlight search
    gui.hotkey('command', 'space')
    time.sleep(1)
    # search for steam (assuming it is already launched)
    gui.typewrite('steam', interval=0.1)
    gui.press('enter')
    time.sleep(1)
    # search for Dota 2 in the library
    gui.click(x=50, y=110)
    gui.typewrite('dota', interval=0.1)
    # press play
    gui.click(x=335, y=225, pause=20)  # long pause gives the game time to load
def start_game():
    """Click through the Dota 2 UI to create a lobby and start a match (hard-coded coordinates)."""
    # start
    gui.click(x=974, y=668, pause=0.5)
    # create lobby
    gui.click(x=892, y=353, pause=2)
    # start game
    gui.click(x=974, y=668, pause=1)
def set_timescale():
    """Open the in-game console, enable cheats, and speed the game up 5x.

    Assumes the console toggle is bound to the backslash key.
    """
    gui.press('\\', pause=0.1)  # open console
    gui.typewrite('sv_cheats 1', interval=0.05, pause=0.3)
    gui.press('enter', pause=0.1)
    gui.typewrite('host_timescale 5', interval=0.05, pause=0.3)
    gui.press('enter', pause=0.1)
    gui.press('\\', pause=0.5)  # close console
def restart_game():
    """Focus the game, issue the console 'restart' command, then dismiss popups."""
    _bring_into_focus()
    gui.press('\\', pause=0.1)  # open console
    gui.typewrite('restart', interval=0.05, pause=0.3)
    gui.press('enter', pause=0.1)
    gui.press('\\', pause=0.1)  # close console
    time.sleep(10)  # wait for the restart to take effect
    # Press keys to speed up Dota 2 launching
    gui.press('esc', pause=1)
    gui.press('esc', pause=1)
    gui.press('esc', pause=1)
    gui.press('esc', pause=1)
    gui.press('esc', pause=1)
def close_game():
    """Disconnect from the current match and fully exit Dota 2 (hard-coded coordinates)."""
    _bring_into_focus()
    # bring up the menu
    gui.click(x=373, y=223, pause=1)
    # disconnect
    gui.click(x=980, y=671, pause=1)
    # confirm it
    gui.click(x=680, y=488, pause=2)
    # exit
    gui.click(x=1068, y=228, pause=1)
    # confirm it and wait for complete closure
    gui.click(x=680, y=475, pause=15)
def _bring_into_focus():
    """Click fixed screen positions to give the Dota 2 window keyboard focus.

    Coordinates are hard-coded for one particular setup — TODO confirm.
    """
    gui.moveTo(967, 1000, pause=0.8)
    gui.click(967, 1000, pause=0.1)
    gui.click(750, 400, pause=0.1)
def _is_dota_launched():
    """Return True when a 'dota 2 beta' process appears in the process listing."""
    return b"dota 2 beta" in _find_process("dota")
def _find_process(process_name):
ps = subprocess.Popen("ps -ef | grep " + process_name,
shell=True, stdout=subprocess.PIPE)
output = ps.stdout.read()
ps.stdout.close()
ps.wait()
return output
def run():
    """Full automation pass: ensure the game is running, speed it up, start a lobby match."""
    make_sure_dota_is_launched()
    set_timescale()
    start_game()
if __name__ == '__main__':
    run()
| import time
import subprocess
import pyautogui as gui
def make_sure_dota_is_launched():
if _is_dota_launched():
# _bring_into_focus()
return
# bring up spotlight search
gui.hotkey('command', 'space')
time.sleep(1)
# search for steam (assuming it is already launched)
gui.typewrite('steam', interval=0.1)
gui.press('enter')
time.sleep(1)
# search for Dota 2 in the library
gui.click(x=50, y=110)
gui.typewrite('dota', interval=0.1)
# press play
gui.click(x=335, y=225, pause=20)
def start_game():
# start
gui.click(x=974, y=668, pause=0.5)
# create lobby
gui.click(x=892, y=353, pause=2)
# start game
gui.click(x=974, y=668, pause=1)
def set_timescale():
gui.press('\\', pause=0.1)
gui.typewrite('sv_cheats 1', interval=0.05, pause=0.3)
gui.press('enter', pause=0.1)
gui.typewrite('host_timescale 5', interval=0.05, pause=0.3)
gui.press('enter', pause=0.1)
gui.press('\\', pause=0.5)
def restart_game():
_bring_into_focus()
gui.press('\\', pause=0.1)
gui.typewrite('restart', interval=0.05, pause=0.3)
gui.press('enter', pause=0.1)
gui.press('\\', pause=0.1)
time.sleep(10)
# Press keys to speed up Dota 2 launching
gui.press('esc', pause=1)
gui.press('esc', pause=1)
gui.press('esc', pause=1)
gui.press('esc', pause=1)
gui.press('esc', pause=1)
def close_game():
_bring_into_focus()
# bring up the menu
gui.click(x=373, y=223, pause=1)
# disconnect
gui.click(x=980, y=671, pause=1)
# confirm it
gui.click(x=680, y=488, pause=2)
# exit
gui.click(x=1068, y=228, pause=1)
# confirm it and wait for complete closure
gui.click(x=680, y=475, pause=15)
def _bring_into_focus():
gui.moveTo(967, 1000, pause=0.8)
gui.click(967, 1000, pause=0.1)
gui.click(750, 400, pause=0.1)
def _is_dota_launched():
return _find_process("dota").find(b"dota 2 beta") != -1
def _find_process(process_name):
ps = subprocess.Popen("ps -ef | grep " + process_name,
shell=True, stdout=subprocess.PIPE)
output = ps.stdout.read()
ps.stdout.close()
ps.wait()
return output
def run():
make_sure_dota_is_launched()
set_timescale()
start_game()
if __name__ == '__main__':
run() | en | 0.841723 | # _bring_into_focus() # bring up spotlight search # search for steam (assuming it is already launched) # search for Dota 2 in the library # press play # start # create lobby # start game # Press keys to speed up Dota 2 launching # bring up the menu # disconnect # confirm it # exit # confirm it and wait for complete closure | 2.621089 | 3 |
mysite/core/admin.py | pybites/myreadinglist | 1 | 6621367 | <filename>mysite/core/admin.py
from django.contrib import admin
from .models import Book, Search, Like, Status
# Expose each model in the Django admin with the default ModelAdmin options.
admin.site.register(Book)
admin.site.register(Search)
admin.site.register(Like)
admin.site.register(Status)
| <filename>mysite/core/admin.py
from django.contrib import admin
from .models import Book, Search, Like, Status
admin.site.register(Book)
admin.site.register(Search)
admin.site.register(Like)
admin.site.register(Status)
| none | 1 | 1.436373 | 1 | |
scripts/quest/q25500e.py | G00dBye/YYMS | 54 | 6621368 | <filename>scripts/quest/q25500e.py<gh_stars>10-100
# Created by MechAviv
# Quest ID :: 25500
# Eclipse and Sunfire
# NPC 1032209 delivers this quest dialogue.
sm.setSpeakerID(1032209)
# Read the player's practice counters stored on quest 25502.
LIGHT = sm.getQuestEx(25502, "light")
DARK = sm.getQuestEx(25502, "dark")
# Report both counts to the player (#b/#k are presumably client text-color codes — confirm).
sm.sendSayOkay("I guess you're not quite ready to fully control Light and Darkness. Why don't you practice a little more? You reached Sunfire #b" + str(LIGHT) + " times#k and Eclipse #b" + str(DARK) + " times#k.")
# Created by MechAviv
# Quest ID :: 25500
# Eclipse and Sunfire
sm.setSpeakerID(1032209)
LIGHT = sm.getQuestEx(25502, "light")
DARK = sm.getQuestEx(25502, "dark")
sm.sendSayOkay("I guess you're not quite ready to fully control Light and Darkness. Why don't you practice a little more? You reached Sunfire #b" + str(LIGHT) + " times#k and Eclipse #b" + str(DARK) + " times#k.") | en | 0.535596 | # Created by MechAviv # Quest ID :: 25500 # Eclipse and Sunfire #b" + str(LIGHT) + " times#k and Eclipse #b" + str(DARK) + " times#k.") | 1.553737 | 2 |
Bmw_cars/Bmw.py | kchaitugit96/Projects | 0 | 6621369 | <filename>Bmw_cars/Bmw.py
class Bmw:
    """Tiny demo class holding a fixed list of BMW model names."""

    def __init__(self):
        # Same fixed inventory as before.
        self.models = ['i8', 'x2', 'x6']

    def models_bmw(self):
        """Print a header followed by each model name on its own line."""
        print("models are :")
        for name in self.models:
            print(name)

beamer = Bmw()
beamer.models_bmw()
| <filename>Bmw_cars/Bmw.py
class Bmw:
def __init__(self):
self.models=['i8','x2','x6']
def models_bmw(self):
print("models are :")
for models in self.models:
print(models)
beamer=Bmw()
beamer.models_bmw()
| none | 1 | 2.677627 | 3 | |
perfect_fit_prospectus/views.py | froddd/great-international-ui | 1 | 6621370 | import boto3
from botocore.exceptions import ClientError
from django.http import HttpResponseRedirect, Http404
from django.conf import settings
from django.contrib.messages.views import SuccessMessageMixin
from django.views.generic import FormView, TemplateView
from django.views.generic.base import View
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from directory_components import mixins
from pir_client.client import pir_api_client
from core import helpers
from core.mixins import InternationalHeaderMixin
from core.header_config import tier_one_nav_items
from perfect_fit_prospectus.forms import PerfectFitProspectusForm
class PerfectFitBaseView(
    mixins.CountryDisplayMixin,
    InternationalHeaderMixin,
    mixins.GA360Mixin
):
    """Shared base for Perfect Fit Prospectus views: header section + GA360 tagging.

    Subclasses must define ``page_type``; it is used to look up the GA360
    analytics data attached to every response.
    """
    header_section = tier_one_nav_items.EXPAND
    def dispatch(self, request, *args, **kwargs):
        # Resolve the response first, then attach the GA360 payload for this page.
        dispatch_result = super().dispatch(request, *args, **kwargs)
        ga360_data = helpers.get_ga_data_for_page(self.page_type)
        self.set_ga360_payload(
            page_id=self.page_type,
            business_unit=ga360_data['business_unit'],
            site_section=ga360_data['site_section'],
            site_subsection=ga360_data['site_subsection']
        )
        return dispatch_result
class PerfectFitProspectusMainView(PerfectFitBaseView, SuccessMessageMixin, FormView):
    """Render and process the Perfect Fit Prospectus request form."""
    form_class = PerfectFitProspectusForm
    template_name = 'perfect_fit_prospectus/index.html'
    success_url = reverse_lazy('perfect_fit_prospectus:success')
    success_message = _(
        'Thank You. Your Perfect Fit Prospectus has been emailed to %(email)s'
    )
    page_type = 'PerfectFitFormPage'
    def form_valid(self, form):
        """Forward the validated form data to the PIR API before redirecting."""
        data = form.cleaned_data
        response = pir_api_client.create_report(data)
        # Raise on a non-2xx reply so a failed report request is not shown as success.
        response.raise_for_status()
        return super().form_valid(form)
    def get_form_kwargs(self):
        """Inject sector/country choices fetched from the PIR API OPTIONS metadata."""
        kwargs = super().get_form_kwargs()
        response = pir_api_client.get_options()
        options = response.json()['actions']['POST']
        # Build (value, display_name) pairs in the shape the form's choice fields expect.
        sector_choices = [
            (sector['value'], sector['display_name'])
            for sector in options['sector']['choices']
        ]
        country_choices = [
            (country['value'], country['display_name'])
            for country in options['country']['choices']
        ]
        kwargs['sector_choices'] = sector_choices
        kwargs['country_choices'] = country_choices
        return kwargs
class PerfectFitProspectusSuccessView(PerfectFitBaseView, TemplateView):
    """Static 'thank you' page shown after a prospectus request is submitted."""
    template_name = 'perfect_fit_prospectus/success.html'
    page_type = 'PerfectFitFormSuccessPage'
class PerfectFitProspectusReportProxyView(View):
    """Redirect the caller to a short-lived S3 URL for a generated prospectus PDF.

    Returns 404 when the object does not exist (or is not accessible).
    """
    def get(self, request, filename):
        # Dedicated credentials for the PDF store bucket.
        client = boto3.client(
            's3',
            aws_access_key_id=settings.PFP_AWS_S3_PDF_STORE_ACCESS_KEY_ID,
            aws_secret_access_key=settings.PFP_AWS_S3_PDF_STORE_SECRET_ACCESS_KEY, # NOQA
            region_name=settings.PFP_AWS_S3_PDF_STORE_BUCKET_REGION,
        )
        try:
            # HEAD the object purely to verify existence/permissions before redirecting.
            client.head_object(
                Bucket=settings.PFP_AWS_S3_PDF_STORE_BUCKET_NAME, Key=filename
            )
        except ClientError:
            raise Http404
        # Presigned GET URL, valid for one hour.
        url = client.generate_presigned_url(
            'get_object',
            Params={
                'Bucket': settings.PFP_AWS_S3_PDF_STORE_BUCKET_NAME,
                'Key': filename
            },
            ExpiresIn=3600
        )
        return HttpResponseRedirect(url)
| import boto3
from botocore.exceptions import ClientError
from django.http import HttpResponseRedirect, Http404
from django.conf import settings
from django.contrib.messages.views import SuccessMessageMixin
from django.views.generic import FormView, TemplateView
from django.views.generic.base import View
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from directory_components import mixins
from pir_client.client import pir_api_client
from core import helpers
from core.mixins import InternationalHeaderMixin
from core.header_config import tier_one_nav_items
from perfect_fit_prospectus.forms import PerfectFitProspectusForm
class PerfectFitBaseView(
mixins.CountryDisplayMixin,
InternationalHeaderMixin,
mixins.GA360Mixin
):
header_section = tier_one_nav_items.EXPAND
def dispatch(self, request, *args, **kwargs):
dispatch_result = super().dispatch(request, *args, **kwargs)
ga360_data = helpers.get_ga_data_for_page(self.page_type)
self.set_ga360_payload(
page_id=self.page_type,
business_unit=ga360_data['business_unit'],
site_section=ga360_data['site_section'],
site_subsection=ga360_data['site_subsection']
)
return dispatch_result
class PerfectFitProspectusMainView(PerfectFitBaseView, SuccessMessageMixin, FormView):
form_class = PerfectFitProspectusForm
template_name = 'perfect_fit_prospectus/index.html'
success_url = reverse_lazy('perfect_fit_prospectus:success')
success_message = _(
'Thank You. Your Perfect Fit Prospectus has been emailed to %(email)s'
)
page_type = 'PerfectFitFormPage'
def form_valid(self, form):
data = form.cleaned_data
response = pir_api_client.create_report(data)
response.raise_for_status()
return super().form_valid(form)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
response = pir_api_client.get_options()
options = response.json()['actions']['POST']
sector_choices = [
(sector['value'], sector['display_name'])
for sector in options['sector']['choices']
]
country_choices = [
(country['value'], country['display_name'])
for country in options['country']['choices']
]
kwargs['sector_choices'] = sector_choices
kwargs['country_choices'] = country_choices
return kwargs
class PerfectFitProspectusSuccessView(PerfectFitBaseView, TemplateView):
template_name = 'perfect_fit_prospectus/success.html'
page_type = 'PerfectFitFormSuccessPage'
class PerfectFitProspectusReportProxyView(View):
def get(self, request, filename):
client = boto3.client(
's3',
aws_access_key_id=settings.PFP_AWS_S3_PDF_STORE_ACCESS_KEY_ID,
aws_secret_access_key=settings.PFP_AWS_S3_PDF_STORE_SECRET_ACCESS_KEY, # NOQA
region_name=settings.PFP_AWS_S3_PDF_STORE_BUCKET_REGION,
)
try:
client.head_object(
Bucket=settings.PFP_AWS_S3_PDF_STORE_BUCKET_NAME, Key=filename
)
except ClientError:
raise Http404
url = client.generate_presigned_url(
'get_object',
Params={
'Bucket': settings.PFP_AWS_S3_PDF_STORE_BUCKET_NAME,
'Key': filename
},
ExpiresIn=3600
)
return HttpResponseRedirect(url)
| none | 1 | 1.797042 | 2 | |
Codewars/8kyu/all-star-code-challenge-number-18/Python/solution1.py | RevansChen/online-judge | 7 | 6621371 | # Python - 3.6.0
def str_count(string, letter):
    """Return how many times *letter* occurs in *string*.

    Rewritten from a named lambda to a def (PEP 8 E731); same callable
    name and positional parameters, so callers are unaffected.
    """
    return string.count(letter)
| # Python - 3.6.0
str_count = lambda string, letter: string.count(letter)
| fi | 0.099875 | # Python - 3.6.0 | 2.701595 | 3 |
analytic_functions.py | jlmonge/FlaskDatabaseQuery | 0 | 6621372 | <reponame>jlmonge/FlaskDatabaseQuery
#this file will contain all the functions that we use to create the data that we will pass to the graphs
from datetime import date
from decimal import Decimal, DecimalException
# ----- check_float() -----
# Helper function for analytics, used for input validation.
# Passes in the value to be tested.
# Makes no external function calls.
# Checks the value can be expressed as a floating point number.
# Returns TRUE if valid, FALSE if invalid.
# -------------
def check_float(potential_float):
    """Return True when the value parses as a float, otherwise False."""
    try:
        float(potential_float)
    except ValueError:
        return False
    return True
# --------------------------
# ----- most_funded_category_per_year() -----
# Helper function for analytics, namely... well, analytics_most_funded_category()
# Passes in the year to test for, and the datafile to be read.
# Makes no external function calls.
# Reads each entry, finds the pledged value, and increments it to the corresponding category if the year is correct.
# Returns a list containing the category with the highest amount pledged for the requested year, and that amount.
# ---------------------
def most_funded_category_per_year(year, file_data):
    """Return [amount, category] for the main category with the most money
    pledged during *year* (a 'yyyy' string matched against launch dates).

    Returns [] for an empty/placeholder data file.  Entries whose category is
    not in the known list, or whose 'pledged' field is not numeric, are
    skipped.  Fixes: no longer shadows the built-in name ``str``, and the
    winning category and its total are found in a single max() pass.
    """
    category_totals = {  # key=main category, value=total amount pledged for the year
        'Games': 0, 'Design': 0, 'Technology': 0, 'Film & Video': 0, 'Music': 0,
        'Publishing': 0, 'Fashion': 0, 'Food': 0, 'Art': 0,
        'Comics': 0, 'Photography': 0, 'Theater': 0, 'Crafts': 0,
        'Journalism': 0, 'Dance': 0}
    if len(file_data) == 0 or file_data == [{}]:
        return []
    for entry in file_data:
        if entry['main_category'] not in category_totals:
            continue
        try:
            pledged = float(entry['pledged'])
        except ValueError:
            continue  # non-numeric pledge amount
        if entry['launched'][0:4] == year:
            category_totals[entry['main_category']] += pledged
    top_category = max(category_totals, key=category_totals.get)
    # Same result shape as before: [amount, category name].
    return [category_totals[top_category], top_category]
# -------------------------------------------
# ----- bad_date() -----
# Helper function for analytics, used for input validation.
# Passes in the date to be read, expected to be in the format "yyyy-mm-dd hh:mm:ss", or at least "yyyy-mm-dd"
# Makes no external function calls.
# Reads the date and checks it against various criteria:
# - A project could not be launched before 2008, when Kickstarter was created.
# - A project should not be made after the year 3000, when humans have learned ascension, computers have become obsolete, and the Earth has been reclaimed by nature.
# - A project's month should not be less than 1, for January, or greater than 12, for December.
# - A project's day should not be less than 1 or greater than 31, because those days do not exist.
# Returns a boolean of TRUE indicating invalid date, or FALSE if correct.
# -----------
def bad_date(date):
    """Return True when the date string fails validation, False when it looks OK.

    Expects at least 'yyyy-mm-dd'; year must be 2008..3000 (Kickstarter era),
    month 1..12, day 1..31.
    """
    # Need at least "yyyy-mm-dd" (10 characters).
    if len(date) < 10:
        return True
    try:
        year, month, day = int(date[0:4]), int(date[5:7]), int(date[8:10])
    except (ValueError, TypeError):
        return True
    return not (2008 <= year <= 3000 and 1 <= month <= 12 and 1 <= day <= 31)
# -----------------------
# ----- average_length_ks() -----
# Helper function for analytics, namely make_length_analytic()
# Passes in the datafile to be read.
# Calls on bad_date() for input validation.
# Reads each entry, collects the start and end date, adds the difference to the entry's year.
# Returns the completed list of years, list of average kickstarter lengths for those years, and the total average across all years.
# ---------------
def average_length_ks(pyfile):
    """Return (year labels, average campaign length per year, overall average).

    Length is deadline minus launch date, in days.  Entries are skipped when
    bad_date() rejects either date, the length is negative, or the date is
    calendar-impossible (e.g. 2015-02-31: it passes bad_date()'s coarse 1-31
    day check, but date() raises ValueError — previously this crashed).
    Years are now emitted in sorted order, honoring the long-standing
    'sort by year' comment that was never implemented.
    """
    labels = []       # year labels for each datapoint
    return_data = []  # average campaign length (days) per year
    if len(pyfile) == 0 or pyfile == [{}]:
        return labels, return_data, 0
    totals_by_year = {}  # year -> [sum of lengths in days, number of campaigns]
    for entry in pyfile:
        if bad_date(entry["launched"]) or bad_date(entry["deadline"]):
            continue
        try:
            start = date(int(entry["launched"][0:4]), int(entry["launched"][5:7]), int(entry["launched"][8:10]))
            end = date(int(entry["deadline"][0:4]), int(entry["deadline"][5:7]), int(entry["deadline"][8:10]))
        except ValueError:
            continue  # fix: day does not exist in that month/year
        length_days = (end - start).days
        if length_days < 0:
            continue
        bucket = totals_by_year.setdefault(entry["launched"][0:4], [0, 0])
        bucket[0] += length_days
        bucket[1] += 1
    total_days = 0
    total_campaigns = 0
    for year in sorted(totals_by_year):  # fix: actually sort by year
        day_sum, count = totals_by_year[year]
        labels.append(year)
        return_data.append(day_sum / count)
        total_days += day_sum
        total_campaigns += count
    # Guard against division by zero when every entry was skipped.
    total_average = total_days / total_campaigns if total_campaigns else 0
    return labels, return_data, total_average
# --------------------------------
# ----- countProjects() -----
# Helper function for analytics, namely popularMonth().
# Passes in the datafile to be read.
# Calls on bad_date for input validation.
# Reads each entry, collects the date launched, and increments the corresponding list.
# Returns the completed dictionary.
# ----------------
def countProjects(dataFile):
    """Build and return {year: [12 monthly launch counts]} from the dataset."""
    # dict format: {Year}: [Jan, Feb, ..., Dec] — number of projects launched
    # in that month of that year.
    monthly_counts = {}
    if len(dataFile) == 0 or dataFile == [{}]:
        return monthly_counts
    for year in gatherYears(dataFile):
        monthly_counts[str(year)] = [0] * 12
    for entry in dataFile:
        launched = entry['launched']  # e.g. 2012-03-17 03:24:11
        if not bad_date(launched):
            parts = launched.split('-')  # ['2012', '03', '17 03:24:11']
            monthly_counts[parts[0]][int(parts[1]) - 1] += 1
    return monthly_counts
# ----------------------------
# ----- count_cat_fail_success() -----
# Helper function for analytics, namely category_fail()
# Passes in the data file to be read.
# Makes no external function calls.
# Reads each entry and increments the fail or success value for its category, depending on its state.
# Returns the list of category titles, and the completed list of ratios for those categories
# -----------------
def count_cat_fail_success(data):
    """Return (category names, failed-or-canceled ratio per category).

    A category with no counted projects gets ratio 0.  NOTE: for an
    empty/placeholder data file this keeps the legacy ``[{}]`` sentinel
    return value (a single list, not a 2-tuple) so existing callers that
    compare against it keep working; unpacking callers must handle it.
    Fix: entries with an unknown main_category are now skipped instead of
    raising KeyError.
    """
    if len(data) == 0 or data == [{}]:
        return [{}]
    category_counts = {  # key=main category, value=[#successful, #failed-or-canceled]
        'Games': [0, 0], 'Design': [0, 0], 'Technology': [0, 0], 'Film & Video': [0, 0], 'Music': [0, 0],
        'Publishing': [0, 0], 'Fashion': [0, 0], 'Food': [0, 0], 'Art': [0, 0],
        'Comics': [0, 0], 'Photography': [0, 0], 'Theater': [0, 0], 'Crafts': [0, 0],
        'Journalism': [0, 0], 'Dance': [0, 0]}
    for proj in data:
        counts = category_counts.get(proj['main_category'])
        if counts is None:
            continue  # fix: unknown categories previously raised KeyError
        if proj['state'] == 'successful':
            counts[0] += 1
        elif proj['state'] in ('failed', 'canceled'):
            counts[1] += 1
    category_names = list(category_counts)
    category_failed_ratio = [
        failed / (succ + failed) if succ or failed else 0
        for succ, failed in category_counts.values()
    ]
    return category_names, category_failed_ratio
# -------------------------------------
# ----- findAmbitious() -----
# Helper function for analytics, namely ambitiousProjects()
# Passes in the data file to be read.
# Calls on bad_date() for input validation.
# Reads each entry, locates which year and month it belongs to, compares goals, keeps the higher one.
# If goals are equal, keeps the project with the highest pledged.
# Returns the completed and sorted-by-date dictionary
# -------------
def findAmbitious(dataFile):
    """Return {'yyyy-mm': [ID, goal, pledged]} for the most ambitious project
    per launch month, with keys in sorted (chronological) order.

    'Most ambitious' = highest goal; ties broken by highest pledged amount.
    Goal/pledged are truncated to ints via Decimal, as before.  Fixes: the
    bare ``except:`` that handled 'key not seen yet' via TypeError is replaced
    by an explicit None check, and the local no longer shadows the imported
    ``date`` name.
    """
    best_per_month = {}  # 'yyyy-mm' -> [ID, goal, pledged]
    if len(dataFile) == 0 or dataFile == [{}]:
        return best_per_month
    for item in dataFile:
        if bad_date(item['launched']):
            continue
        month_key = item['launched'][0:7]  # e.g. '2012-03'
        try:
            candidate = [int(item['ID']),
                         int(Decimal(item['goal'])),
                         int(Decimal(item['pledged']))]
        except (ValueError, DecimalException):
            continue  # malformed ID/goal/pledged
        incumbent = best_per_month.get(month_key)
        # Keep the higher goal; break goal ties on amount pledged.
        if (incumbent is None
                or candidate[1] > incumbent[1]
                or (candidate[1] == incumbent[1] and candidate[2] > incumbent[2])):
            best_per_month[month_key] = candidate
    return {key: best_per_month[key] for key in sorted(best_per_month)}
# ---------------------------
# ----- gatherYears() -----
# Helper function for analytics, namely ambitiousProjects() and countProjects().
# Passes in the data file to be read.
# Calls on bad_date for input validation.
# Reads each entry, adds a new year if it is not yet added.
# Returns the completed list of years.
# -------------
def gatherYears(dataFile):
    """Collect the distinct launch years present in *dataFile*.

    Entries whose 'launched' field fails bad_date() are ignored.
    Returns the years as a sorted list of "yyyy" strings.
    """
    retList = []
    if len(dataFile) == 0 or dataFile == [{}]:
        return retList
    for item in dataFile:
        date = item['launched']  # e.g. "2012-03-17 03:24:11"
        if bad_date(date):
            continue
        year = date[0:4]
        # Membership test instead of the old try/except around .index().
        if year not in retList:
            retList.append(year)
    retList.sort()  # ascending order
    return retList
# -------------------------
# ----- createDropdown() -----
# Helper function for analytics, namely ambitiousProjects() and countProjects().
# Passes in the figure to edit, the number of bars, the keys for the bar data, the list of tab titles, and the number of bars to be seen on each tab.
# Makes no external function calls.
# Creates a dropdown menu with the desired characteristics, and applies it to the figure.
# Returns the edited figure.
# ----------------------------
def createDropdown(figure, barCount, titleKeys, titleList, barsPerTab):
    """Attach a dropdown menu to *figure* that toggles bar visibility.

    One tab is created per group of *barsPerTab* bars (barCount // barsPerTab
    tabs in total).  Tab i is labelled with the i-th entry of *titleKeys*,
    retitles the figure to titleList[i], and makes only its own group of
    bars visible.

    Returns the edited figure.
    """
    visList = [False] * barCount   # one visibility flag per bar
    labelList = list(titleKeys)    # tab labels, in key order
    tabList = [
        dict(
            label=labelList[i],
            method="update",
            args=[{"visible": []},  # filled in below
                  {"title": titleList[i]}],
        )
        for i in range(barCount // barsPerTab)  # floor division: whole tabs only
    ]
    visIndex = 0
    for tab in tabList:
        copyVis = visList.copy()
        # Show only this tab's group of bars; min() guards against running
        # past the end when barCount is not an exact multiple of barsPerTab
        # (the old code relied on a bare except to survive that case).
        for i in range(visIndex, min(visIndex + barsPerTab, barCount)):
            copyVis[i] = True
        tab['args'][0]['visible'] = copyVis
        visIndex += barsPerTab
    # Update the figure with its new dropdown menu.
    figure.update_layout(
        updatemenus=[
            dict(
                active=0,
                buttons=tabList
            )
        ]
    )
    return figure
# ----------------------------
# ----- count_categories_per_month() -----
# Helper function for analytics, namely category_per_month().
# Passes in the data file to be read.
# Makes no external function calls.
# Counts the number of projects belonging to each month and its corresponding category.
# Returns the completed dictionary of categories for all months.
# ------------------
def count_categories_per_month(data):
    """Count projects per launch month, broken down by main category.

    Returns {"01".."12": [count per category]} where the inner list is
    indexed in the same order as the *categories* list below.  Projects
    with invalid launch dates or unknown categories are skipped.
    Returns [{}] for empty input (legacy sentinel kept for callers).
    """
    if len(data) == 0 or not data[0]:  # empty file or empty dict inside
        print("empty file passed into analytic")
        return [{}]
    categories = ['Games', 'Design', 'Technology', 'Film & Video', 'Music',
                  'Publishing', 'Fashion', 'Food', 'Art', 'Comics',
                  'Photography', 'Theater', 'Crafts', 'Journalism', 'Dance']
    # One 15-slot counter list per month, built programmatically instead of
    # spelling out twelve literal zero lists.
    month_dict = {'%02d' % month: [0] * len(categories) for month in range(1, 13)}
    for proj in data:
        projDate = proj['launched']
        if bad_date(projDate):
            continue
        projMonth = projDate[5:7]  # "mm" substring
        projCat = proj['main_category']
        if projCat in categories:
            month_dict[projMonth][categories.index(projCat)] += 1
    return month_dict
# --------------------------------------
# ----- get_countrys_category() -----
# Helper function for analytics, namely popular_category_perNation().
# Passes in the data file to be read.
# Makes no external function calls.
# Counts the number of projects belonging to each country, and its corresponding category.
# Returns the completed dictionary of categories for all countries.
# ----------------
def get_countrys_category(data):
    """Count projects per country, broken down by main category.

    Returns {country: [count per category]} where the inner list is indexed
    in the same order as the *categories* list below.  Projects whose
    category is unknown are skipped.  Returns {} for empty input.
    """
    if len(data) == 0 or not data[0]:  # empty file or empty dict inside
        print("empty file passed into analytic")
        return {}
    categories = ['Games', 'Design', 'Technology', 'Film & Video', 'Music',
                  'Publishing', 'Fashion', 'Food', 'Art', 'Comics',
                  'Photography', 'Theater', 'Crafts', 'Journalism', 'Dance']
    analyticDict = {}
    for proj in data:
        projCat = proj['main_category']
        if projCat not in categories:
            continue
        # setdefault creates the country's zeroed counter list on first sight,
        # replacing the manual "if country in dict" branch.
        counts = analyticDict.setdefault(proj['country'], [0] * len(categories))
        counts[categories.index(projCat)] += 1
    return analyticDict
# ---------------------------------
##Successful words analytics
def count_words(data):
    """Tally words of length >= 4 appearing in names of successful projects."""
    count_dict = {}
    for item in data:
        if item.get('state') != "successful":
            continue  # only successful projects contribute words
        for word in item['name'].split():
            if len(word) >= 4:
                count_dict[word] = count_dict.get(word, 0) + 1
    return count_dict
from datetime import date
from decimal import Decimal, DecimalException
# ----- check_float() -----
# Helper function for analytics, used for input validation.
# Passes in the value to be tested.
# Makes no external function calls.
# Checks the value can be expressed as a floating point number.
# Returns TRUE if valid, FALSE if invalid.
# -------------
def check_float(potential_float):
    """Return True if *potential_float* parses as a float, else False."""
    try:
        float(potential_float)
    except ValueError:
        return False
    return True
# --------------------------
# ----- most_funded_category_per_year() -----
# Helper function for analytics, namely... well, analytics_most_funded_category()
# Passes in the year to test for, and the datafile to be read.
# Makes no external function calls.
# Reads each entry, finds the pledged value, and increments it to the corresponding category if the year is correct.
# Returns a list containing the category with the highest amount pledged for the requested year, and that amount.
# ---------------------
def most_funded_category_per_year(year, file_data):
    """Find the category with the highest total pledged for *year*.

    *year* is a "yyyy" string; *file_data* is a list of project dicts.
    Returns [max_amount, category_name], or [] for empty input.
    """
    category_dict = {  # key = main category, value = total pledged that year
        'Games': 0, 'Design': 0, 'Technology': 0, 'Film & Video': 0,
        'Music': 0, 'Publishing': 0, 'Fashion': 0, 'Food': 0, 'Art': 0,
        'Comics': 0, 'Photography': 0, 'Theater': 0, 'Crafts': 0,
        'Journalism': 0, 'Dance': 0}
    result = []
    if len(file_data) == 0 or file_data == [{}]:
        return result
    for entry in file_data:
        if entry['main_category'] in category_dict:
            if check_float(entry['pledged']):
                # Renamed local: the original used "str", shadowing the builtin.
                launched = entry['launched']
                if launched[0:4] == year:
                    category_dict[entry['main_category']] += float(entry['pledged'])
    # argmax over the totals; value and key necessarily belong together.
    max_key = max(category_dict, key=category_dict.get)
    result.append(category_dict[max_key])
    result.append(max_key)
    return result
# -------------------------------------------
# ----- bad_date() -----
# Helper function for analytics, used for input validation.
# Passes in the date to be read, expected to be in the format "yyyy-mm-dd hh:mm:ss", or at least "yyyy-mm-dd"
# Makes no external function calls.
# Reads the date and checks it against various criteria:
# - A project could not be launched before 2008, when Kickstarter was created.
# - A project should not be made after the year 3000, when humans have learned ascension, computers have become obsolete, and the Earth has been reclaimed by nature.
# - A project's month should not be less than 1, for January, or greater than 12, for December.
# - A project's day should not be less than 1 or greater than 31, because those days do not exist.
# Returns a boolean of TRUE indicating invalid date, or FALSE if correct.
# -----------
def bad_date(date):
    """Return True when *date* is NOT a plausible "yyyy-mm-dd..." string.

    Accepts years 2008-3000, months 01-12 and days 01-31; anything shorter
    than ten characters or non-numeric in those positions is rejected.
    """
    if len(date) < 10:
        return True
    try:
        year = int(date[0:4])
        month = int(date[5:7])
        day = int(date[8:10])
    except ValueError:
        return True
    # Kickstarter launched in 2008; the upper bound is a sanity cap.
    return not (2008 <= year <= 3000 and 1 <= month <= 12 and 1 <= day <= 31)
# -----------------------
# ----- average_length_ks() -----
# Helper function for analytics, namely make_length_analytic()
# Passes in the datafile to be read.
# Calls on bad_date() for input validation.
# Reads each entry, collects the start and end date, adds the difference to the entry's year.
# Returns the completed list of years, list of average kickstarter lengths for those years, and the total average across all years.
# ---------------
def average_length_ks(pyfile):
    """Compute the average kickstarter campaign length in days, per year.

    Skips entries whose launch/deadline dates fail bad_date() or whose
    span is negative.  Returns (labels, returnData, totalAverage):
    year strings, the per-year averages, and the overall average.
    NOTE(review): despite the "#sort by year" comment below, the years are
    emitted in order of first appearance and never actually sorted --
    confirm whether callers expect sorted labels.
    """
    labels = [] #labels for each datapoint
    returnData = [] #datapoints(average length per year)
    totalAverage = 0
    totalDates = 0
    dataByMonth = [] #
    #listValues = ["year",0.0,0]#"year or total", sum of lengths, number of values
    if len(pyfile) == 0 or pyfile == [{}]:
        return labels,returnData,totalAverage
    for i in pyfile: # For every entry,
        if bad_date(i["launched"]) or bad_date(i["deadline"]): # Check if dates are valid,
            continue
        startDate = date(int(i["launched"][0:4]),int(i["launched"][5:7]),int(i["launched"][8:10])) # Gather the starting time
        endDate = date(int(i["deadline"][0:4]),int(i["deadline"][5:7]),int(i["deadline"][8:10])) # and the ending time,
        timeBetween = endDate - startDate # Find the difference,
        if timeBetween.days < 0:
            continue
        yearNotInList = True
        # Linear scan over the per-year accumulators: [year, total days, count].
        for val in range(len(dataByMonth)): # Then for all currently collected data,
            if dataByMonth[val][0] == i["launched"][0:4]: # Find the year,
                yearNotInList = False
                dataByMonth[val][1] = dataByMonth[val][1] + timeBetween.days # add this entry's time to the year's total,
                dataByMonth[val][2] = dataByMonth[val][2] + 1 # and increment the project count.
        if yearNotInList:
            dataByMonth.append([i["launched"][0:4],timeBetween.days,1]) # If year is missing, these are the first values for it.
    #sort by year
    for iteration in dataByMonth: # For every year in the collected data,
        labels.append(iteration[0]) # Add the year to labels list,
        returnData.append(iteration[1]/iteration[2]) # Add that year's (total length / total projects) average to returnData,
        totalDates = iteration[2] + totalDates # and calculate the totals.
        totalAverage = iteration[1] + totalAverage
    if totalDates == 0:#error check for if there were only bad kickstarters passed in to prevent divide by zero
        totalAverage = 0
    else:
        totalAverage = totalAverage/totalDates
    # Finally, return everything.
    return labels, returnData,totalAverage
# --------------------------------
# ----- countProjects() -----
# Helper function for analytics, namely popularMonth().
# Passes in the datafile to be read.
# Calls on bad_date for input validation.
# Reads each entry, collects the date launched, and increments the corresponding list.
# Returns the completed dictionary.
# ----------------
def countProjects(dataFile):
    """Count project launches per month for every year in *dataFile*.

    Returns {"yyyy": [Jan..Dec counts]}; entries with invalid launch dates
    are ignored.  Years come from gatherYears(), so every valid year gets a
    row even if some months stay at zero.
    """
    retDict = {}
    if len(dataFile) == 0 or dataFile == [{}]:
        return retDict
    # Pre-create a twelve-slot counter row for every year present.
    for year in gatherYears(dataFile):
        retDict[str(year)] = [0] * 12
    for item in dataFile:
        launchTime = item['launched']  # e.g. "2012-03-17 03:24:11"
        if bad_date(launchTime):
            continue
        parts = launchTime.split('-')  # ['2012', '03', '17 03:24:11']
        retDict[parts[0]][int(parts[1]) - 1] += 1
    return retDict
# ----------------------------
# ----- count_cat_fail_success() -----
# Helper function for analytics, namely category_fail()
# Passes in the data file to be read.
# Makes no external function calls.
# Reads each entry and increments the fail or success value for its category, depending on its state.
# Returns the list of category titles, and the completed list of ratios for those categories
# -----------------
def count_cat_fail_success(data):
    """Compute the failure ratio (failed+canceled vs. successful) per category.

    Returns (category_names, category_failed_ratio) as parallel lists; a
    category with no counted projects gets a ratio of 0.  Returns [{}] for
    empty input (legacy sentinel kept for callers).
    """
    if len(data) == 0 or data == [{}]:
        return [{}]
    category_dict = {  # key = main category, value = [#successful, #failed]
        'Games': [0, 0], 'Design': [0, 0], 'Technology': [0, 0],
        'Film & Video': [0, 0], 'Music': [0, 0], 'Publishing': [0, 0],
        'Fashion': [0, 0], 'Food': [0, 0], 'Art': [0, 0], 'Comics': [0, 0],
        'Photography': [0, 0], 'Theater': [0, 0], 'Crafts': [0, 0],
        'Journalism': [0, 0], 'Dance': [0, 0]}
    for proj in data:
        counts = category_dict.get(proj['main_category'])
        if counts is None:
            continue  # unknown category: skip instead of raising KeyError
        if proj['state'] == 'successful':
            counts[0] += 1
        elif proj['state'] == 'failed' or proj['state'] == 'canceled':
            counts[1] += 1
    category_names = list(category_dict.keys())
    # failed / total, guarding against division by zero for empty categories
    category_failed_ratio = [fail / (succ + fail) if succ or fail else 0
                             for succ, fail in category_dict.values()]
    return category_names, category_failed_ratio
# -------------------------------------
# ----- findAmbitious() -----
# Helper function for analytics, namely ambitiousProjects()
# Passes in the data file to be read.
# Calls on bad_date() for input validation.
# Reads each entry, locates which year and month it belongs to, compares goals, keeps the higher one.
# If goals are equal, keeps the project with the highest pledged.
# Returns the completed and sorted-by-date dictionary
# -------------
def findAmbitious(dataFile):
    """Find, for every year-month, the most ambitious project.

    "Most ambitious" means the highest goal; ties on goal are broken by the
    higher pledged amount.  Entries with invalid launch dates or
    non-numeric ID/goal/pledged fields are skipped.

    Returns {"yyyy-mm": [ID, goal, pledged]} with keys in ascending order.
    """
    retDict = {}
    if len(dataFile) == 0 or dataFile == [{}]:
        return retDict
    for item in dataFile:
        if bad_date(item['launched']):  # expects "yyyy-mm-dd hh:mm:ss"
            continue
        date = item['launched'][0:7]  # keep only "yyyy-mm"
        try:
            itemVals = [int(item['ID']),
                        int(Decimal(item['goal'])),
                        int(Decimal(item['pledged']))]
        except (ValueError, DecimalException):
            continue  # skip malformed numeric fields
        # Explicit membership check replaces the old bare except:, which
        # silently swallowed any error raised during the comparison.
        compVals = retDict.get(date)
        if (compVals is None
                or itemVals[1] > compVals[1]
                or (itemVals[1] == compVals[1] and itemVals[2] > compVals[2])):
            retDict[date] = itemVals
    # Rebuild the dict with keys in ascending date order.
    return {key: retDict[key] for key in sorted(retDict)}
# ---------------------------
# ----- gatherYears() -----
# Helper function for analytics, namely ambitiousProjects() and countProjects().
# Passes in the data file to be read.
# Calls on bad_date for input validation.
# Reads each entry, adds a new year if it is not yet added.
# Returns the completed list of years.
# -------------
def gatherYears(dataFile):
    """Collect the distinct launch years present in *dataFile*.

    Entries whose 'launched' field fails bad_date() are ignored.
    Returns the years as a sorted list of "yyyy" strings.
    """
    retList = []
    if len(dataFile) == 0 or dataFile == [{}]:
        return retList
    for item in dataFile:
        date = item['launched']  # e.g. "2012-03-17 03:24:11"
        if bad_date(date):
            continue
        year = date[0:4]
        # Membership test instead of the old try/except around .index().
        if year not in retList:
            retList.append(year)
    retList.sort()  # ascending order
    return retList
# -------------------------
# ----- createDropdown() -----
# Helper function for analytics, namely ambitiousProjects() and countProjects().
# Passes in the figure to edit, the number of bars, the keys for the bar data, the list of tab titles, and the number of bars to be seen on each tab.
# Makes no external function calls.
# Creates a dropdown menu with the desired characteristics, and applies it to the figure.
# Returns the edited figure.
# ----------------------------
def createDropdown(figure, barCount, titleKeys, titleList, barsPerTab):
    """Attach a dropdown menu to *figure* that toggles bar visibility.

    One tab is created per group of *barsPerTab* bars (barCount // barsPerTab
    tabs in total).  Tab i is labelled with the i-th entry of *titleKeys*,
    retitles the figure to titleList[i], and makes only its own group of
    bars visible.

    Returns the edited figure.
    """
    visList = [False] * barCount   # one visibility flag per bar
    labelList = list(titleKeys)    # tab labels, in key order
    tabList = [
        dict(
            label=labelList[i],
            method="update",
            args=[{"visible": []},  # filled in below
                  {"title": titleList[i]}],
        )
        for i in range(barCount // barsPerTab)  # floor division: whole tabs only
    ]
    visIndex = 0
    for tab in tabList:
        copyVis = visList.copy()
        # Show only this tab's group of bars; min() guards against running
        # past the end when barCount is not an exact multiple of barsPerTab
        # (the old code relied on a bare except to survive that case).
        for i in range(visIndex, min(visIndex + barsPerTab, barCount)):
            copyVis[i] = True
        tab['args'][0]['visible'] = copyVis
        visIndex += barsPerTab
    # Update the figure with its new dropdown menu.
    figure.update_layout(
        updatemenus=[
            dict(
                active=0,
                buttons=tabList
            )
        ]
    )
    return figure
# ----------------------------
# ----- count_categories_per_month() -----
# Helper function for analytics, namely category_per_month().
# Passes in the data file to be read.
# Makes no external function calls.
# Counts the number of projects belonging to each month and its corresponding category.
# Returns the completed dictionary of categories for all months.
# ------------------
def count_categories_per_month(data):
    """Count projects per launch month, broken down by main category.

    Returns {"01".."12": [count per category]} where the inner list is
    indexed in the same order as the *categories* list below.  Projects
    with invalid launch dates or unknown categories are skipped.
    Returns [{}] for empty input (legacy sentinel kept for callers).
    """
    if len(data) == 0 or not data[0]:  # empty file or empty dict inside
        print("empty file passed into analytic")
        return [{}]
    categories = ['Games', 'Design', 'Technology', 'Film & Video', 'Music',
                  'Publishing', 'Fashion', 'Food', 'Art', 'Comics',
                  'Photography', 'Theater', 'Crafts', 'Journalism', 'Dance']
    # One 15-slot counter list per month, built programmatically instead of
    # spelling out twelve literal zero lists.
    month_dict = {'%02d' % month: [0] * len(categories) for month in range(1, 13)}
    for proj in data:
        projDate = proj['launched']
        if bad_date(projDate):
            continue
        projMonth = projDate[5:7]  # "mm" substring
        projCat = proj['main_category']
        if projCat in categories:
            month_dict[projMonth][categories.index(projCat)] += 1
    return month_dict
# --------------------------------------
# ----- get_countrys_category() -----
# Helper function for analytics, namely popular_category_perNation().
# Passes in the data file to be read.
# Makes no external function calls.
# Counts the number of projects belonging to each country, and its corresponding category.
# Returns the completed dictionary of categories for all countries.
# ----------------
def get_countrys_category(data):
    """Count projects per country, broken down by main category.

    Returns {country: [count per category]} where the inner list is indexed
    in the same order as the *categories* list below.  Projects whose
    category is unknown are skipped.  Returns {} for empty input.
    """
    if len(data) == 0 or not data[0]:  # empty file or empty dict inside
        print("empty file passed into analytic")
        return {}
    categories = ['Games', 'Design', 'Technology', 'Film & Video', 'Music',
                  'Publishing', 'Fashion', 'Food', 'Art', 'Comics',
                  'Photography', 'Theater', 'Crafts', 'Journalism', 'Dance']
    analyticDict = {}
    for proj in data:
        projCat = proj['main_category']
        if projCat not in categories:
            continue
        # setdefault creates the country's zeroed counter list on first sight,
        # replacing the manual "if country in dict" branch.
        counts = analyticDict.setdefault(proj['country'], [0] * len(categories))
        counts[categories.index(projCat)] += 1
    return analyticDict
# ---------------------------------
##Successful words analytics
def count_words(data):
    """Tally words of length >= 4 appearing in names of successful projects."""
    count_dict = {}
    for item in data:
        if item.get('state') != "successful":
            continue  # only successful projects contribute words
        for word in item['name'].split():
            if len(word) >= 4:
                count_dict[word] = count_dict.get(word, 0) + 1
    return count_dict
# ----------- # ----------------------- # ----- average_length_ks() ----- # Helper function for analytics, namely make_length_analytic() # Passes in the datafile to be read. # Calls on bad_date() for input validation. # Reads each entry, collects the start and end date, adds the difference to the entry's year. # Returns the completed list of years, list of average kickstarter lengths for those years, and the total average across all years. # --------------- #labels for each datapoint #datapoints(average length per year) # #listValues = ["year",0.0,0]#"year or total", sum of lengths, number of values # For every entry, # Check if dates are valid, # Gather the starting time # and the ending time, # Find the difference, # Then for all currently collected data, # Find the year, # add this entry's time to the year's total, # and increment the project count. # If year is missing, these are the first values for it. #sort by year # For every year in the collected data, # Add the year to labels list, # Add that year's (total length / total projects) average to returnData, # and calculate the totals. #error check for if there were only bad kickstarters passed in to prevent divide by zero # Finally, return everything. # -------------------------------- # ----- countProjects() ----- # Helper function for analytics, namely popularMonth(). # Passes in the datafile to be read. # Calls on bad_date for input validation. # Reads each entry, collects the date launched, and increments the corresponding list. # Returns the completed dictionary. # ---------------- # list format: {Year}:[Jan,Feb,Mar,Apr,May,Jun,Jul,Aug,Sep,Oct,Nov,Dec] # each value represents the number of projects launched in that month for that year. # 2012-03-17 03:24:11 #checks to see if launch time is actually a date # ['2012', '03', '17 03:24:11'] # ---------------------------- # ----- count_cat_fail_success() ----- # Helper function for analytics, namely category_fail() # Passes in the data file to be read. 
# Makes no external function calls. # Reads each entry and increments the fail or success value for its category, depending on its state. # Returns the list of category titles, and the completed list of ratios for those categories # ----------------- # key=main category, value=#successful[0],#failed[1] # FOR DEBUGGING: category_successful = [x[0] for x in list(category_dict.values())] # FOR DEBUGGING: category_failed = [x[1] for x in list(category_dict.values())] # list comprehension # ------------------------------------- # ----- findAmbitious() ----- # Helper function for analytics, namely ambitiousProjects() # Passes in the data file to be read. # Calls on bad_date() for input validation. # Reads each entry, locates which year and month it belongs to, compares goals, keeps the higher one. # If goals are equal, keeps the project with the highest pledged. # Returns the completed and sorted-by-date dictionary # ------------- # dictionary format: {year-month}:[ID,goal,pledged] # 2012-03-17 03:24:11 # 2012-03 # if goal is higher, or goal is equal and pledged is higher # --------------------------- # ----- gatherYears() ----- # Helper function for analytics, namely ambitiousProjects() and countProjects(). # Passes in the data file to be read. # Calls on bad_date for input validation. # Reads each entry, adds a new year if it is not yet added. # Returns the completed list of years. # ------------- # 2012-03-17 03:24:11 # find 2012 in list, if not... # add 2012 to list # sort years in ascending order # ------------------------- # ----- createDropdown() ----- # Helper function for analytics, namely ambitiousProjects() and countProjects(). # Passes in the figure to edit, the number of bars, the keys for the bar data, the list of tab titles, and the number of bars to be seen on each tab. # Makes no external function calls. # Creates a dropdown menu with the desired characteristics, and applies it to the figure. # Returns the edited figure. 
# ---------------------------- # Add a visual boolean for every bar # Add each desired tab title to a list # Add a new tab to tabList (number of tabs = barCount divided by barsPerTab) # This blank list will be filled later # For every tab to be made, # Create a copy of our visList # and allow only the necessary bars to be seen # If something bad happens, don't crash # Update this bar's visible arguments to the proper values instead of a blank list # Increment visIndex for the next loop # Update the figure with its new, fancy dropdown menu! # ---------------------------- # ----- count_categories_per_month() ----- # Helper function for analytics, namely category_per_month(). # Passes in the data file to be read. # Makes no external function calls. # Counts the number of projects belonging to each month and its corresponding category. # Returns the completed dictionary of categories for all months. # ------------------ # Check if it is necessary to create dictionary #quick check to see if pyfile is either empty or has an empty dictionary inside # Initialize variables # Increment each category respectively # substring of the month #increment up that category # -------------------------------------- # ----- get_countrys_category() ----- # Helper function for analytics, namely popular_category_perNation(). # Passes in the data file to be read. # Makes no external function calls. # Counts the number of projects belonging to each country, and its corresponding category. # Returns the completed dictionary of categories for all countries. # ---------------- # Check if it is necessary to create dictionary #quick check to see if pyfile is either empty or has an empty dictionary inside # Initialize variables # Loop through dataset to add entries # no need to create new entry in the dictionary #makes entry for the newly detected country # --------------------------------- ##Successful words analytics | 3.342618 | 3 |
# todoserver.py
#!/anaconda3/bin/python
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import urlparse
import json
import re
# datetimestamp according to RFC3339, 2019-06-11
"""
Tested at https://www.regextester.com/96683
9999-12-09T16:39:57-08:00
1937-01-01T12:00:27.87+00:20
1990-12-31T23:59:60Z
1985-04-12T23:20:50.52Z
1996-12-09T16:39:57-08:00
1996-12-09T16:39:57+08:00
"""
# RFC 3339 date-time matcher used to validate "deadline" fields.
# Fixed: the fractional-seconds dot is now escaped (a bare "." matched any
# character, so e.g. "23:20:50x52Z" was accepted) and one or more fraction
# digits are allowed, per RFC 3339.  Raw string avoids invalid-escape warnings.
deadline_re = r"^\d{4}-(0[1-9]|1[0-2])-(0[1-9]|[12]\d|3[01])T(([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.\d+|)(Z|(-|\+)([01][0-9]|2[0-3]):([0-5][0-9])))$"
class Records():
    # In-memory store shared by all handler instances; "events" holds the
    # registered todo entries in insertion order.  NOTE(review): class-level
    # mutable state -- data is lost on restart and access is not
    # synchronized; confirm single-threaded serving is intended.
    records = {"events":[]}
class RequestHandler(BaseHTTPRequestHandler):
    """Minimal JSON REST handler: GET/POST on /api/v1/event."""

    def _set_headers(self, code):
        """Send status *code* plus a JSON Content-type header."""
        self.send_response(code)
        self.send_header('Content-type', 'application/json')
        self.end_headers()

    def do_GET(self):
        """GET /api/v1/event -> all events; /api/v1/event/<id> -> one event."""
        parsed_path = urlparse(self.path)
        path_elements = parsed_path.path.split('/')[1:]
        # Fixed: the original test (len != 3 or len != 4) was always true,
        # so only the path prefix was ever actually checked.
        if (len(path_elements) not in (3, 4)
                or path_elements[:3] != ["api", "v1", "event"]):
            self._set_headers(400)
            return
        if len(path_elements) == 3:
            # No id given: return every registered event.
            self._set_headers(200)
            self.wfile.write(str.encode(json.dumps(Records.records)))
            return
        # An id was given: return that single event (ids are 1-based).
        try:
            event_id = int(path_elements[3])
        except ValueError:
            self._set_headers(400)
            return
        # Fixed off-by-one: the old bound (len >= id - 1) allowed an index
        # one past the end of the list, raising IndexError for id == len + 1.
        if 1 <= event_id <= len(Records.records["events"]):
            data = Records.records["events"][event_id - 1]
            self._set_headers(200)
            self.wfile.write(str.encode(json.dumps(data)))
        else:
            self._set_headers(404)

    def register_data(self, data):
        """Validate *data* and append it to the shared store.

        Returns the new 1-based id, or -1 when a required field is missing
        or the deadline is not an RFC 3339 date-time.
        """
        try:
            entry = {
                "id": len(Records.records["events"]) + 1,
                "deadline": data["deadline"],
                "title": data["title"],
                "memo": data["memo"],
            }
        except KeyError:
            return -1  # required field missing (previously an unhandled 500)
        if not re.match(deadline_re, entry["deadline"]):
            print("data error")
            return -1
        Records.records["events"].append(entry)
        return entry["id"]

    def do_POST(self):
        """POST /api/v1/event with a JSON body registers a new event."""
        if str(self.headers['Content-type']) != "application/json":
            self._set_headers(400)
            return
        parsed_path = urlparse(self.path)
        path_elements = parsed_path.path.split('/')[1:]
        if path_elements != ["api", "v1", "event"]:
            self._set_headers(400)
            return
        self.data_string = self.rfile.read(int(self.headers['Content-Length']))
        try:
            data = json.loads(self.data_string)
        except ValueError:  # malformed JSON body (previously an unhandled 500)
            self._set_headers(400)
            self.wfile.write(str.encode(json.dumps(
                {'result': 'failure', 'message': 'invalid date format'})))
            return
        new_id = self.register_data(data)
        if new_id == -1:
            self._set_headers(400)
            self.wfile.write(str.encode(json.dumps(
                {'result': 'failure', 'message': 'invalid date format'})))
        else:
            self._set_headers(200)
            self.wfile.write(str.encode(json.dumps(
                {'result': 'success', 'message': 'registered', 'id': new_id})))
def main():
    """Start the todo HTTP server on all interfaces, port 8080.

    serve_forever() blocks until the process is killed (no graceful
    shutdown handling).
    """
    server = HTTPServer(('', 8080), RequestHandler)
    server.serve_forever()
if __name__ == '__main__':
    main()
| <filename>todoserver.py
#!/anaconda3/bin/python
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import urlparse
import json
import re
# datetimestamp according to RFC3339, 2019-06-11
"""
Tested at https://www.regextester.com/96683
9999-12-09T16:39:57-08:00
1937-01-01T12:00:27.87+00:20
1990-12-31T23:59:60Z
1985-04-12T23:20:50.52Z
1996-12-09T16:39:57-08:00
1996-12-09T16:39:57+08:00
"""
deadline_re = "^\d{4}-(0[1-9]|1[0-2])-(0[1-9]|[12]\d|3[01])T(([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(.\d{2}|)(Z|(-|\+)([01][0-9]|2[0-3]):([0-5][0-9])))$"
class Records():
records = {"events":[]}
class RequestHandler(BaseHTTPRequestHandler):
def _set_headers(self, code):
self.send_response(code)
self.send_header('Content-type','application/json')
self.end_headers()
def do_GET(self):
parsed_path = urlparse(self.path)
path_elements = parsed_path.path.split('/')[1:]
# print (self.path, path_elements)
if not ( (len(path_elements) != 3 or len(path_elements) != 4) and path_elements[:3] == ["api", "v1","event"] ):
self._set_headers(400)
return
else:
# get all registered data
if len(path_elements) == 3:
self._set_headers(200)
str_json = json.dumps(Records.records) #,indent= 1)
self.wfile.write(str.encode(str_json))
return
# get registered data by index
elif len(path_elements) == 4:
try:
id = int(path_elements[3])
print (id)
if id > 0 and len(Records.records["events"]) > 0 and len(Records.records["events"]) >= (id - 1 ):
data = Records.records["events"][id-1]
print (data)
self._set_headers(200)
str_json = json.dumps(data)
self.wfile.write(str.encode(str_json))
return
else:
self._set_headers(404)
return
except Exception as e:
print (e)
self._set_headers(400)
return
def register_data(self, data):
print (data)
deadline = data["deadline"]
pattern = re.compile(deadline_re)
if bool(pattern.match(deadline)):
entry = {}
entry["id"] = len(Records.records["events"]) + 1
entry["deadline"] = data["deadline"]
entry["title"] = data["title"]
entry["memo"] = data["memo"]
Records.records["events"].append(entry)
id = len(Records.records["events"]) # newest data is at the end of list
print ("Latest data: ",id, Records.records["events"])
return id
else:
print ("data error")
return -1
def do_POST(self):
if str(self.headers['Content-type']) != "application/json":
self._set_headers(400)
return
parsed_path = urlparse(self.path)
path_elements = parsed_path.path.split('/')[1:]
# print (path_elements) # path after the localhost:port
if not ( len(path_elements) ==3 and path_elements == ["api", "v1" ,"event"]):
self._set_headers(400)
self.end_headers()
return
else:
self.data_string = self.rfile.read(int(self.headers['Content-Length']))
data = json.loads(self.data_string)
id = self.register_data(data)
if id == -1:
self._set_headers(400)
str_json = json.dumps({'result': 'failure','message':'invalid date format'})
self.wfile.write(str.encode(str_json))
return
else:
self._set_headers(200)
str_json = json.dumps({'result': 'success','message':'registered', 'id': id})
self.wfile.write(str.encode(str_json))
return
def main(host='', port=8080):
    """Start the to-do HTTP API server and serve requests forever.

    Args:
        host: interface to bind; '' (the default) binds all interfaces.
        port: TCP port to listen on; defaults to 8080, as before.
    """
    server = HTTPServer((host, port), RequestHandler)
    server.serve_forever()
if __name__ == '__main__':
    # Start the server only when executed as a script, not on import.
    main()
| en | 0.586843 | #!/anaconda3/bin/python # datetimestamp according to RFC3339, 2019-06-11 Tested at https://www.regextester.com/96683 9999-12-09T16:39:57-08:00 1937-01-01T12:00:27.87+00:20 1990-12-31T23:59:60Z 1985-04-12T23:20:50.52Z 1996-12-09T16:39:57-08:00 1996-12-09T16:39:57+08:00 # print (self.path, path_elements) # get all registered data #,indent= 1) # get registered data by index # newest data is at the end of list # print (path_elements) # path after the localhost:port | 2.743539 | 3 |
src/frames/add_unit_frame.py | GolovPavel/ValueConverter | 1 | 6621374 | <gh_stars>1-10
import tkinter as tk
from tkinter import ttk
from tkinter.messagebox import showerror
import util
from constants.frames import MAIN_FRAME_NAME
from model.unit import Unit
class AddUnitFrame(tk.Frame):
def __init__(self, root, controller):
tk.Frame.__init__(self, root)
self.controller = controller
self.main_label = tk.Label(self, text="Добавление новой единицы измерения", font="Helvetica 25 bold")
self.main_label.pack(pady=35)
self.name_label = tk.Label(self, text="Название величины", font="Helvetica 17")
self.name_label.pack(pady=5)
self.quantity_selector = ttk.Combobox(self, values=list(self.controller.phys_quantities.keys()),
justify='center')
self.quantity_selector['state'] = 'readonly'
self.quantity_selector.pack()
self.name_label = tk.Label(self, text="Название единицы измерения", font="Helvetica 17")
self.name_label.pack(pady=5)
self.unit_name_entry = tk.Entry(self, width=24)
self.unit_name_entry.pack()
self.factor_label = tk.Label(self, text="Фактор преобразования", font="Helvetica 17")
self.factor_label.pack(pady=5)
self.factor_entry = tk.Entry(self, width=24)
self.factor_entry.pack()
self.factor_label = tk.Label(self, text="Операция преобразования", font="Helvetica 17")
self.factor_label.pack(pady=5)
self.conversion_operation_selector = ttk.Combobox(self, values=["*", "+"], justify='center')
self.conversion_operation_selector['state'] = 'readonly'
self.conversion_operation_selector.pack()
self.add_button = tk.Button(self, text="Добавить", width=20, height=3, command=self.__add_unit)
self.add_button.pack(pady=40)
self.back_button = tk.Button(self, text="Назад", width=20, height=3,
command=lambda: self.controller.show_frame(MAIN_FRAME_NAME))
self.back_button.pack()
def __add_unit(self):
try:
quantity_name = self.quantity_selector.get()
unit_name = self.unit_name_entry.get()
conversion_factor = float(self.factor_entry.get())
conversion_operation = self.conversion_operation_selector.get()
new_unit = Unit(unit_name, conversion_factor, conversion_operation)
quantities = util.get_all_quantities()
for quantity in quantities:
if quantity.name == quantity_name:
quantity.units.append(new_unit)
util.save_quantity(quantities)
self.controller.show_frame(MAIN_FRAME_NAME)
except:
showerror("Некорректные данные", "Введите корректные данные")
def render(self):
self.__clear()
self.quantity_selector['values'] = list(self.controller.phys_quantities.keys())
def __clear(self):
self.quantity_selector.set("")
self.unit_name_entry.delete(0, tk.END)
self.factor_entry.delete(0, tk.END)
self.conversion_operation_selector.set("")
| import tkinter as tk
from tkinter import ttk
from tkinter.messagebox import showerror
import util
from constants.frames import MAIN_FRAME_NAME
from model.unit import Unit
class AddUnitFrame(tk.Frame):
def __init__(self, root, controller):
tk.Frame.__init__(self, root)
self.controller = controller
self.main_label = tk.Label(self, text="Добавление новой единицы измерения", font="Helvetica 25 bold")
self.main_label.pack(pady=35)
self.name_label = tk.Label(self, text="Название величины", font="Helvetica 17")
self.name_label.pack(pady=5)
self.quantity_selector = ttk.Combobox(self, values=list(self.controller.phys_quantities.keys()),
justify='center')
self.quantity_selector['state'] = 'readonly'
self.quantity_selector.pack()
self.name_label = tk.Label(self, text="Название единицы измерения", font="Helvetica 17")
self.name_label.pack(pady=5)
self.unit_name_entry = tk.Entry(self, width=24)
self.unit_name_entry.pack()
self.factor_label = tk.Label(self, text="Фактор преобразования", font="Helvetica 17")
self.factor_label.pack(pady=5)
self.factor_entry = tk.Entry(self, width=24)
self.factor_entry.pack()
self.factor_label = tk.Label(self, text="Операция преобразования", font="Helvetica 17")
self.factor_label.pack(pady=5)
self.conversion_operation_selector = ttk.Combobox(self, values=["*", "+"], justify='center')
self.conversion_operation_selector['state'] = 'readonly'
self.conversion_operation_selector.pack()
self.add_button = tk.Button(self, text="Добавить", width=20, height=3, command=self.__add_unit)
self.add_button.pack(pady=40)
self.back_button = tk.Button(self, text="Назад", width=20, height=3,
command=lambda: self.controller.show_frame(MAIN_FRAME_NAME))
self.back_button.pack()
def __add_unit(self):
try:
quantity_name = self.quantity_selector.get()
unit_name = self.unit_name_entry.get()
conversion_factor = float(self.factor_entry.get())
conversion_operation = self.conversion_operation_selector.get()
new_unit = Unit(unit_name, conversion_factor, conversion_operation)
quantities = util.get_all_quantities()
for quantity in quantities:
if quantity.name == quantity_name:
quantity.units.append(new_unit)
util.save_quantity(quantities)
self.controller.show_frame(MAIN_FRAME_NAME)
except:
showerror("Некорректные данные", "Введите корректные данные")
def render(self):
self.__clear()
self.quantity_selector['values'] = list(self.controller.phys_quantities.keys())
def __clear(self):
self.quantity_selector.set("")
self.unit_name_entry.delete(0, tk.END)
self.factor_entry.delete(0, tk.END)
self.conversion_operation_selector.set("") | none | 1 | 2.743537 | 3 | |
rpa/__init__.py | hiroki0525/sokupad-rpa-api | 0 | 6621375 | from abc import ABC, abstractmethod
from entity.state import RpaState
from infra.sokupad_client import SokupadClient
class AbstractRpa(ABC):
def __init__(self, state: RpaState):
self._client = SokupadClient()
self._state = state
def run(self):
try:
self.start()
self.process()
except Exception as e:
print(e)
finally:
self.end()
def start(self) -> None:
user = self._get_params().user
self._client.login(user.id, user.password, user.p_ars)
def end(self) -> None:
self._client.quit()
@abstractmethod
def process(self) -> None:
pass
def _get_params(self):
return self._state.params | from abc import ABC, abstractmethod
from entity.state import RpaState
from infra.sokupad_client import SokupadClient
class AbstractRpa(ABC):
def __init__(self, state: RpaState):
self._client = SokupadClient()
self._state = state
def run(self):
try:
self.start()
self.process()
except Exception as e:
print(e)
finally:
self.end()
def start(self) -> None:
user = self._get_params().user
self._client.login(user.id, user.password, user.p_ars)
def end(self) -> None:
self._client.quit()
@abstractmethod
def process(self) -> None:
pass
def _get_params(self):
return self._state.params | none | 1 | 2.826391 | 3 | |
scripts/stacked_autoencoder.py | sylvan5/deep-learning-theano | 0 | 6621376 | <reponame>sylvan5/deep-learning-theano
#coding: utf-8
import os
import timeit
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer
from denoising_autoencoder import DenoisingAutoencoder
class StackedDenoisingAutoencoder(object):
def __init__(self,
numpy_rng,
n_ins,
hidden_layers_sizes,
n_outs,
corruption_levels):
# 隠れ層オブジェクトのリスト
self.hidden_layers = []
# 自己符号化器のリスト
self.autoencoder_layers = []
# パラメータのリスト
self.params = []
# 隠れ層の数
self.n_layers = len(hidden_layers_sizes)
assert self.n_layers > 0
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
# 学習データのミニバッチ(入力データと正解ラベル)を表すシンボル
# これまでの実装と違って複数のメソッド内で使うので属性にしている
self.x = T.matrix('x')
self.y = T.ivector('y')
# ネットワークを構築
# 隠れ層の数だけループして積み上げていく
for i in xrange(self.n_layers):
# ユニット数
if i == 0:
input_size = n_ins
else:
input_size = hidden_layers_sizes[i - 1]
# 隠れ層への入力データ
if i == 0:
layer_input = self.x
else:
layer_input = self.hidden_layers[-1].output
# 多層パーセプトロンの隠れ層
# fine-tuningで重みを更新するため
hidden_layer = HiddenLayer(rng=numpy_rng,
input=layer_input,
n_in=input_size,
n_out=hidden_layers_sizes[i],
activation=T.nnet.sigmoid)
self.hidden_layers.append(hidden_layer)
self.params.extend(hidden_layer.params)
# 自己符号化器だが重みは多層パーセプトロンの隠れ層と共有
# そのため自己符号化器のparamsはない
# 自己符号化器で重みとバイアスの初期値を求めたあとfine-tuningでそのまま重みとバイアスを引き継げる
autoencoder_layer = DenoisingAutoencoder(numpy_rng=numpy_rng,
theano_rng=theano_rng,
input=layer_input,
n_visible=input_size,
n_hidden=hidden_layers_sizes[i],
W=hidden_layer.W, # 隠れ層の重みを共有
bhid=hidden_layer.b) # 隠れ層のバイアスを共有
self.autoencoder_layers.append(autoencoder_layer)
# MNISTの分類ができるように最後にロジスティック回帰層を追加
self.log_layer = LogisticRegression(
input=self.hidden_layers[-1].output,
n_in=hidden_layers_sizes[-1],
n_out=n_outs)
self.params.extend(self.log_layer.params)
# fine-tuning時のコスト関数を計算するシンボル
# 多層パーセプトロンと同じく負の対数尤度
self.finetune_cost = self.log_layer.negative_log_likelihood(self.y)
# 分類の誤差率を計算するシンボル
self.errors = self.log_layer.errors(self.y)
def pretraining_functions(self, train_set_x, batch_size):
"""自己符号化器を学習するpre-training用の関数リストを返す
教師なし学習なのでxのみを渡す"""
# 学習に使うミニバッチのインデックス
index = T.lscalar('index')
# 複数の自己符号化器で異なる値を指定できるようにシンボル化する
corruption_level = T.scalar('corruption')
learning_rate = T.scalar('lr')
batch_begin = index * batch_size
batch_end = batch_begin + batch_size
# 自己符号化器を学習する関数を生成
# 入力層に近い方から順番に追加する
pretrain_functions = []
for autoencoder in self.autoencoder_layers:
# 誤差と更新式を計算するシンボルを取得
cost, updates = autoencoder.get_cost_updates(corruption_level, learning_rate)
fn = theano.function(
inputs=[
index,
# Paramにした引数を関数呼び出し時に与えるときはPython変数名ではなく、
# Tensorの引数の名前(corruption, lr)で指定できる
theano.Param(corruption_level, default=0.2),
theano.Param(learning_rate, default=0.1)
],
outputs=cost,
updates=updates,
givens={
self.x: train_set_x[batch_begin:batch_end]
}
)
pretrain_functions.append(fn)
return pretrain_functions
def build_finetune_functions(self, datasets, batch_size, learning_rate):
"""fine-tuning用の関数を返す"""
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
index = T.lscalar('index')
gparams = T.grad(self.finetune_cost, self.params)
updates = [
(param, param - gparam * learning_rate) for param, gparam in zip(self.params, gparams)
]
train_model = theano.function(
inputs=[index],
outputs=self.finetune_cost,
updates=updates,
givens={
self.x: train_set_x[index * batch_size: (index + 1) * batch_size],
self.y: train_set_y[index * batch_size: (index + 1) * batch_size]
},
name='train')
test_score_i = theano.function(
inputs=[index],
outputs=self.errors,
givens={
self.x: test_set_x[index * batch_size: (index + 1) * batch_size],
self.y: test_set_y[index * batch_size: (index + 1) * batch_size]
},
name='test')
valid_score_i = theano.function(
inputs=[index],
outputs=self.errors,
givens={
self.x: valid_set_x[index * batch_size: (index + 1) * batch_size],
self.y: valid_set_y[index * batch_size: (index + 1) * batch_size]
},
name='validate')
def valid_score():
"""各ミニバッチのvalid誤差のリストを返す"""
return [valid_score_i(i) for i in xrange(n_valid_batches)]
def test_score():
"""各ミニバッチのtest誤差のリストを返す"""
return [test_score_i(i) for i in xrange(n_test_batches)]
return train_model, valid_score, test_score
def test_stacked_autoencoder(finetune_lr=0.1, pretraining_epochs=15,
pretrain_lr=0.001, training_epochs=200,
dataset='mnist.pkl.gz', batch_size=1,
hidden_layers_sizes=[1000, 1000, 1000],
corruption_levels=[0.1, 0.2, 0.3],
valerr_file='validation_error.txt',
testerr_file='test_error.txt'):
datasets = load_data(dataset)
train_set_x = datasets[0][0]
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
numpy_rng = np.random.RandomState(89677)
print "building the model ..."
sda = StackedDenoisingAutoencoder(
numpy_rng,
28 * 28,
hidden_layers_sizes,
10,
corruption_levels)
# Pre-training
print "getting the pre-training functions ..."
pretraining_functions = sda.pretraining_functions(train_set_x=train_set_x,
batch_size=batch_size)
print "pre-training the model ..."
start_time = timeit.default_timer()
for i in xrange(sda.n_layers):
# pre-trainingのエポック数は固定
for epoch in xrange(pretraining_epochs):
c = []
for batch_index in xrange(n_train_batches):
c.append(pretraining_functions[i](index=batch_index,
corruption=corruption_levels[i],
lr=pretrain_lr))
print "Pre-training layer %i, epoch %d, cost %f" % (i, epoch, np.mean(c))
end_time = timeit.default_timer()
training_time = end_time - start_time
print "The pretraining code for file %s ran for %.2fm" % (os.path.split(__file__)[1], training_time / 60.0)
# Fine-tuning
print "getting the fine-tuning functions ..."
train_model, validate_model, test_model = sda.build_finetune_functions(
datasets=datasets,
batch_size=batch_size,
learning_rate=finetune_lr
)
print "fine-tuning the model ..."
# eary-stoppingのパラメータ
patience = 10 * n_train_batches
patience_increase = 2.0
improvement_threshold = 0.995
validation_frequency = min(n_train_batches, patience / 2)
best_validation_loss = np.inf
test_score = 0
start_time = timeit.default_timer()
epoch = 0
done_looping = False
fp1 = open(valerr_file, "w")
fp2 = open(testerr_file, "w")
while (epoch < training_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
train_model(minibatch_index)
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
validation_losses = validate_model()
this_validation_loss = np.mean(validation_losses)
print "epoch %i, minibatch %i/%i, validation error %f %%" % (epoch, minibatch_index + 1, n_train_batches, this_validation_loss * 100)
fp1.write("%d\t%f\n" % (epoch, this_validation_loss * 100))
if this_validation_loss < best_validation_loss:
if this_validation_loss < best_validation_loss * improvement_threshold:
# 十分改善したならまだ改善の余地があるためpatienceを上げてより多くループを回せるようにする
patience = max(patience, iter * patience_increase)
print "*** iter %d / patience %d" % (iter, patience)
best_validation_loss = this_validation_loss
best_iter = iter
test_losses = test_model()
test_score = np.mean(test_losses)
print " epoch %i, minibatch %i/%i, test error of best model %f %%" % (epoch, minibatch_index + 1, n_train_batches, test_score * 100)
fp2.write("%d\t%f\n" % (epoch, test_score * 100))
# patienceを超えたらループを終了
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
training_time = (end_time - start_time)
print "Optimization complete with the best validation score of %f %%, on iteration %i, with test performance %f %%" \
% (best_validation_loss * 100.0, best_iter + 1, test_score * 100)
print "The training code for file %s ran for %.2fm" % (os.path.split(__file__)[1], training_time / 60.0)
fp1.close()
fp2.close()
if __name__ == "__main__":
test_stacked_autoencoder(dataset="../data/mnist.pkl.gz",
hidden_layers_sizes=[1000, 1000, 1000],
corruption_levels=[0.1, 0.2, 0.3])
| #coding: utf-8
import os
import timeit
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from logistic_sgd import LogisticRegression, load_data
from mlp import HiddenLayer
from denoising_autoencoder import DenoisingAutoencoder
class StackedDenoisingAutoencoder(object):
def __init__(self,
numpy_rng,
n_ins,
hidden_layers_sizes,
n_outs,
corruption_levels):
# 隠れ層オブジェクトのリスト
self.hidden_layers = []
# 自己符号化器のリスト
self.autoencoder_layers = []
# パラメータのリスト
self.params = []
# 隠れ層の数
self.n_layers = len(hidden_layers_sizes)
assert self.n_layers > 0
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
# 学習データのミニバッチ(入力データと正解ラベル)を表すシンボル
# これまでの実装と違って複数のメソッド内で使うので属性にしている
self.x = T.matrix('x')
self.y = T.ivector('y')
# ネットワークを構築
# 隠れ層の数だけループして積み上げていく
for i in xrange(self.n_layers):
# ユニット数
if i == 0:
input_size = n_ins
else:
input_size = hidden_layers_sizes[i - 1]
# 隠れ層への入力データ
if i == 0:
layer_input = self.x
else:
layer_input = self.hidden_layers[-1].output
# 多層パーセプトロンの隠れ層
# fine-tuningで重みを更新するため
hidden_layer = HiddenLayer(rng=numpy_rng,
input=layer_input,
n_in=input_size,
n_out=hidden_layers_sizes[i],
activation=T.nnet.sigmoid)
self.hidden_layers.append(hidden_layer)
self.params.extend(hidden_layer.params)
# 自己符号化器だが重みは多層パーセプトロンの隠れ層と共有
# そのため自己符号化器のparamsはない
# 自己符号化器で重みとバイアスの初期値を求めたあとfine-tuningでそのまま重みとバイアスを引き継げる
autoencoder_layer = DenoisingAutoencoder(numpy_rng=numpy_rng,
theano_rng=theano_rng,
input=layer_input,
n_visible=input_size,
n_hidden=hidden_layers_sizes[i],
W=hidden_layer.W, # 隠れ層の重みを共有
bhid=hidden_layer.b) # 隠れ層のバイアスを共有
self.autoencoder_layers.append(autoencoder_layer)
# MNISTの分類ができるように最後にロジスティック回帰層を追加
self.log_layer = LogisticRegression(
input=self.hidden_layers[-1].output,
n_in=hidden_layers_sizes[-1],
n_out=n_outs)
self.params.extend(self.log_layer.params)
# fine-tuning時のコスト関数を計算するシンボル
# 多層パーセプトロンと同じく負の対数尤度
self.finetune_cost = self.log_layer.negative_log_likelihood(self.y)
# 分類の誤差率を計算するシンボル
self.errors = self.log_layer.errors(self.y)
def pretraining_functions(self, train_set_x, batch_size):
"""自己符号化器を学習するpre-training用の関数リストを返す
教師なし学習なのでxのみを渡す"""
# 学習に使うミニバッチのインデックス
index = T.lscalar('index')
# 複数の自己符号化器で異なる値を指定できるようにシンボル化する
corruption_level = T.scalar('corruption')
learning_rate = T.scalar('lr')
batch_begin = index * batch_size
batch_end = batch_begin + batch_size
# 自己符号化器を学習する関数を生成
# 入力層に近い方から順番に追加する
pretrain_functions = []
for autoencoder in self.autoencoder_layers:
# 誤差と更新式を計算するシンボルを取得
cost, updates = autoencoder.get_cost_updates(corruption_level, learning_rate)
fn = theano.function(
inputs=[
index,
# Paramにした引数を関数呼び出し時に与えるときはPython変数名ではなく、
# Tensorの引数の名前(corruption, lr)で指定できる
theano.Param(corruption_level, default=0.2),
theano.Param(learning_rate, default=0.1)
],
outputs=cost,
updates=updates,
givens={
self.x: train_set_x[batch_begin:batch_end]
}
)
pretrain_functions.append(fn)
return pretrain_functions
def build_finetune_functions(self, datasets, batch_size, learning_rate):
"""fine-tuning用の関数を返す"""
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
index = T.lscalar('index')
gparams = T.grad(self.finetune_cost, self.params)
updates = [
(param, param - gparam * learning_rate) for param, gparam in zip(self.params, gparams)
]
train_model = theano.function(
inputs=[index],
outputs=self.finetune_cost,
updates=updates,
givens={
self.x: train_set_x[index * batch_size: (index + 1) * batch_size],
self.y: train_set_y[index * batch_size: (index + 1) * batch_size]
},
name='train')
test_score_i = theano.function(
inputs=[index],
outputs=self.errors,
givens={
self.x: test_set_x[index * batch_size: (index + 1) * batch_size],
self.y: test_set_y[index * batch_size: (index + 1) * batch_size]
},
name='test')
valid_score_i = theano.function(
inputs=[index],
outputs=self.errors,
givens={
self.x: valid_set_x[index * batch_size: (index + 1) * batch_size],
self.y: valid_set_y[index * batch_size: (index + 1) * batch_size]
},
name='validate')
def valid_score():
"""各ミニバッチのvalid誤差のリストを返す"""
return [valid_score_i(i) for i in xrange(n_valid_batches)]
def test_score():
"""各ミニバッチのtest誤差のリストを返す"""
return [test_score_i(i) for i in xrange(n_test_batches)]
return train_model, valid_score, test_score
def test_stacked_autoencoder(finetune_lr=0.1, pretraining_epochs=15,
pretrain_lr=0.001, training_epochs=200,
dataset='mnist.pkl.gz', batch_size=1,
hidden_layers_sizes=[1000, 1000, 1000],
corruption_levels=[0.1, 0.2, 0.3],
valerr_file='validation_error.txt',
testerr_file='test_error.txt'):
datasets = load_data(dataset)
train_set_x = datasets[0][0]
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
numpy_rng = np.random.RandomState(89677)
print "building the model ..."
sda = StackedDenoisingAutoencoder(
numpy_rng,
28 * 28,
hidden_layers_sizes,
10,
corruption_levels)
# Pre-training
print "getting the pre-training functions ..."
pretraining_functions = sda.pretraining_functions(train_set_x=train_set_x,
batch_size=batch_size)
print "pre-training the model ..."
start_time = timeit.default_timer()
for i in xrange(sda.n_layers):
# pre-trainingのエポック数は固定
for epoch in xrange(pretraining_epochs):
c = []
for batch_index in xrange(n_train_batches):
c.append(pretraining_functions[i](index=batch_index,
corruption=corruption_levels[i],
lr=pretrain_lr))
print "Pre-training layer %i, epoch %d, cost %f" % (i, epoch, np.mean(c))
end_time = timeit.default_timer()
training_time = end_time - start_time
print "The pretraining code for file %s ran for %.2fm" % (os.path.split(__file__)[1], training_time / 60.0)
# Fine-tuning
print "getting the fine-tuning functions ..."
train_model, validate_model, test_model = sda.build_finetune_functions(
datasets=datasets,
batch_size=batch_size,
learning_rate=finetune_lr
)
print "fine-tuning the model ..."
# eary-stoppingのパラメータ
patience = 10 * n_train_batches
patience_increase = 2.0
improvement_threshold = 0.995
validation_frequency = min(n_train_batches, patience / 2)
best_validation_loss = np.inf
test_score = 0
start_time = timeit.default_timer()
epoch = 0
done_looping = False
fp1 = open(valerr_file, "w")
fp2 = open(testerr_file, "w")
while (epoch < training_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
train_model(minibatch_index)
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
validation_losses = validate_model()
this_validation_loss = np.mean(validation_losses)
print "epoch %i, minibatch %i/%i, validation error %f %%" % (epoch, minibatch_index + 1, n_train_batches, this_validation_loss * 100)
fp1.write("%d\t%f\n" % (epoch, this_validation_loss * 100))
if this_validation_loss < best_validation_loss:
if this_validation_loss < best_validation_loss * improvement_threshold:
# 十分改善したならまだ改善の余地があるためpatienceを上げてより多くループを回せるようにする
patience = max(patience, iter * patience_increase)
print "*** iter %d / patience %d" % (iter, patience)
best_validation_loss = this_validation_loss
best_iter = iter
test_losses = test_model()
test_score = np.mean(test_losses)
print " epoch %i, minibatch %i/%i, test error of best model %f %%" % (epoch, minibatch_index + 1, n_train_batches, test_score * 100)
fp2.write("%d\t%f\n" % (epoch, test_score * 100))
# patienceを超えたらループを終了
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
training_time = (end_time - start_time)
print "Optimization complete with the best validation score of %f %%, on iteration %i, with test performance %f %%" \
% (best_validation_loss * 100.0, best_iter + 1, test_score * 100)
print "The training code for file %s ran for %.2fm" % (os.path.split(__file__)[1], training_time / 60.0)
fp1.close()
fp2.close()
if __name__ == "__main__":
test_stacked_autoencoder(dataset="../data/mnist.pkl.gz",
hidden_layers_sizes=[1000, 1000, 1000],
corruption_levels=[0.1, 0.2, 0.3]) | ja | 0.998239 | #coding: utf-8 # 隠れ層オブジェクトのリスト # 自己符号化器のリスト # パラメータのリスト # 隠れ層の数 # 学習データのミニバッチ(入力データと正解ラベル)を表すシンボル # これまでの実装と違って複数のメソッド内で使うので属性にしている # ネットワークを構築 # 隠れ層の数だけループして積み上げていく # ユニット数 # 隠れ層への入力データ # 多層パーセプトロンの隠れ層 # fine-tuningで重みを更新するため # 自己符号化器だが重みは多層パーセプトロンの隠れ層と共有 # そのため自己符号化器のparamsはない # 自己符号化器で重みとバイアスの初期値を求めたあとfine-tuningでそのまま重みとバイアスを引き継げる # 隠れ層の重みを共有 # 隠れ層のバイアスを共有 # MNISTの分類ができるように最後にロジスティック回帰層を追加 # fine-tuning時のコスト関数を計算するシンボル # 多層パーセプトロンと同じく負の対数尤度 # 分類の誤差率を計算するシンボル 自己符号化器を学習するpre-training用の関数リストを返す 教師なし学習なのでxのみを渡す # 学習に使うミニバッチのインデックス # 複数の自己符号化器で異なる値を指定できるようにシンボル化する # 自己符号化器を学習する関数を生成 # 入力層に近い方から順番に追加する # 誤差と更新式を計算するシンボルを取得 # Paramにした引数を関数呼び出し時に与えるときはPython変数名ではなく、 # Tensorの引数の名前(corruption, lr)で指定できる fine-tuning用の関数を返す 各ミニバッチのvalid誤差のリストを返す 各ミニバッチのtest誤差のリストを返す # Pre-training # pre-trainingのエポック数は固定 # Fine-tuning # eary-stoppingのパラメータ # 十分改善したならまだ改善の余地があるためpatienceを上げてより多くループを回せるようにする # patienceを超えたらループを終了 | 2.187195 | 2 |
rcbi/rcbi/spiders/InfiniteFPV.py | tannewt/rcbuild.info-scrape | 2 | 6621377 | import scrapy
from scrapy import log
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from rcbi.items import Part
STOCK_STATE_MAP = {"available-on-backorder": "backordered",
"in-stock": "in_stock",
"out-of-stock": "out_of_stock"}
class InfiniteFPVSpider(CrawlSpider):
name = "infinitefpv"
allowed_domains = ["infinitefpv.com"]
start_urls = ["http://www.infinitefpv.com/shop/"]
rules = (
Rule(LinkExtractor(restrict_css=[".product-categories"])),
Rule(LinkExtractor(restrict_css=[".inner_product"]), callback='parse_item'),
)
def parse_item(self, response):
item = Part()
item["site"] = self.name
product_name = response.css("[itemprop='name']")
if not product_name:
return
item["name"] = product_name[0].xpath("//h1/text()").extract_first().strip()
variant = {}
item["variants"] = [variant]
variant["url"] = response.url
price = response.css(".price .amount")
if price:
variant["price"] = price.css("::text").extract_first()
stock = response.css(".stock")
if stock:
c = stock.css("::attr(class)").extract_first().split()[-1]
if c in STOCK_STATE_MAP:
variant["stock_state"] = STOCK_STATE_MAP[c]
variant["stock_text"] = stock.css("::text").extract_first().strip()
else:
print(c)
item["manufacturer"] = "infiniteFPV"
return item
| import scrapy
from scrapy import log
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from rcbi.items import Part
STOCK_STATE_MAP = {"available-on-backorder": "backordered",
"in-stock": "in_stock",
"out-of-stock": "out_of_stock"}
class InfiniteFPVSpider(CrawlSpider):
name = "infinitefpv"
allowed_domains = ["infinitefpv.com"]
start_urls = ["http://www.infinitefpv.com/shop/"]
rules = (
Rule(LinkExtractor(restrict_css=[".product-categories"])),
Rule(LinkExtractor(restrict_css=[".inner_product"]), callback='parse_item'),
)
def parse_item(self, response):
item = Part()
item["site"] = self.name
product_name = response.css("[itemprop='name']")
if not product_name:
return
item["name"] = product_name[0].xpath("//h1/text()").extract_first().strip()
variant = {}
item["variants"] = [variant]
variant["url"] = response.url
price = response.css(".price .amount")
if price:
variant["price"] = price.css("::text").extract_first()
stock = response.css(".stock")
if stock:
c = stock.css("::attr(class)").extract_first().split()[-1]
if c in STOCK_STATE_MAP:
variant["stock_state"] = STOCK_STATE_MAP[c]
variant["stock_text"] = stock.css("::text").extract_first().strip()
else:
print(c)
item["manufacturer"] = "infiniteFPV"
return item
| none | 1 | 2.622585 | 3 | |
fdk_client/platform/models/WebhookValidator.py | kavish-d/fdk-client-python | 0 | 6621378 | """Class Validators."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
class WebhookValidator:
class getSubscribersByCompany(BaseSchema):
page_no = fields.Int(required=False)
page_size = fields.Int(required=False)
company_id = fields.Int(required=False)
extension_id = fields.Str(required=False)
class registerSubscriberToEvent(BaseSchema):
company_id = fields.Int(required=False)
class updateSubscriberConfig(BaseSchema):
company_id = fields.Int(required=False)
class getSubscribersByExtensionId(BaseSchema):
page_no = fields.Int(required=False)
page_size = fields.Int(required=False)
company_id = fields.Int(required=False)
extension_id = fields.Str(required=False)
class getSubscriberById(BaseSchema):
company_id = fields.Int(required=False)
subscriber_id = fields.Int(required=False)
class fetchAllEventConfigurations(BaseSchema):
company_id = fields.Int(required=False)
| """Class Validators."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
class WebhookValidator:
class getSubscribersByCompany(BaseSchema):
page_no = fields.Int(required=False)
page_size = fields.Int(required=False)
company_id = fields.Int(required=False)
extension_id = fields.Str(required=False)
class registerSubscriberToEvent(BaseSchema):
company_id = fields.Int(required=False)
class updateSubscriberConfig(BaseSchema):
company_id = fields.Int(required=False)
class getSubscribersByExtensionId(BaseSchema):
page_no = fields.Int(required=False)
page_size = fields.Int(required=False)
company_id = fields.Int(required=False)
extension_id = fields.Str(required=False)
class getSubscriberById(BaseSchema):
company_id = fields.Int(required=False)
subscriber_id = fields.Int(required=False)
class fetchAllEventConfigurations(BaseSchema):
company_id = fields.Int(required=False)
| en | 0.802057 | Class Validators. | 2.255635 | 2 |
flow_models/tests/fit.py | piotrjurkiewicz/flow_stats | 9 | 6621379 | <reponame>piotrjurkiewicz/flow_stats
#!/usr/bin/python3
import argparse
import numpy as np
import scipy.stats
from ..fit import fit_mix
from ..lib.mix import to_json
from ..lib.util import logmsg, measure_memory
def _fit_and_report(title, samples, initial_mix, max_iter):
    """Concatenate per-component samples, fit a mixture and log it as JSON.

    Args:
        title: label logged before fitting (e.g. the distribution family).
        samples: list of 1-D numpy arrays, one per mixture component.
        initial_mix: starting mixture spec passed to ``fit_mix``:
            a list of ``[weight, family_name, params_tuple]`` entries.
        max_iter: maximum number of fitting iterations.
    """
    logmsg(title)
    vec = np.concatenate(samples)
    mix = fit_mix(vec, initial_mix, max_iter=max_iter)
    logmsg(to_json(mix))


def test(max_iter):
    """Smoke-test ``fit_mix`` on three synthetic two-component mixtures.

    Each case draws 500k samples per component, seeds the fit with a
    (possibly perturbed) initial mixture and logs the recovered parameters.
    The three fit/log sections were previously triplicated inline; they now
    share ``_fit_and_report``.
    """
    _fit_and_report(
        "genpareto lognorm",
        [scipy.stats.genpareto.rvs(1.450289555235508, 16, 23.204632883768134, 500000),
         scipy.stats.lognorm.rvs(5, 0, 20, 500000)],
        [[0.2, 'genpareto', (1.450289555235508, 16, 23.204632883768134)],
         [0.8, 'lognorm', (5, 0, 20)]],
        max_iter,
    )
    _fit_and_report(
        "gamma",
        [scipy.stats.gamma.rvs(5.0, 0, 2.0, 500000),
         scipy.stats.gamma.rvs(10.0, 0, 1.0, 500000)],
        [[0.1, 'gamma', (2.0, 0, 2.0)],
         [0.1, 'gamma', (6.0, 0, 1.0)]],
        max_iter,
    )
    _fit_and_report(
        "weibull_min",
        [scipy.stats.weibull_min.rvs(0.763166697701473, 0, 1.805880227867377e02, 500000),
         scipy.stats.weibull_min.rvs(0.984428347376388, 0, 9.685081880588410e04, 500000)],
        [[0.1, 'weibull_min', (0.603166697701473, 0, 1.205880227867377e02)],
         [0.1, 'weibull_min', (0.904428347376388, 0, 6.685081880588410e04)]],
        max_iter,
    )
def main():
    """Parse command-line options and run the fitting smoke tests."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description=__doc__,
    )
    parser.add_argument('-i', default=100, type=int,
                        help='number of iterations')
    parser.add_argument('--measure-memory', action='store_true',
                        help='collect and print memory statistics')
    args = parser.parse_args()
    # Optionally wrap the run in memory instrumentation.
    with measure_memory(args.measure_memory):
        test(args.i)


if __name__ == '__main__':
    main()
| #!/usr/bin/python3
import argparse
import numpy as np
import scipy.stats
from ..fit import fit_mix
from ..lib.mix import to_json
from ..lib.util import logmsg, measure_memory
def test(max_iter):
logmsg("genpareto lognorm")
a = scipy.stats.genpareto.rvs(1.450289555235508, 16, 23.204632883768134, 500000)
b = scipy.stats.lognorm.rvs(5, 0, 20, 500000)
vec = np.concatenate([a, b])
mix = [
[0.2, 'genpareto', (1.450289555235508, 16, 23.204632883768134)],
[0.8, 'lognorm', (5, 0, 20)]
]
mix = fit_mix(vec, mix, max_iter=max_iter)
logmsg(to_json(mix))
logmsg("gamma")
a = scipy.stats.gamma.rvs(5.0, 0, 2.0, 500000)
b = scipy.stats.gamma.rvs(10.0, 0, 1.0, 500000)
vec = np.concatenate([a, b])
mix = [
[0.1, 'gamma', (2.0, 0, 2.0)],
[0.1, 'gamma', (6.0, 0, 1.0)]
]
mix = fit_mix(vec, mix, max_iter=max_iter)
logmsg(to_json(mix))
logmsg("weibull_min")
a = scipy.stats.weibull_min.rvs(0.763166697701473, 0, 1.805880227867377e02, 500000)
b = scipy.stats.weibull_min.rvs(0.984428347376388, 0, 9.685081880588410e04, 500000)
vec = np.concatenate([a, b])
mix = [
[0.1, 'weibull_min', (0.603166697701473, 0, 1.205880227867377e02)],
[0.1, 'weibull_min', (0.904428347376388, 0, 6.685081880588410e04)]
]
mix = fit_mix(vec, mix, max_iter=max_iter)
logmsg(to_json(mix))
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=__doc__)
parser.add_argument('-i', default=100, type=int, help='number of iterations')
parser.add_argument('--measure-memory', action='store_true', help='collect and print memory statistics')
app_args = parser.parse_args()
with measure_memory(app_args.measure_memory):
test(app_args.i)
if __name__ == '__main__':
main() | fr | 0.386793 | #!/usr/bin/python3 | 2.157041 | 2 |
example/tests/basket/test_basket_models.py | icvntechstudio/django-salesman | 222 | 6621380 | <reponame>icvntechstudio/django-salesman
import pytest
from django.db import transaction
from django.db.models.deletion import ProtectedError
from salesman.basket.models import BASKET_ID_SESSION_KEY, Basket, BasketItem
from shop.models import Phone, PhoneVariant, Product
@pytest.mark.django_db
def test_get_or_create_basket_from_request(rf, django_user_model):
    """Anonymous baskets live in the session and merge into the user basket."""
    request = rf.get('/')
    # test session basket created
    basket, created = Basket.objects.get_or_create_from_request(request)
    assert created
    assert request.session[BASKET_ID_SESSION_KEY] == basket.id
    _, created = Basket.objects.get_or_create_from_request(request)
    assert not created
    # test user basket created
    request.user = django_user_model.objects.create_user(
        username='user', password='password'
    )
    basket, created = Basket.objects.get_or_create_from_request(request)
    assert created
    assert Basket.objects.count() == 1  # session basket is merged and deleted
    assert basket.owner == request.user
    assert BASKET_ID_SESSION_KEY not in request.session
    _, created = Basket.objects.get_or_create_from_request(request)
    assert not created
    # test multiple baskets merge, 1 should be left
    Basket.objects.create(owner=request.user)
    basket, _ = Basket.objects.get_or_create_from_request(request)
    assert request.user.basket_set.count() == 1
@pytest.mark.django_db
def test_basket_str():
    """Unsaved basket renders a placeholder; a saved one renders its pk."""
    basket = Basket()
    assert str(basket) == "(unsaved)"
    basket.save()
    assert str(basket) == "1"
@pytest.mark.django_db
def test_basket_update(rf):
    """``update()`` computes subtotal/total and collects modifier rows."""
    request = rf.get('/')
    basket, _ = Basket.objects.get_or_create_from_request(request)
    product = Product.objects.create(name="Test", price=30)
    basket.add(product, quantity=2)
    # Totals are lazily attached by update(); absent before the call.
    assert not hasattr(basket, 'extra_rows')
    assert not hasattr(basket, 'subtotal')
    assert not hasattr(basket, 'total')
    basket.update(request)
    total = 60
    total_with_modifiers = total - (
        total / 10
    )  # 10% discount modifier is already active
    assert basket.subtotal == total
    assert basket.total == total_with_modifiers
    assert len(basket.extra_rows) == 1
@pytest.mark.django_db
def test_basket_item_manipulation(rf):
    """add/remove/clear keep ``count`` (distinct items) and ``quantity`` consistent."""
    request = rf.get('/')
    basket, _ = Basket.objects.get_or_create_from_request(request)
    product = Product.objects.create(name="Test")
    # test add to basket
    item = basket.add(product)
    assert basket.count == 1
    assert item.product == product
    basket.add(product)
    assert basket.quantity == 2
    # A custom ref creates a separate line item for the same product.
    basket.add(product, ref="1-special")
    assert basket.count == 2
    assert basket.quantity == 3
    # test remove from basket
    basket.remove(item.ref)
    basket.get_items()  # trigger storing `_cached_items`.
    assert basket.count == 1
    assert basket.quantity == 1
    basket.remove('non-existant-ref')  # fail silently no item remove
    # test basket clear
    basket.clear()
    assert basket.count == basket.quantity == 0
@pytest.mark.django_db
def test_basket_merge(rf):
    """Merging folds the other basket's items in and deletes that basket."""
    request = rf.get('/')
    basket, _ = Basket.objects.get_or_create_from_request(request)
    product = Product.objects.create(name="Test")
    product_2 = Product.objects.create(name="Test #2")
    basket.add(product)
    basket_2 = Basket.objects.create()
    basket_2.add(product)
    basket_2.add(product_2)
    assert Basket.objects.count() == 2
    basket.merge(basket_2)
    # Only the target basket survives, with combined items/quantities.
    assert Basket.objects.count() == 1
    assert basket.count == 2
    assert basket.quantity == 3
@pytest.mark.django_db
def test_basket_item(rf):
    """A saved item regenerates its ref and proxies name/code from the product."""
    request = rf.get('/')
    basket, _ = Basket.objects.get_or_create_from_request(request)
    product = Product.objects.create(name="Test")
    item = basket.add(product)
    # test save
    item.ref = None
    item.save(update_fields=['ref'])
    assert item.ref == BasketItem.get_product_ref(product) == 'shopproduct-1'
    assert str(item) == f"1x {product}"
    assert item.name == product.name
    assert item.code == product.code
@pytest.mark.django_db
def test_basket_item_update(rf):
    """Item ``update()`` attaches unit_price/subtotal/total/extra_rows."""
    request = rf.get('/')
    basket, _ = Basket.objects.get_or_create_from_request(request)
    price = 30
    product = Product.objects.create(name="Test", price=price)
    basket.add(product)
    item = basket.get_items()[0]
    # Pricing attributes are lazily attached by update(); absent before.
    assert not hasattr(item, 'unit_price')
    assert not hasattr(item, 'subtotal')
    assert not hasattr(item, 'total')
    assert not hasattr(item, 'extra_rows')
    item.update(request)
    assert item.unit_price == price
    assert item.subtotal == price
    assert item.total == price
    assert len(item.extra_rows) == 0
@pytest.mark.django_db
def test_basket_item_protect(rf):
    """Products referenced by basket items are protected from deletion."""
    request = rf.get('/')
    basket, _ = Basket.objects.get_or_create_from_request(request)
    price = 30
    product = Product.objects.create(name="Test", price=price)
    phone = Phone.objects.create(name="Phone")
    phone_product = PhoneVariant.objects.create(phone=phone)
    item = basket.add(product)
    basket.add(phone_product)
    # Deleting a product (or its parent) while referenced raises ProtectedError.
    with transaction.atomic():
        with pytest.raises(ProtectedError):
            product.delete()
    with transaction.atomic():
        with pytest.raises(ProtectedError):
            phone.delete()
    # Once the item is removed the product can be deleted normally.
    basket.remove(item.ref)
    product.delete()
| import pytest
from django.db import transaction
from django.db.models.deletion import ProtectedError
from salesman.basket.models import BASKET_ID_SESSION_KEY, Basket, BasketItem
from shop.models import Phone, PhoneVariant, Product
@pytest.mark.django_db
def test_get_or_create_basket_from_request(rf, django_user_model):
request = rf.get('/')
# test session basket created
basket, created = Basket.objects.get_or_create_from_request(request)
assert created
assert request.session[BASKET_ID_SESSION_KEY] == basket.id
_, created = Basket.objects.get_or_create_from_request(request)
assert not created
# test user basket created
request.user = django_user_model.objects.create_user(
username='user', password='password'
)
basket, created = Basket.objects.get_or_create_from_request(request)
assert created
assert Basket.objects.count() == 1 # session basket is merged and deleted
assert basket.owner == request.user
assert BASKET_ID_SESSION_KEY not in request.session
_, created = Basket.objects.get_or_create_from_request(request)
assert not created
# test multiple baskets merge, 1 should be left
Basket.objects.create(owner=request.user)
basket, _ = Basket.objects.get_or_create_from_request(request)
assert request.user.basket_set.count() == 1
@pytest.mark.django_db
def test_basket_str():
basket = Basket()
assert str(basket) == "(unsaved)"
basket.save()
assert str(basket) == "1"
@pytest.mark.django_db
def test_basket_update(rf):
request = rf.get('/')
basket, _ = Basket.objects.get_or_create_from_request(request)
product = Product.objects.create(name="Test", price=30)
basket.add(product, quantity=2)
assert not hasattr(basket, 'extra_rows')
assert not hasattr(basket, 'subtotal')
assert not hasattr(basket, 'total')
basket.update(request)
total = 60
total_with_modifiers = total - (
total / 10
) # 10% discount modifier is already active
assert basket.subtotal == total
assert basket.total == total_with_modifiers
assert len(basket.extra_rows) == 1
@pytest.mark.django_db
def test_basket_item_manipulation(rf):
request = rf.get('/')
basket, _ = Basket.objects.get_or_create_from_request(request)
product = Product.objects.create(name="Test")
# test add to basket
item = basket.add(product)
assert basket.count == 1
assert item.product == product
basket.add(product)
assert basket.quantity == 2
basket.add(product, ref="1-special")
assert basket.count == 2
assert basket.quantity == 3
# test remove from basket
basket.remove(item.ref)
basket.get_items() # trigger storing `_cached_items`.
assert basket.count == 1
assert basket.quantity == 1
basket.remove('non-existant-ref') # fail silently no item remove
# test basket clear
basket.clear()
assert basket.count == basket.quantity == 0
@pytest.mark.django_db
def test_basket_merge(rf):
request = rf.get('/')
basket, _ = Basket.objects.get_or_create_from_request(request)
product = Product.objects.create(name="Test")
product_2 = Product.objects.create(name="Test #2")
basket.add(product)
basket_2 = Basket.objects.create()
basket_2.add(product)
basket_2.add(product_2)
assert Basket.objects.count() == 2
basket.merge(basket_2)
assert Basket.objects.count() == 1
assert basket.count == 2
assert basket.quantity == 3
@pytest.mark.django_db
def test_basket_item(rf):
request = rf.get('/')
basket, _ = Basket.objects.get_or_create_from_request(request)
product = Product.objects.create(name="Test")
item = basket.add(product)
# test save
item.ref = None
item.save(update_fields=['ref'])
assert item.ref == BasketItem.get_product_ref(product) == 'shopproduct-1'
assert str(item) == f"1x {product}"
assert item.name == product.name
assert item.code == product.code
@pytest.mark.django_db
def test_basket_item_update(rf):
request = rf.get('/')
basket, _ = Basket.objects.get_or_create_from_request(request)
price = 30
product = Product.objects.create(name="Test", price=price)
basket.add(product)
item = basket.get_items()[0]
assert not hasattr(item, 'unit_price')
assert not hasattr(item, 'subtotal')
assert not hasattr(item, 'total')
assert not hasattr(item, 'extra_rows')
item.update(request)
assert item.unit_price == price
assert item.subtotal == price
assert item.total == price
assert len(item.extra_rows) == 0
@pytest.mark.django_db
def test_basket_item_protect(rf):
request = rf.get('/')
basket, _ = Basket.objects.get_or_create_from_request(request)
price = 30
product = Product.objects.create(name="Test", price=price)
phone = Phone.objects.create(name="Phone")
phone_product = PhoneVariant.objects.create(phone=phone)
item = basket.add(product)
basket.add(phone_product)
with transaction.atomic():
with pytest.raises(ProtectedError):
product.delete()
with transaction.atomic():
with pytest.raises(ProtectedError):
phone.delete()
basket.remove(item.ref)
product.delete() | en | 0.762984 | # test session basket created # test user basket created # session basket is merged and deleted # test multiple baskets merge, 1 should be left # 10% discount modifier is already active # test add to basket # test remove from basket # trigger storing `_cached_items`. # fail silently no item remove # test basket clear #2") # test save | 2.269507 | 2 |
exercise806.py | vchatchai/python101 | 0 | 6621381 | <filename>exercise806.py<gh_stars>0
def a(n):
    """Recursive sequence from the exercise (behavior unchanged).

    a(1) = 1, a(2) = 2; for even n > 0, a(n) = a(n/2) + a(n/2) % 10;
    for odd n > 2, a(n) = a(int((n-1)/2) - 1) * ((n-1)/2).
    Any other n (e.g. n <= 0) falls through and returns None, exactly
    like the original implementation. Note that the recursive calls use
    true division, so results may be floats.
    """
    # Base cases first.
    if n == 1:
        return 1
    if n == 2:
        return 2
    parity = n % 2
    if parity == 0 and n > 0:
        # Even case: value at the half index plus its value mod 10.
        return a(n / 2) + a(n / 2) % 10
    if parity == 1 and n > 0:
        # Odd case: shifted half-index value scaled by (n - 1) / 2.
        half = (n - 1) / 2
        return a(int(half) - 1) * half
    # Fall through (non-positive n): implicit None, as in the original.
print(a(5)) | <filename>exercise806.py<gh_stars>0
def a(n):
even = n % 2
if n == 1 :
return 1
elif n == 2:
return 2
else :
if even == 0 and n > 0:
return a(n/2) + (a(n/2)%10)
elif even == 1 and n > 0:
return a(int((n-1)/2)-1)*((n-1)/2)
print(a(5)) | none | 1 | 3.557302 | 4 | |
model/bottleneck_layer.py | deep-spin/explainable-qe-shared-task | 5 | 6621382 | from functools import partial
import torch
from entmax import entmax15, sparsemax, entmax_bisect
from model.utils import masked_average
class BottleneckSummary(torch.nn.Module):
    """Attention bottleneck head that summarizes encoder states into one vector.

    Attends over the concatenated <mt> <src> input (jointly, or each side
    over the other) and returns both the pooled summary and the attention
    distribution(s), so the attention can be supervised/inspected as an
    explanation. ``alpha`` selects the attention transform: < 1.0 sigmoid,
    1.0 softmax, 1.5 entmax15, 2.0 sparsemax, otherwise entmax via bisection.
    """

    def __init__(
        self,
        hidden_size,
        aggregation='none',
        kv_rep='embeddings',
        alpha=1.0,
        classwise=False,
        alpha_merge=0.5,
        squared_attn=False
    ):
        """
        Args:
            hidden_size: dimensionality of the encoder states. Grown by 1
                when ``classwise`` is True (presumably one extra appended
                feature per token — confirm against callers).
            aggregation: 'none', or one of aggregate_word_pieces' methods
                ('first', 'sum', 'mean', 'max') to merge word pieces before
                attending.
            kv_rep: 'embeddings' to build keys/values from the embedding
                layer output; any other value uses the hidden states.
            alpha: entmax alpha controlling attention sparsity (see class
                docstring for the mapping to transforms).
            classwise: see ``hidden_size``.
            alpha_merge: weight of the mt summary when combining mt/src
                summaries; a negative value switches to a single joint
                attention over the whole input.
            squared_attn: if True, use per-token queries (a full token-by-
                token attention matrix) instead of one averaged query.
        """
        super().__init__()
        self.classwise = classwise
        self.hidden_size = hidden_size + int(self.classwise)
        self.aggregation = aggregation
        self.kv_rep = kv_rep
        self.alpha = alpha
        self.alpha_merge = alpha_merge
        self.squared_attn = squared_attn
        # Map alpha to the corresponding (sparse) normalization transform.
        if alpha < 1.0:
            self.transform_fn = torch.sigmoid
        elif alpha == 1.0:
            self.transform_fn = partial(torch.softmax, dim=-1)
        elif alpha == 1.5:
            self.transform_fn = partial(entmax15, dim=-1)
        elif alpha == 2.0:
            self.transform_fn = partial(sparsemax, dim=-1)
        else:
            self.transform_fn = partial(entmax_bisect, alpha=alpha, dim=-1)
        # Square projections for query/key/value.
        self.q_layer = torch.nn.Linear(self.hidden_size, self.hidden_size)
        self.k_layer = torch.nn.Linear(self.hidden_size, self.hidden_size)
        self.v_layer = torch.nn.Linear(self.hidden_size, self.hidden_size)
        self._init_weights()

    def _init_weights(self):
        # Intentionally a no-op: default torch.nn.Linear initialization is
        # used. The Xavier alternative below was deliberately disabled.
        pass
        # for p in self.parameters():
        #     if p.dim() > 1:
        #         torch.nn.init.xavier_uniform_(p)

    def forward(self,
                hidden_states: torch.Tensor,
                embeddings: torch.Tensor,
                attention_mask: torch.Tensor,
                first_sentence_mask: torch.Tensor,
                first_piece_mask: torch.Tensor,
                separate_mt_and_src=True
                ):
        """
        Produce an estimation by adding a bottleneck computation on top of hidden states from selected layers.

        Args:
            hidden_states: selected hidden states with shape of (bs, ts, hdim)
            embeddings: output of embeddings layer of BERT. Shape of (bs, ts, hdim)
            attention_mask: binary mask, 1 indicates the positions of valid (non pad) inputs. Shape of (bs, ts)
            first_sentence_mask: binary mask, 1 indicates the positions of the first sentence. Shape of (bs, ts)
            first_piece_mask: binary mask, 1 indicates the positions of the first word piece. Shape of (bs, ts)
            separate_mt_and_src: when attending jointly, whether to split the
                returned attention into (mt, src) pieces.

        Returns:
            combined_summary: torch.Tensor with a shape of (bs, hdim)
            attentions: a (mt_probas, src_probas) tuple, or a single tensor
                when attending jointly with ``separate_mt_and_src=False``.
        """
        # optionally, aggregate word pieces of k and v
        if self.aggregation != 'none':
            bounds, bounds_mask = self.get_bounds_from_first_piece_mask(first_piece_mask)
            embeddings = self.aggregate_word_pieces(embeddings, bounds, method=self.aggregation)
            hidden_states = self.aggregate_word_pieces(hidden_states, bounds, method=self.aggregation)
            # Re-index the masks so they line up with the aggregated (word-
            # level) sequence instead of the word-piece sequence.
            r = torch.arange(bounds.shape[0], device=bounds.device).unsqueeze(-1)
            first_sentence_mask = first_sentence_mask[r, bounds]
            first_piece_mask = first_piece_mask[r, bounds]
            attention_mask = bounds_mask
        # select which vectors to use to represent keys and values
        kv_rep = embeddings if self.kv_rep == 'embeddings' else hidden_states
        if self.alpha_merge < 0:  # do attention to mt and src jointly
            if not self.squared_attn:
                # masked average over word pieces to get a single query representation
                hidden_states_avg = masked_average(hidden_states, attention_mask)
                q = self.q_layer(hidden_states_avg).unsqueeze(1)
            else:
                q = self.q_layer(hidden_states)
            # linear map for k and v
            k = self.k_layer(kv_rep)
            v = self.v_layer(kv_rep)
            # attention (scaled dot-product; pads pushed to a large negative)
            attn_scores = torch.matmul(q, k.transpose(-1, -2)) / k.shape[-1] ** 0.5
            attn_scores = attn_scores.masked_fill(~attention_mask.unsqueeze(1).bool(), -10000.0)
            attn_probas = self.transform_fn(attn_scores)
            if not self.squared_attn:
                # For alpha < 1 (sigmoid) the outputs are not a distribution;
                # renormalize a clone for the weighted sum of values.
                attn_alphas = attn_probas if self.alpha >= 1.0 else attn_probas.clone() / attn_probas.sum(-1).unsqueeze(-1)
                combined_summary = torch.matmul(attn_alphas, v).squeeze(1)
                # we select scores for alpha=-1 because we will use it with BCELoss and fp16
                attn_probas = attn_probas.squeeze(1) if self.alpha >= 1.0 else attn_scores.squeeze(1)
            else:
                attn_alphas = attn_probas if self.alpha >= 1.0 else attn_probas.clone() / attn_probas.sum(-1).unsqueeze(-1)
                # Per-token summaries are averaged back into one vector.
                combined_summary = masked_average(torch.matmul(attn_alphas, v), attention_mask)
                # we select scores for alpha=-1 because we will use it with BCELoss and fp16
                attn_probas = attn_probas if self.alpha >= 1.0 else attn_scores
                attn_probas = masked_average(attn_probas, attention_mask)
            if separate_mt_and_src:
                # break attn probas into mt and src
                mt_probas, src_probas, mt_mask, src_mask = self.separate_mt_and_src(attn_probas, first_sentence_mask,
                                                                                   attention_mask)
                # set pad probas to zero (we might have created new pad positions when splitting <mt> from <src>)
                mt_probas = mt_probas * mt_mask.float()
                src_probas = src_probas * src_mask.float()
                # renormalize
                # mt_probas = mt_probas / mt_probas.sum(-1).unsqueeze(-1)
                # src_probas = src_probas / src_probas.sum(-1).unsqueeze(-1)
                attentions = (mt_probas, src_probas)
            else:
                attentions = attn_probas
        else:  # do attention to mt and src separately
            hidden_states_mt, hidden_states_src, _, _ = self.separate_mt_and_src(hidden_states, first_sentence_mask,
                                                                                 attention_mask)
            kv_rep_mt, kv_rep_src, mt_mask, src_mask = self.separate_mt_and_src(kv_rep, first_sentence_mask,
                                                                               attention_mask)
            # avg hidden states
            hidden_states_avg_mt = masked_average(hidden_states_mt, mt_mask)
            hidden_states_avg_src = masked_average(hidden_states_src, src_mask)
            # do attn of src over mt (src provides the query, mt the keys/values)
            if not self.squared_attn:
                q = self.q_layer(hidden_states_avg_src)
            else:
                q = self.q_layer(hidden_states_src)
            k = self.k_layer(hidden_states_mt)
            v = self.v_layer(hidden_states_mt)
            # NOTE(review): this branch builds k/v from hidden_states_mt, not
            # kv_rep_mt — so self.kv_rep is ignored here; confirm intentional.
            s = torch.matmul(q, k.transpose(-1, -2)) / k.shape[-1] ** 0.5
            s = s.masked_fill(~mt_mask.unsqueeze(1).bool(), -10000.0)
            mt_probas = self.transform_fn(s)
            if not self.squared_attn:
                mt_summary = torch.matmul(mt_probas, v).squeeze(1)
                mt_probas = mt_probas.squeeze(1).clone() if self.alpha >= 1.0 else s.squeeze(1).clone()
            else:
                mt_summary = torch.matmul(mt_probas, v)
                mt_summary = masked_average(mt_summary, src_mask)
                mt_probas = mt_probas.clone() if self.alpha >= 1.0 else s.clone()
                mt_probas = masked_average(mt_probas, src_mask)
            # do attn of mt over src (mt provides the query, src the keys/values)
            if not self.squared_attn:
                q = self.q_layer(hidden_states_avg_mt)
            else:
                q = self.q_layer(hidden_states_mt)
            k = self.k_layer(hidden_states_src)
            v = self.v_layer(hidden_states_src)
            s = torch.matmul(q, k.transpose(-1, -2)) / k.shape[-1] ** 0.5
            s = s.masked_fill(~src_mask.unsqueeze(1).bool(), -10000.0)
            src_probas = self.transform_fn(s)
            if not self.squared_attn:
                src_summary = torch.matmul(src_probas, v).squeeze(1)
                src_probas = src_probas.squeeze(1).clone() if self.alpha >= 1.0 else s.squeeze(1).clone()
            else:
                src_summary = torch.matmul(src_probas, v)
                src_summary = masked_average(src_summary, mt_mask)
                # NOTE(review): unlike the mt branch above, no .clone() here
                # before masked_average — confirm this asymmetry is intended.
                src_probas = src_probas if self.alpha >= 1.0 else s
                src_probas = masked_average(src_probas, mt_mask)
            # Convex combination of the two side summaries.
            combined_summary = self.alpha_merge * mt_summary + (1 - self.alpha_merge) * src_summary
            attentions = (mt_probas, src_probas)
        return combined_summary, attentions

    @staticmethod
    def separate_mt_and_src(tensor, first_sentence_mask, attention_mask):
        """
        Split a tensor into two according to the bool first_sentence_mask tensor.
        It will use attention_mask to get rid of pad positions.
        It assumes a concatenated input tensor: <mt> <src>.

        Args:
            tensor (torch.Tensor): shape of (bs, ts, hdim)
            first_sentence_mask (torch.LongTensor): boolean tensor, with 1s indicating the positions of <mt>
                and 0s of <src>. Shape of (bs, ts)
            attention_mask (torch.LongTensor): mask of pad positions, 1s indicate valid and 0s indicatep pad positions.
                Shape of (bs, ts)

        Returns:
            <mt> torch.Tensor (bs, mt_len, hdim)
            <src> torch.Tensor (bs, src_len, hdim)
            mt_mask torch.BoolTensor (bs, mt_len) with 1s indicating valid positions, and 0s pad positions
            src_mask torch.BoolTensor (bs, src_len) with 1s indicating valid positions, and 0s pad positions
        """
        first_sentence_mask = first_sentence_mask.long()
        attention_mask = attention_mask.long()
        # recover mt tensor: take the longest <mt> span in the batch
        mt_len = first_sentence_mask.sum(-1).max().item()
        tensor_mt = tensor[:, :mt_len].clone()
        mt_mask = attention_mask[:, :mt_len] & first_sentence_mask[:, :mt_len]
        # recover src tensor + rest of padding (which will be dealt later in the loss fn)
        src_first = first_sentence_mask.sum(-1).min().item()
        tensor_src = tensor[:, src_first:].clone()
        src_mask = attention_mask[:, src_first:] & (1 - first_sentence_mask)[:, src_first:]
        return tensor_mt, tensor_src, mt_mask, src_mask

    @staticmethod
    def get_bounds_from_first_piece_mask(first_piece_mask):
        """
        Transforms a binary mask of first word piece positions to 0-indexed bounds tensor.
        E.g.
            [[1, 0, 0, 1, 0, 1, 0, 1],
             [1, 1, 0, 0, 0, 1, 0, 0]]
        will be transformed to
            [[0, 3, 5, 6],
             [0, 1, 5, -1]]
        where -1 indicates pad positions.
        """
        bs, seq_len = first_piece_mask.shape
        device = first_piece_mask.device
        # Sentinel larger than any real index so non-first pieces sort last.
        bounds_highest_index = 999999
        bounds = torch.arange(seq_len, device=device).unsqueeze(0).expand(bs, -1)
        bounds = bounds.masked_fill(~first_piece_mask.bool(), bounds_highest_index)
        # Sorting pushes all sentinel entries to the right of the real indexes.
        bounds = bounds.sort(dim=-1)[0]
        bounds_mask = (bounds != bounds_highest_index).bool()
        # Trim to the longest word count in the batch, pad with -1.
        max_num_bounds = bounds_mask.long().sum(-1).max().item()
        bounds = bounds[:, :max_num_bounds]
        bounds[bounds == bounds_highest_index] = -1
        bounds_mask = (bounds != -1).bool()
        return bounds, bounds_mask

    @staticmethod
    def aggregate_word_pieces(hidden_states, bounds, method='first'):
        """
        Aggregate hidden states according to word piece tokenization.

        Args:
            hidden_states (torch.Tensor): output of BERT. Shape of (bs, ts, hdim)
            bounds (torch.LongTensor): the indexes where the word pieces start.
                Shape of (bs, ts)
                e.g. Welcome to the jungle -> _Wel come _to _the _jungle
                     bounds[0] = [0, 2, 3, 4]
                indexes for padding positions are expected to be equal to -1
            method (str): the strategy used to get a representation of a word
                based on its word pices. Possible choices are:
                'first' = take the vector of the first word piece
                'sum' = take the sum of word pieces vectors
                'mean' = take the average of word pieces vectors
                'max' = take the max of word pieces vectors

        Returns:
            torch.Tensor (bs, original_sequence_length, hdim)
        """
        bs, ts, hidden_dim = hidden_states.size()
        r = torch.arange(bs, device=hidden_states.device).unsqueeze(1)
        if method == 'first':
            return hidden_states[r, bounds]
        elif method == 'sum' or method == 'mean':
            # Compute per-word sums as differences of the running cumsum
            # evaluated at each word's last piece (vectorized, no loop).
            neg_one_indexes = bounds.new_zeros(bs, 1) - 1
            extended_bounds = torch.cat((bounds[:, 1:], neg_one_indexes), dim=1)
            last_idx = (extended_bounds != -1).sum(dim=1).unsqueeze(-1) - 1
            # Close the final word's span one past its last real bound.
            extended_bounds[r, last_idx + 1] = extended_bounds[r, last_idx] + 1
            shifted_bounds = extended_bounds - 1
            cumsum = hidden_states.cumsum(dim=1)
            cumsum = cumsum[r, shifted_bounds]
            zero_values = cumsum.new_zeros(bs, 1, hidden_dim)
            shifted_cumsum = torch.cat((zero_values, cumsum[:, :-1]), dim=1)
            selected_pieces = cumsum - shifted_cumsum
            if method == 'mean':
                lens = shifted_bounds + 1 - bounds
                lens[lens == 0] = 1  # we should not have a case where lens_ij=0
                selected_pieces = selected_pieces / lens.unsqueeze(-1).float()
            return selected_pieces
        elif method == 'max':
            # Python-level loop: max has no cumsum-style shortcut.
            max_bounds_size = (bounds != -1).sum(1).max().item()
            max_wordpieces = torch.zeros(bs, max_bounds_size, hidden_dim, device=bounds.device)
            for i in range(bs):
                bounds_len = (bounds[i] != -1).sum().item()
                valid_bounds = bounds[i, :bounds_len].tolist()
                valid_bounds.append(valid_bounds[-1] + 1)
                slices = zip(valid_bounds[:-1], valid_bounds[1:])
                for j, (k1, k2) in enumerate(slices):
                    x, _ = torch.max(hidden_states[i, k1:k2], dim=0)
                    max_wordpieces[i, j] = x
            return max_wordpieces
        else:
            raise Exception('Method {} is not implemented'.format(method))
| from functools import partial
import torch
from entmax import entmax15, sparsemax, entmax_bisect
from model.utils import masked_average
class BottleneckSummary(torch.nn.Module):
def __init__(
self,
hidden_size,
aggregation='none',
kv_rep='embeddings',
alpha=1.0,
classwise=False,
alpha_merge=0.5,
squared_attn=False
):
super().__init__()
self.classwise = classwise
self.hidden_size = hidden_size + int(self.classwise)
self.aggregation = aggregation
self.kv_rep = kv_rep
self.alpha = alpha
self.alpha_merge = alpha_merge
self.squared_attn = squared_attn
if alpha < 1.0:
self.transform_fn = torch.sigmoid
elif alpha == 1.0:
self.transform_fn = partial(torch.softmax, dim=-1)
elif alpha == 1.5:
self.transform_fn = partial(entmax15, dim=-1)
elif alpha == 2.0:
self.transform_fn = partial(sparsemax, dim=-1)
else:
self.transform_fn = partial(entmax_bisect, alpha=alpha, dim=-1)
self.q_layer = torch.nn.Linear(self.hidden_size, self.hidden_size)
self.k_layer = torch.nn.Linear(self.hidden_size, self.hidden_size)
self.v_layer = torch.nn.Linear(self.hidden_size, self.hidden_size)
self._init_weights()
def _init_weights(self):
pass
# for p in self.parameters():
# if p.dim() > 1:
# torch.nn.init.xavier_uniform_(p)
def forward(self,
hidden_states: torch.Tensor,
embeddings: torch.Tensor,
attention_mask: torch.Tensor,
first_sentence_mask: torch.Tensor,
first_piece_mask: torch.Tensor,
separate_mt_and_src=True
):
"""
Produce an estimation by adding a bottleneck computation on top of hidden states from selected layers.
Args:
hidden_states: selected hidden states with shape of (bs, ts, hdim)
embeddings: output of embeddings layer of BERT. Shape of (bs, ts, hdim)
attention_mask: binary mask, 1 indicates the positions of valid (non pad) inputs. Shape of (bs, ts)
first_sentence_mask: binary mask, 1 indicates the positions of the first sentence. Shape of (bs, ts)
first_piece_mask: binary mask, 1 indicates the positions of the first word piece. Shape of (bs, ts)
Returns:
torch.Tensor with a shape of (bs, hdim)
"""
# optionally, aggregate word pieces of k and v
if self.aggregation != 'none':
bounds, bounds_mask = self.get_bounds_from_first_piece_mask(first_piece_mask)
embeddings = self.aggregate_word_pieces(embeddings, bounds, method=self.aggregation)
hidden_states = self.aggregate_word_pieces(hidden_states, bounds, method=self.aggregation)
r = torch.arange(bounds.shape[0], device=bounds.device).unsqueeze(-1)
first_sentence_mask = first_sentence_mask[r, bounds]
first_piece_mask = first_piece_mask[r, bounds]
attention_mask = bounds_mask
# select which vectors to use to represent keys and values
kv_rep = embeddings if self.kv_rep == 'embeddings' else hidden_states
if self.alpha_merge < 0: # do attention to mt and src jointly
if not self.squared_attn:
# masked average over word pieces to get a single query representation
hidden_states_avg = masked_average(hidden_states, attention_mask)
q = self.q_layer(hidden_states_avg).unsqueeze(1)
else:
q = self.q_layer(hidden_states)
# linear map for k and v
k = self.k_layer(kv_rep)
v = self.v_layer(kv_rep)
# attention
attn_scores = torch.matmul(q, k.transpose(-1, -2)) / k.shape[-1] ** 0.5
attn_scores = attn_scores.masked_fill(~attention_mask.unsqueeze(1).bool(), -10000.0)
attn_probas = self.transform_fn(attn_scores)
if not self.squared_attn:
attn_alphas = attn_probas if self.alpha >= 1.0 else attn_probas.clone() / attn_probas.sum(-1).unsqueeze(-1)
combined_summary = torch.matmul(attn_alphas, v).squeeze(1)
# we select scores for alpha=-1 because we will use it with BCELoss and fp16
attn_probas = attn_probas.squeeze(1) if self.alpha >= 1.0 else attn_scores.squeeze(1)
else:
attn_alphas = attn_probas if self.alpha >= 1.0 else attn_probas.clone() / attn_probas.sum(-1).unsqueeze(-1)
combined_summary = masked_average(torch.matmul(attn_alphas, v), attention_mask)
# we select scores for alpha=-1 because we will use it with BCELoss and fp16
attn_probas = attn_probas if self.alpha >= 1.0 else attn_scores
attn_probas = masked_average(attn_probas, attention_mask)
if separate_mt_and_src:
# break attn probas into mt and src
mt_probas, src_probas, mt_mask, src_mask = self.separate_mt_and_src(attn_probas, first_sentence_mask,
attention_mask)
# set pad probas to zero (we might have created new pad positions when splitting <mt> from <src>)
mt_probas = mt_probas * mt_mask.float()
src_probas = src_probas * src_mask.float()
# renormalize
# mt_probas = mt_probas / mt_probas.sum(-1).unsqueeze(-1)
# src_probas = src_probas / src_probas.sum(-1).unsqueeze(-1)
attentions = (mt_probas, src_probas)
else:
attentions = attn_probas
else: # do attention to mt and src separately
hidden_states_mt, hidden_states_src, _, _ = self.separate_mt_and_src(hidden_states, first_sentence_mask,
attention_mask)
kv_rep_mt, kv_rep_src, mt_mask, src_mask = self.separate_mt_and_src(kv_rep, first_sentence_mask,
attention_mask)
# avg hidden states
hidden_states_avg_mt = masked_average(hidden_states_mt, mt_mask)
hidden_states_avg_src = masked_average(hidden_states_src, src_mask)
# do attn of src over mt
if not self.squared_attn:
q = self.q_layer(hidden_states_avg_src)
else:
q = self.q_layer(hidden_states_src)
k = self.k_layer(hidden_states_mt)
v = self.v_layer(hidden_states_mt)
s = torch.matmul(q, k.transpose(-1, -2)) / k.shape[-1] ** 0.5
s = s.masked_fill(~mt_mask.unsqueeze(1).bool(), -10000.0)
mt_probas = self.transform_fn(s)
if not self.squared_attn:
mt_summary = torch.matmul(mt_probas, v).squeeze(1)
mt_probas = mt_probas.squeeze(1).clone() if self.alpha >= 1.0 else s.squeeze(1).clone()
else:
mt_summary = torch.matmul(mt_probas, v)
mt_summary = masked_average(mt_summary, src_mask)
mt_probas = mt_probas.clone() if self.alpha >= 1.0 else s.clone()
mt_probas = masked_average(mt_probas, src_mask)
# do attn of mt over src
if not self.squared_attn:
q = self.q_layer(hidden_states_avg_mt)
else:
q = self.q_layer(hidden_states_mt)
k = self.k_layer(hidden_states_src)
v = self.v_layer(hidden_states_src)
s = torch.matmul(q, k.transpose(-1, -2)) / k.shape[-1] ** 0.5
s = s.masked_fill(~src_mask.unsqueeze(1).bool(), -10000.0)
src_probas = self.transform_fn(s)
if not self.squared_attn:
src_summary = torch.matmul(src_probas, v).squeeze(1)
src_probas = src_probas.squeeze(1).clone() if self.alpha >= 1.0 else s.squeeze(1).clone()
else:
src_summary = torch.matmul(src_probas, v)
src_summary = masked_average(src_summary, mt_mask)
src_probas = src_probas if self.alpha >= 1.0 else s
src_probas = masked_average(src_probas, mt_mask)
combined_summary = self.alpha_merge * mt_summary + (1 - self.alpha_merge) * src_summary
attentions = (mt_probas, src_probas)
return combined_summary, attentions
@staticmethod
def separate_mt_and_src(tensor, first_sentence_mask, attention_mask):
"""
Split a tensor into two according to the bool first_sentence_mask tensor.
It will use attention_mask to get rid of pad positions.
It assumes a concatenated input tensor: <mt> <src>.
Args:
tensor (torch.Tensor): shape of (bs, ts, hdim)
first_sentence_mask (torch.LongTensor): boolean tensor, with 1s indicating the positions of <mt>
and 0s of <src>. Shape of (bs, ts)
attention_mask (torch.LongTensor): mask of pad positions, 1s indicate valid and 0s indicatep pad positions.
Shape of (bs, ts)
Returns:
<mt> torch.Tensor (bs, mt_len, hdim)
<src> torch.Tensor (bs, src_len, hdim)
mt_mask torch.BoolTensor (bs, mt_len) with 1s indicating valid positions, and 0s pad positions
src_mask torch.BoolTensor (bs, src_len) with 1s indicating valid positions, and 0s pad positions
"""
first_sentence_mask = first_sentence_mask.long()
attention_mask = attention_mask.long()
# recover mt tensor
mt_len = first_sentence_mask.sum(-1).max().item()
tensor_mt = tensor[:, :mt_len].clone()
mt_mask = attention_mask[:, :mt_len] & first_sentence_mask[:, :mt_len]
# recover src tensor + rest of padding (which will be dealt later in the loss fn)
src_first = first_sentence_mask.sum(-1).min().item()
tensor_src = tensor[:, src_first:].clone()
src_mask = attention_mask[:, src_first:] & (1 - first_sentence_mask)[:, src_first:]
return tensor_mt, tensor_src, mt_mask, src_mask
@staticmethod
def get_bounds_from_first_piece_mask(first_piece_mask):
"""
Transforms a binary mask of first word piece positions to 0-indexed bounds tensor.
E.g.
[[1, 0, 0, 1, 0, 1, 0, 1],
[1, 1, 0, 0, 0, 1, 0, 0]]
will be transformed to
[[0, 3, 5, 6],
[0, 1, 5, -1]]
where -1 indicates pad positions.
"""
bs, seq_len = first_piece_mask.shape
device = first_piece_mask.device
bounds_highest_index = 999999
bounds = torch.arange(seq_len, device=device).unsqueeze(0).expand(bs, -1)
bounds = bounds.masked_fill(~first_piece_mask.bool(), bounds_highest_index)
bounds = bounds.sort(dim=-1)[0]
bounds_mask = (bounds != bounds_highest_index).bool()
max_num_bounds = bounds_mask.long().sum(-1).max().item()
bounds = bounds[:, :max_num_bounds]
bounds[bounds == bounds_highest_index] = -1
bounds_mask = (bounds != -1).bool()
return bounds, bounds_mask
@staticmethod
def aggregate_word_pieces(hidden_states, bounds, method='first'):
"""
Aggregate hidden states according to word piece tokenization.
Args:
hidden_states (torch.Tensor): output of BERT. Shape of (bs, ts, hdim)
bounds (torch.LongTensor): the indexes where the word pieces start.
Shape of (bs, ts)
e.g. Welcome to the jungle -> _Wel come _to _the _jungle
bounds[0] = [0, 2, 3, 4]
indexes for padding positions are expected to be equal to -1
method (str): the strategy used to get a representation of a word
based on its word pices. Possible choices are:
'first' = take the vector of the first word piece
'sum' = take the sum of word pieces vectors
'mean' = take the average of word pieces vectors
'max' = take the max of word pieces vectors
Returns:
torch.Tensor (bs, original_sequence_length, hdim)
"""
bs, ts, hidden_dim = hidden_states.size()
r = torch.arange(bs, device=hidden_states.device).unsqueeze(1)
if method == 'first':
return hidden_states[r, bounds]
elif method == 'sum' or method == 'mean':
neg_one_indexes = bounds.new_zeros(bs, 1) - 1
extended_bounds = torch.cat((bounds[:, 1:], neg_one_indexes), dim=1)
last_idx = (extended_bounds != -1).sum(dim=1).unsqueeze(-1) - 1
extended_bounds[r, last_idx + 1] = extended_bounds[r, last_idx] + 1
shifted_bounds = extended_bounds - 1
cumsum = hidden_states.cumsum(dim=1)
cumsum = cumsum[r, shifted_bounds]
zero_values = cumsum.new_zeros(bs, 1, hidden_dim)
shifted_cumsum = torch.cat((zero_values, cumsum[:, :-1]), dim=1)
selected_pieces = cumsum - shifted_cumsum
if method == 'mean':
lens = shifted_bounds + 1 - bounds
lens[lens == 0] = 1 # we should not have a case where lens_ij=0
selected_pieces = selected_pieces / lens.unsqueeze(-1).float()
return selected_pieces
elif method == 'max':
max_bounds_size = (bounds != -1).sum(1).max().item()
max_wordpieces = torch.zeros(bs, max_bounds_size, hidden_dim, device=bounds.device)
for i in range(bs):
bounds_len = (bounds[i] != -1).sum().item()
valid_bounds = bounds[i, :bounds_len].tolist()
valid_bounds.append(valid_bounds[-1] + 1)
slices = zip(valid_bounds[:-1], valid_bounds[1:])
for j, (k1, k2) in enumerate(slices):
x, _ = torch.max(hidden_states[i, k1:k2], dim=0)
max_wordpieces[i, j] = x
return max_wordpieces
else:
raise Exception('Method {} is not implemented'.format(method))
| en | 0.762112 | # for p in self.parameters(): # if p.dim() > 1: # torch.nn.init.xavier_uniform_(p) Produce an estimation by adding a bottleneck computation on top of hidden states from selected layers. Args: hidden_states: selected hidden states with shape of (bs, ts, hdim) embeddings: output of embeddings layer of BERT. Shape of (bs, ts, hdim) attention_mask: binary mask, 1 indicates the positions of valid (non pad) inputs. Shape of (bs, ts) first_sentence_mask: binary mask, 1 indicates the positions of the first sentence. Shape of (bs, ts) first_piece_mask: binary mask, 1 indicates the positions of the first word piece. Shape of (bs, ts) Returns: torch.Tensor with a shape of (bs, hdim) # optionally, aggregate word pieces of k and v # select which vectors to use to represent keys and values # do attention to mt and src jointly # masked average over word pieces to get a single query representation # linear map for k and v # attention # we select scores for alpha=-1 because we will use it with BCELoss and fp16 # we select scores for alpha=-1 because we will use it with BCELoss and fp16 # break attn probas into mt and src # set pad probas to zero (we might have created new pad positions when splitting <mt> from <src>) # renormalize # mt_probas = mt_probas / mt_probas.sum(-1).unsqueeze(-1) # src_probas = src_probas / src_probas.sum(-1).unsqueeze(-1) # do attention to mt and src separately # avg hidden states # do attn of src over mt # do attn of mt over src Split a tensor into two according to the bool first_sentence_mask tensor. It will use attention_mask to get rid of pad positions. It assumes a concatenated input tensor: <mt> <src>. Args: tensor (torch.Tensor): shape of (bs, ts, hdim) first_sentence_mask (torch.LongTensor): boolean tensor, with 1s indicating the positions of <mt> and 0s of <src>. Shape of (bs, ts) attention_mask (torch.LongTensor): mask of pad positions, 1s indicate valid and 0s indicatep pad positions. 
Shape of (bs, ts) Returns: <mt> torch.Tensor (bs, mt_len, hdim) <src> torch.Tensor (bs, src_len, hdim) mt_mask torch.BoolTensor (bs, mt_len) with 1s indicating valid positions, and 0s pad positions src_mask torch.BoolTensor (bs, src_len) with 1s indicating valid positions, and 0s pad positions # recover mt tensor # recover src tensor + rest of padding (which will be dealt later in the loss fn) Transforms a binary mask of first word piece positions to 0-indexed bounds tensor. E.g. [[1, 0, 0, 1, 0, 1, 0, 1], [1, 1, 0, 0, 0, 1, 0, 0]] will be transformed to [[0, 3, 5, 6], [0, 1, 5, -1]] where -1 indicates pad positions. Aggregate hidden states according to word piece tokenization. Args: hidden_states (torch.Tensor): output of BERT. Shape of (bs, ts, hdim) bounds (torch.LongTensor): the indexes where the word pieces start. Shape of (bs, ts) e.g. Welcome to the jungle -> _Wel come _to _the _jungle bounds[0] = [0, 2, 3, 4] indexes for padding positions are expected to be equal to -1 method (str): the strategy used to get a representation of a word based on its word pices. Possible choices are: 'first' = take the vector of the first word piece 'sum' = take the sum of word pieces vectors 'mean' = take the average of word pieces vectors 'max' = take the max of word pieces vectors Returns: torch.Tensor (bs, original_sequence_length, hdim) # we should not have a case where lens_ij=0 | 2.121845 | 2 |
classification_models/keras_applications/__init__.py | lewlin/classification_models | 6 | 6621383 | <reponame>lewlin/classification_models
import keras
from .keras_applications.keras_applications import *
set_keras_submodules(
backend=keras.backend,
layers=keras.layers,
models=keras.models,
engine=keras.engine,
utils=keras.utils,
)
| import keras
from .keras_applications.keras_applications import *
set_keras_submodules(
backend=keras.backend,
layers=keras.layers,
models=keras.models,
engine=keras.engine,
utils=keras.utils,
) | none | 1 | 1.240423 | 1 | |
ACME/utility/circshift.py | mauriziokovacic/ACME | 3 | 6621384 | <filename>ACME/utility/circshift.py
import numpy
import torch
from .isnumpy import *
from .istorch import *
def circshift(tensor, k, dim=None):
"""
Circularly shifts the input tensor k times along the given dimension
Parameters
----------
tensor : Tensor
the input tensor
k : int
the number of shifts to perform
dim : int (optional)
the dimension along the shift is performed (default is None)
Returns
-------
Tensor
the tensor with shifted values
Raises
------
AssertionError
if input tensor is neither a Numpy or PyTorch tensor
"""
if isnumpy(tensor):
return numpy.roll(tensor, k, axis=dim)
if istorch(tensor):
return torch.roll(tensor, k, dims=dim)
raise RuntimeError('Unknown data type')
| <filename>ACME/utility/circshift.py
import numpy
import torch
from .isnumpy import *
from .istorch import *
def circshift(tensor, k, dim=None):
"""
Circularly shifts the input tensor k times along the given dimension
Parameters
----------
tensor : Tensor
the input tensor
k : int
the number of shifts to perform
dim : int (optional)
the dimension along the shift is performed (default is None)
Returns
-------
Tensor
the tensor with shifted values
Raises
------
AssertionError
if input tensor is neither a Numpy or PyTorch tensor
"""
if isnumpy(tensor):
return numpy.roll(tensor, k, axis=dim)
if istorch(tensor):
return torch.roll(tensor, k, dims=dim)
raise RuntimeError('Unknown data type')
| en | 0.615704 | Circularly shifts the input tensor k times along the given dimension Parameters ---------- tensor : Tensor the input tensor k : int the number of shifts to perform dim : int (optional) the dimension along the shift is performed (default is None) Returns ------- Tensor the tensor with shifted values Raises ------ AssertionError if input tensor is neither a Numpy or PyTorch tensor | 3.323942 | 3 |
config/utils/cache.py | CaladBlogBaal/Victorique | 0 | 6621385 | <reponame>CaladBlogBaal/Victorique
import asyncio
import enum
from functools import wraps
from lru import LRU
# pretty much taken from https://gist.github.com/dlebech/c16a34f735c0c4e9b604
class Strategy(enum.Enum):
lru = 1
raw = 2
def _wrap_coroutine_storage(cache_dict, key, future):
async def wrapper():
val = await future
cache_dict[key] = val
return val
return wrapper()
def _wrap_value_in_coroutine(val):
async def wrapper():
return val
return wrapper()
def cache(maxsize=256, strategy=Strategy.lru):
def memoize(f):
if strategy is Strategy.lru:
__cache = LRU(maxsize)
__stats = __cache.items
elif strategy is Strategy.raw:
__cache = {}
__stats = __cache.items
def make_key(*args, **kwargs):
key = f"{f.__module__}#{f.__name__}#{repr((args, kwargs))}"
return key
@wraps(f)
def wrapper(*args, **kwargs):
key = make_key(*args, **kwargs)
try:
val = __cache[make_key(*args, **kwargs)]
if asyncio.iscoroutinefunction(f):
return _wrap_value_in_coroutine(val)
return val
except KeyError:
val = f(*args, **kwargs)
if asyncio.iscoroutine(val):
return _wrap_coroutine_storage(__cache, key, val)
__cache[key] = val
return val
def __invalidate(*args, **kwargs):
key = make_key(*args, **kwargs)
try:
del __cache[key]
except KeyError:
return False
else:
return True
def __clear():
__cache.clear()
wrapper.get_stats = __stats
wrapper.invalidate = __invalidate
wrapper.clear = __clear
return wrapper
return memoize
| import asyncio
import enum
from functools import wraps
from lru import LRU
# pretty much taken from https://gist.github.com/dlebech/c16a34f735c0c4e9b604
class Strategy(enum.Enum):
lru = 1
raw = 2
def _wrap_coroutine_storage(cache_dict, key, future):
async def wrapper():
val = await future
cache_dict[key] = val
return val
return wrapper()
def _wrap_value_in_coroutine(val):
async def wrapper():
return val
return wrapper()
def cache(maxsize=256, strategy=Strategy.lru):
def memoize(f):
if strategy is Strategy.lru:
__cache = LRU(maxsize)
__stats = __cache.items
elif strategy is Strategy.raw:
__cache = {}
__stats = __cache.items
def make_key(*args, **kwargs):
key = f"{f.__module__}#{f.__name__}#{repr((args, kwargs))}"
return key
@wraps(f)
def wrapper(*args, **kwargs):
key = make_key(*args, **kwargs)
try:
val = __cache[make_key(*args, **kwargs)]
if asyncio.iscoroutinefunction(f):
return _wrap_value_in_coroutine(val)
return val
except KeyError:
val = f(*args, **kwargs)
if asyncio.iscoroutine(val):
return _wrap_coroutine_storage(__cache, key, val)
__cache[key] = val
return val
def __invalidate(*args, **kwargs):
key = make_key(*args, **kwargs)
try:
del __cache[key]
except KeyError:
return False
else:
return True
def __clear():
__cache.clear()
wrapper.get_stats = __stats
wrapper.invalidate = __invalidate
wrapper.clear = __clear
return wrapper
return memoize | en | 0.588404 | # pretty much taken from https://gist.github.com/dlebech/c16a34f735c0c4e9b604 #{f.__name__}#{repr((args, kwargs))}" | 2.62684 | 3 |
__init__.py | muriloventuroso/skill-self-esteem | 0 | 6621386 | from adapt.intent import IntentBuilder
from mycroft.skills.core import MycroftSkill, intent_handler
class SelfEsteemSkill(MycroftSkill):
def __init__(self):
super(SelfEsteemSkill, self).__init__(name="SelfEsteemSkill")
@intent_handler(IntentBuilder("BeautifulIntent").require("Beautiful").optional("pronoun"))
def handle_beautiful_intent(self, message):
self.speak_dialog("you.are.beautiful")
@intent_handler(IntentBuilder("SmartIntent").require("Smart").optional("pronoun"))
def handle_smart_intent(self, message):
self.speak_dialog("you.are.smart")
@intent_handler(IntentBuilder("StupidIntent").require("Smart").optional("pronoun"))
def handle_stupid_intent(self, message):
self.speak_dialog("of.course.not")
self.speak_dialog("you.are.smart")
def create_skill():
return SelfEsteemSkill()
| from adapt.intent import IntentBuilder
from mycroft.skills.core import MycroftSkill, intent_handler
class SelfEsteemSkill(MycroftSkill):
def __init__(self):
super(SelfEsteemSkill, self).__init__(name="SelfEsteemSkill")
@intent_handler(IntentBuilder("BeautifulIntent").require("Beautiful").optional("pronoun"))
def handle_beautiful_intent(self, message):
self.speak_dialog("you.are.beautiful")
@intent_handler(IntentBuilder("SmartIntent").require("Smart").optional("pronoun"))
def handle_smart_intent(self, message):
self.speak_dialog("you.are.smart")
@intent_handler(IntentBuilder("StupidIntent").require("Smart").optional("pronoun"))
def handle_stupid_intent(self, message):
self.speak_dialog("of.course.not")
self.speak_dialog("you.are.smart")
def create_skill():
return SelfEsteemSkill()
| none | 1 | 2.511087 | 3 | |
142-Linked-List-Cycle-II/solution02.py | Eroica-cpp/LeetCode | 7 | 6621387 | #!/usr/bin/python
# ==============================================================================
# Author: <NAME> (<EMAIL>)
# Date: Jun 19, 2015
# Question: 142-Linked-List-Cycle-II
# Link: https://leetcode.com/problems/linked-list-cycle-ii/
# ==============================================================================
# Given a linked list, return the node where the cycle begins. If there is no
# cycle, return null.
# Follow up:
# Can you solve it without using extra space?
# ==============================================================================
# Method: without extra space; two pointers
# Time Complexity: O(n)
# Space Complexity: O(1)
# ==============================================================================
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param head, a ListNode
# @return a list node
def detectCycle(self, head):
if not head or not head.next:
return
p1= p2 = head
while p2 and p2.next:
p1 = p1.next
p2 = p2.next.next
if id(p1) == id(p2):
break
if not p2 or not p2.next:
return
p1 = head
while id(p1) != id(p2):
p1 = p1.next
p2 = p2.next
return p1
| #!/usr/bin/python
# ==============================================================================
# Author: <NAME> (<EMAIL>)
# Date: Jun 19, 2015
# Question: 142-Linked-List-Cycle-II
# Link: https://leetcode.com/problems/linked-list-cycle-ii/
# ==============================================================================
# Given a linked list, return the node where the cycle begins. If there is no
# cycle, return null.
# Follow up:
# Can you solve it without using extra space?
# ==============================================================================
# Method: without extra space; two pointers
# Time Complexity: O(n)
# Space Complexity: O(1)
# ==============================================================================
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param head, a ListNode
# @return a list node
def detectCycle(self, head):
if not head or not head.next:
return
p1= p2 = head
while p2 and p2.next:
p1 = p1.next
p2 = p2.next.next
if id(p1) == id(p2):
break
if not p2 or not p2.next:
return
p1 = head
while id(p1) != id(p2):
p1 = p1.next
p2 = p2.next
return p1
| en | 0.578615 | #!/usr/bin/python # ============================================================================== # Author: <NAME> (<EMAIL>) # Date: Jun 19, 2015 # Question: 142-Linked-List-Cycle-II # Link: https://leetcode.com/problems/linked-list-cycle-ii/ # ============================================================================== # Given a linked list, return the node where the cycle begins. If there is no # cycle, return null. # Follow up: # Can you solve it without using extra space? # ============================================================================== # Method: without extra space; two pointers # Time Complexity: O(n) # Space Complexity: O(1) # ============================================================================== # Definition for singly-linked list. # class ListNode: # def __init__(self, x): # self.val = x # self.next = None # @param head, a ListNode # @return a list node | 3.907689 | 4 |
tests/plugins/test_vrtbe.py | hymer-up/streamlink | 5 | 6621388 | import unittest
from streamlink.plugins.vrtbe import VRTbe
class TestPluginVRTbe(unittest.TestCase):
def test_can_handle_url(self):
should_match = [
# LIVE
"https://www.vrt.be/vrtnu/kanalen/canvas/",
"https://www.vrt.be/vrtnu/kanalen/een/",
"https://www.vrt.be/vrtnu/kanalen/ketnet/",
# VOD
"https://www.vrt.be/vrtnu/a-z/belfast-zoo/1/belfast-zoo-s1a14/",
"https://www.vrt.be/vrtnu/a-z/sporza--korfbal/2017/sporza--korfbal-s2017-sporza-korfbal/",
"https://www.vrt.be/vrtnu/a-z/de-grote-peter-van-de-veire-ochtendshow/2017/de-grote-peter-van-de-veire"
+ "-ochtendshow-s2017--en-parels-voor-de-zwijnen-ook/"
]
for url in should_match:
self.assertTrue(VRTbe.can_handle_url(url))
should_not_match = [
"https://example.com/",
"http://www.local.local/"
]
for url in should_not_match:
self.assertFalse(VRTbe.can_handle_url(url))
| import unittest
from streamlink.plugins.vrtbe import VRTbe
class TestPluginVRTbe(unittest.TestCase):
def test_can_handle_url(self):
should_match = [
# LIVE
"https://www.vrt.be/vrtnu/kanalen/canvas/",
"https://www.vrt.be/vrtnu/kanalen/een/",
"https://www.vrt.be/vrtnu/kanalen/ketnet/",
# VOD
"https://www.vrt.be/vrtnu/a-z/belfast-zoo/1/belfast-zoo-s1a14/",
"https://www.vrt.be/vrtnu/a-z/sporza--korfbal/2017/sporza--korfbal-s2017-sporza-korfbal/",
"https://www.vrt.be/vrtnu/a-z/de-grote-peter-van-de-veire-ochtendshow/2017/de-grote-peter-van-de-veire"
+ "-ochtendshow-s2017--en-parels-voor-de-zwijnen-ook/"
]
for url in should_match:
self.assertTrue(VRTbe.can_handle_url(url))
should_not_match = [
"https://example.com/",
"http://www.local.local/"
]
for url in should_not_match:
self.assertFalse(VRTbe.can_handle_url(url))
| ja | 0.32285 | # LIVE # VOD | 2.619053 | 3 |
src/subcommands/listers/list_projects.py | kevin3/cwl-ica | 8 | 6621389 | <gh_stars>1-10
#!/usr/bin/env python3
"""
List all projects registered in <CWL_ICA_REPO_PATH>/config/projects.yaml
"""
from classes.command import Command
from utils.logging import get_logger
import pandas as pd
from utils.repo import get_tenant_yaml_path, read_yaml, get_project_yaml_path
import os
import sys
from utils.errors import TenantNotFoundError
logger = get_logger()
class ListProjects(Command):
"""Usage:
cwl-ica [options] list-projects help
cwl-ica [options] list-projects [--tenant-name=<"tenant_name">]
Description:
List all available projects, if --tenant-name is not set then tenants from all projects are returned.
If env var CWL_ICA_DEFAULT_TENANT is set and you wish to view projects across all tenants, set --tenant-name to 'all'
Options:
--tenant-name=<tenant name> Optional, filter by tenant-name.
Environment Variables:
CWL_ICA_DEFAULT_TENANT Can be used as an alternative for --tenant-name.
Example:
cwl-ica list-projects --tenant-name "all"
cwl-ica list-projects --tenant-name "tenant name"
cwl-ica list-projects
"""
def __init__(self, command_argv):
# Collect args from doc strings
super().__init__(command_argv)
# Initialise values
self.tenant_name = None
# Check args
self.check_args()
def __call__(self):
"""
Just run through this
:return:
"""
# Check project.yaml exists
project_yaml_path = get_project_yaml_path()
project_list = read_yaml(project_yaml_path)['projects']
# Create pandas df of project yaml path
project_df = pd.DataFrame(project_list)
# Write project to stdout
project_df[["project_name", "project_id", "project_description", "production"]].\
to_markdown(sys.stdout, index=False)
# Create a new line character
print()
def check_args(self):
"""
Check if --tenant-name is defined or CWL_ICA_DEFAULT_TENANT is present
Or if --tenant-name is set to 'all'
:return:
"""
tenant_arg = self.args.get("--tenant-name", None)
# Check if tenant arg is set
if tenant_arg is None:
tenant_arg = os.environ.get("CWL_ICA_DEFAULT_TENANT", None)
# Check if tenant arg is set to all
if tenant_arg is None or tenant_arg == "all":
self.tenant_name = None
else:
self.tenant_name = tenant_arg
# If tenant_name is set, make sure it's present in tenant.yaml
if self.tenant_name is not None:
tenant_yaml_path = get_tenant_yaml_path()
tenant_list = read_yaml(tenant_yaml_path)['tenants']
for tenant in tenant_list:
if tenant.get("tenant_name", None) == self.tenant_name:
break
else:
logger.error(f"Tenant name set to \"{self.tenant_name}\" but "
f"could not find this tenant name in \"{tenant_yaml_path}\"\n")
raise TenantNotFoundError
# Just make sure the project.yaml path exists
_ = get_project_yaml_path()
| #!/usr/bin/env python3
"""
List all projects registered in <CWL_ICA_REPO_PATH>/config/projects.yaml
"""
from classes.command import Command
from utils.logging import get_logger
import pandas as pd
from utils.repo import get_tenant_yaml_path, read_yaml, get_project_yaml_path
import os
import sys
from utils.errors import TenantNotFoundError
logger = get_logger()
class ListProjects(Command):
"""Usage:
cwl-ica [options] list-projects help
cwl-ica [options] list-projects [--tenant-name=<"tenant_name">]
Description:
List all available projects, if --tenant-name is not set then tenants from all projects are returned.
If env var CWL_ICA_DEFAULT_TENANT is set and you wish to view projects across all tenants, set --tenant-name to 'all'
Options:
--tenant-name=<tenant name> Optional, filter by tenant-name.
Environment Variables:
CWL_ICA_DEFAULT_TENANT Can be used as an alternative for --tenant-name.
Example:
cwl-ica list-projects --tenant-name "all"
cwl-ica list-projects --tenant-name "tenant name"
cwl-ica list-projects
"""
def __init__(self, command_argv):
# Collect args from doc strings
super().__init__(command_argv)
# Initialise values
self.tenant_name = None
# Check args
self.check_args()
def __call__(self):
"""
Just run through this
:return:
"""
# Check project.yaml exists
project_yaml_path = get_project_yaml_path()
project_list = read_yaml(project_yaml_path)['projects']
# Create pandas df of project yaml path
project_df = pd.DataFrame(project_list)
# Write project to stdout
project_df[["project_name", "project_id", "project_description", "production"]].\
to_markdown(sys.stdout, index=False)
# Create a new line character
print()
def check_args(self):
"""
Check if --tenant-name is defined or CWL_ICA_DEFAULT_TENANT is present
Or if --tenant-name is set to 'all'
:return:
"""
tenant_arg = self.args.get("--tenant-name", None)
# Check if tenant arg is set
if tenant_arg is None:
tenant_arg = os.environ.get("CWL_ICA_DEFAULT_TENANT", None)
# Check if tenant arg is set to all
if tenant_arg is None or tenant_arg == "all":
self.tenant_name = None
else:
self.tenant_name = tenant_arg
# If tenant_name is set, make sure it's present in tenant.yaml
if self.tenant_name is not None:
tenant_yaml_path = get_tenant_yaml_path()
tenant_list = read_yaml(tenant_yaml_path)['tenants']
for tenant in tenant_list:
if tenant.get("tenant_name", None) == self.tenant_name:
break
else:
logger.error(f"Tenant name set to \"{self.tenant_name}\" but "
f"could not find this tenant name in \"{tenant_yaml_path}\"\n")
raise TenantNotFoundError
# Just make sure the project.yaml path exists
_ = get_project_yaml_path() | en | 0.595095 | #!/usr/bin/env python3 List all projects registered in <CWL_ICA_REPO_PATH>/config/projects.yaml Usage: cwl-ica [options] list-projects help cwl-ica [options] list-projects [--tenant-name=<"tenant_name">] Description: List all available projects, if --tenant-name is not set then tenants from all projects are returned. If env var CWL_ICA_DEFAULT_TENANT is set and you wish to view projects across all tenants, set --tenant-name to 'all' Options: --tenant-name=<tenant name> Optional, filter by tenant-name. Environment Variables: CWL_ICA_DEFAULT_TENANT Can be used as an alternative for --tenant-name. Example: cwl-ica list-projects --tenant-name "all" cwl-ica list-projects --tenant-name "tenant name" cwl-ica list-projects # Collect args from doc strings # Initialise values # Check args Just run through this :return: # Check project.yaml exists # Create pandas df of project yaml path # Write project to stdout # Create a new line character Check if --tenant-name is defined or CWL_ICA_DEFAULT_TENANT is present Or if --tenant-name is set to 'all' :return: # Check if tenant arg is set # Check if tenant arg is set to all # If tenant_name is set, make sure it's present in tenant.yaml # Just make sure the project.yaml path exists | 2.613485 | 3 |
auth/users/views.py | Shamsherocode/docker-django-nginx-gunicorn | 0 | 6621390 | <reponame>Shamsherocode/docker-django-nginx-gunicorn
from encodings import utf_8
from urllib import response
from django.http import HttpResponse
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from .serializers import UserSerializer
from users.models import User
from rest_framework.exceptions import AuthenticationFailed
import jwt, datetime
import os
from django.conf import settings
# Create your views here.
class RegisterView(APIView):
def post(self, request):
serializer = UserSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data)
class LoginView(APIView):
def post(self, request):
email = request.data['email']
password = request.data['password']
user = User.objects.filter(email=email).first()
if user is None:
raise AuthenticationFailed('User not found!')
if not user.check_password(password):
raise AuthenticationFailed('Incorrect password!')
payload = {
'id': user.id,
'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60),
'iat': datetime.datetime.utcnow()
}
token = jwt.encode(payload, 'secret', algorithm='HS256')
response = Response()
response.set_cookie(key='jwt', value=token, httponly=True)
response.data = {
'jwt': token
}
return response
class UserView(APIView):
def get(self, request):
token = request.COOKIES.get('jwt')
if not token:
raise AuthenticationFailed('Unauthenticated user!')
try:
payload = jwt.decode(token, 'secret', algorithms=['HS256'])
except jwt.ExpiredSignatureError:
raise AuthenticationFailed('Token Expire, please login again')
user = User.objects.filter(id=payload['id']).first()
serializer = UserSerializer(user)
return Response(serializer.data)
class LogoutView(APIView):
def post(self, request):
response = Response()
response.delete_cookie('jwt')
response.data = {
'message': 'Logout Successfully'
}
return response
def home(request):
return render(request, 'users/home.html')
def testing(request, filename):
f = open(settings.BASE_DIR+settings.MEDIA_ROOT+filename, 'r')
file_content = f.read()
f.close()
return HttpResponse(file_content, content_type="text/plain")
| from encodings import utf_8
from urllib import response
from django.http import HttpResponse
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from .serializers import UserSerializer
from users.models import User
from rest_framework.exceptions import AuthenticationFailed
import jwt, datetime
import os
from django.conf import settings
# Create your views here.
class RegisterView(APIView):
def post(self, request):
serializer = UserSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data)
class LoginView(APIView):
def post(self, request):
email = request.data['email']
password = request.data['password']
user = User.objects.filter(email=email).first()
if user is None:
raise AuthenticationFailed('User not found!')
if not user.check_password(password):
raise AuthenticationFailed('Incorrect password!')
payload = {
'id': user.id,
'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60),
'iat': datetime.datetime.utcnow()
}
token = jwt.encode(payload, 'secret', algorithm='HS256')
response = Response()
response.set_cookie(key='jwt', value=token, httponly=True)
response.data = {
'jwt': token
}
return response
class UserView(APIView):
def get(self, request):
token = request.COOKIES.get('jwt')
if not token:
raise AuthenticationFailed('Unauthenticated user!')
try:
payload = jwt.decode(token, 'secret', algorithms=['HS256'])
except jwt.ExpiredSignatureError:
raise AuthenticationFailed('Token Expire, please login again')
user = User.objects.filter(id=payload['id']).first()
serializer = UserSerializer(user)
return Response(serializer.data)
class LogoutView(APIView):
def post(self, request):
response = Response()
response.delete_cookie('jwt')
response.data = {
'message': 'Logout Successfully'
}
return response
def home(request):
return render(request, 'users/home.html')
def testing(request, filename):
f = open(settings.BASE_DIR+settings.MEDIA_ROOT+filename, 'r')
file_content = f.read()
f.close()
return HttpResponse(file_content, content_type="text/plain") | en | 0.968116 | # Create your views here. | 2.176442 | 2 |
game/abstract.py | Adeon18/Chess.ucu | 0 | 6621391 | '''
a module representing BoardADT and abstract pieces for bot
'''
from pprint import pprint
from copy import deepcopy
from settings import LETTERS
# Maps a file letter ('a'-'h') to its 0-based column index on the board.
letters = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7}
# Inverse of `letters`: maps a 0-based column index back to its file letter.
letters2 = {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f', 6: 'g', 7: 'h'}
# NOTE(review): not referenced in this part of the module; presumably holds
# prior board snapshots for reverting moves -- confirm against revert_last_move().
old_boards = []
def convert_position(string):
    '''
    Convert a square in chess notation (e.g. 'e4') to standard
    (column, row) list indexes into the board grid.
    '''
    column_lookup = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7}
    column = column_lookup[string[0]]
    row = 8 - int(string[1])
    return column, row
def convert_position_to_str(pos):
    '''
    Convert standard (column, row) list indexes back to
    chess notation (e.g. 'e4').
    '''
    file_lookup = {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f', 6: 'g', 7: 'h'}
    return f"{file_lookup[pos[0]]}{8 - pos[1]}"
class AbstractBoardADT:
    '''
    board data type which contains all the information about the chess game
    '''
    def __init__(self, copy=None):
        '''
        initialise an empty board

        copy: optional (moves, content) pair to rebuild the board from —
        note the reversed order relative to old_boards entries, which are
        (content, moves); TODO confirm callers pass it that way
        '''
        # 8x8 grid, row 0 == rank 8; empty squares hold the int 0
        self.content = [
            [0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0]
        ]
        self.moves = 0
        if copy:
            self.init_from_copy(copy)
    def init_from_copy(self, copy):
        '''
        rebuild this board from a (moves, content) snapshot by constructing
        a fresh abstract piece for every piece found in the snapshot
        (each constructor registers itself on this board)
        '''
        self.moves, content = copy
        for row in content:
            for piece in row:
                try:
                    # recover the concrete class name, e.g. "Pawn" out of
                    # "<class 'module.Pawn'>"
                    old_type = str(type(piece)).split(".")[1].split("\'")[0]
                    # NOTE(review): eval on a type-derived string — safe only
                    # while snapshots come from this module's own classes
                    copied_piece = eval("Abstract" + old_type + "(self, piece.color, piece.pos)")
                except IndexError:
                    # ints (empty squares) have no dotted type name
                    pass
    def __getitem__(self, pos):
        '''
        get item from ADT
        pos must be in chess notation: "a1" - "h8"
        '''
        x, y = convert_position(pos)
        return self.content[y][x]
    def add_piece(self, piece, position):
        '''
        add a piece to the board
        piece instance must be Piece
        '''
        x, y = convert_position(position)
        self.content[y][x] = piece
    def remove_piece(self, position):
        '''
        remove piece from chess board by it's position
        '''
        x, y = convert_position(position)
        self.content[y][x] = 0
    def __str__(self):
        '''
        return a chess board with pieces being represented as letters
        capital letter means white piece, lower case - black piece
        '''
        result = ""
        for i in self.content:
            for j in i:
                result += str(j) + " "
            result += "\n"
        return result
    def possible_computer_moves(self):
        '''
        return every (from_pos, to_pos) pair available to the computer
        (black) side, with to_pos in chess notation
        '''
        possible_computer_moves = []
        for row in self.content:
            for piece in row:
                # black pieces print as lower-case letters; "0" is also
                # lower-case but lacks possible_moves, hence the try below
                if str(piece) == str(piece).lower():
                    try:
                        for move in piece.possible_moves():
                            possible_computer_moves.append((piece.pos, move))
                    except AttributeError:
                        # empty square (int 0)
                        pass
        return possible_computer_moves
    def revert_last_move(self):
        '''
        undo the most recent move by restoring the newest snapshot from
        the module-level old_boards stack
        '''
        global old_boards
        self.content, self.moves = old_boards[-1]
        # re-sync every piece's cached .pos with the square it actually
        # occupies in the restored grid; ints (empty squares) are skipped
        for n_row, row in enumerate(self.content):
            for n_piece, piece in enumerate(row):
                try:
                    piece.pos = LETTERS[n_piece] + str(8 - n_row)
                except AttributeError:
                    pass
        old_boards = old_boards[:-1]
    def is_game_over(self):
        '''
        the game is over as soon as either king is missing from the board
        '''
        k_in, K_in = False, False
        for row in self.content:
            for piece in row:
                if str(piece) == "k":
                    k_in = True
                elif str(piece) == "K":
                    K_in = True
        if not all((k_in, K_in)):
            return True
        return False
class AbstractPiece:
    '''
    parent class for all pieces
    '''
    def __init__(self, board, tipe, color, pos):
        '''
        initialise a piece with its type, one of the following
        'p' for pawn
        'r' for rook
        'q' for queen
        'k' for king
        'n' for knight
        'b' for bishop
        white pieces get their tipe as capital letter, black as lower case letter
        '''
        self.game_board = board
        self.tipe = tipe
        self.pos = pos
        self.color = color
        # registering on the board is a side effect of construction
        self.game_board.add_piece(self, self.pos)
    def __repr__(self):
        # the board's __str__ and colour checks rely on this single letter
        return self.tipe
    def move(self, next_pos):
        """
        Move the figure to the next position

        next_pos is an (x, y) index pair (not chess notation).  A snapshot
        of the board is always pushed onto old_boards first, even when
        next_pos is falsy and the move itself is a no-op.
        """
        # push a deep snapshot so revert_last_move() can undo this move
        global old_boards
        old_boards.append((deepcopy(self.game_board.content), deepcopy(self.game_board.moves)))
        if next_pos:
            # Get the next pos
            next_pos = convert_position_to_str(next_pos)
            if isinstance(self, AbstractRook):
                # a rook that has moved can no longer castle
                self.castle = False
                self.game_board.add_piece(self, next_pos)
                self.game_board.remove_piece(self.pos)
                self.game_board.moves += 1
            elif isinstance(self, AbstractKing):
                # any king move forfeits both castling rights
                self.left_castle = False
                self.right_castle = False
                y = convert_position(self.pos)[1]
                # a two-file king move encodes castling
                if abs(convert_position(self.pos)[0] - convert_position(next_pos)[0]) == 2:
                    self.game_board.add_piece(self, next_pos)
                    try:
                        # relocate the matching rook: queenside to file d,
                        # kingside to file f (AttributeError if the corner
                        # square is empty)
                        if convert_position(next_pos)[0] <= 4:
                            self.game_board.content[y][0].move((3, y))
                        else:
                            self.game_board.content[y][7].move((5, y))
                    except AttributeError:
                        pass
                    self.game_board.remove_piece(self.pos)
                else:
                    self.game_board.add_piece(self, next_pos)
                    self.game_board.remove_piece(self.pos)
                    self.game_board.moves += 1
            elif isinstance(self, AbstractPawn):
                # promotion: a pawn reaching rank 8 or 1 becomes a queen
                if int(next_pos[1]) == 8 or int(next_pos[1]) == 1:
                    self.game_board.add_piece(AbstractQueen(self.game_board, self.color, next_pos), next_pos)
                    self.game_board.remove_piece(self.pos)
                else:
                    self.game_board.add_piece(self, next_pos)
                    self.game_board.remove_piece(self.pos)
                    self.game_board.moves += 1
            else:
                self.game_board.add_piece(self, next_pos)
                self.game_board.remove_piece(self.pos)
                self.game_board.moves += 1
            # Update the board
            self.pos = next_pos
        # Deselect and change turns
class AbstractPawn(AbstractPiece):
    '''
    a class representing a pawn piece
    '''
    def __init__(self, board, color, pos):
        '''
        initialise a pawn with its colour and position
        white color is represented as 1
        black color is represented as 0
        possibility of en passant capture is declared by default as False
        '''
        if color == 1:
            super().__init__(board, 'P', color, pos)
        if color == 0:
            super().__init__(board, 'p', color, pos)
        self.double_move = True
        self.en_passant = False
    def possible_moves(self):
        '''
        return a list of all possible moves for piece as names of cells a-h 1-8
        '''
        possible_moves = []
        # white advances towards rank 8, black towards rank 1
        if self.color == 1:
            mod = 1
        if self.color == 0:
            mod = -1
        x, y = convert_position(self.pos)
        # one square forward, only onto an empty square
        try:
            pos = self.pos[0] + str(int(self.pos[1]) + mod)
            if self.game_board[pos] == 0:
                possible_moves.append(pos)
        except (IndexError, KeyError, AttributeError):
            pass
        if self.double_move:
            # two squares forward, only from the starting rank (2 / 7)
            try:
                if (int(self.pos[1]) == 7 and self.color == 0) or (int(self.pos[1]) == 2 and self.color == 1):
                    pos = self.pos[0] + str(int(self.pos[1]) + 2*mod)
                    if self.game_board[pos] == 0:
                        possible_moves.append(pos)
            except (IndexError, KeyError, AttributeError):
                pass
        # diagonal captures: .color on an empty square (int 0) raises
        # AttributeError, which is swallowed, so only occupied enemy
        # squares end up in the list
        try:
            pos = letters2[letters[self.pos[0]]-1] + str(int(self.pos[1]) + mod)
            if self.game_board[pos].color != self.color:
                possible_moves.append(pos)
        except (IndexError, KeyError, AttributeError):
            pass
        try:
            pos = letters2[letters[self.pos[0]]+1] + str(int(self.pos[1]) + mod)
            if self.game_board[pos].color != self.color:
                possible_moves.append(pos)
        except (IndexError, KeyError, AttributeError):
            pass
        return possible_moves
class AbstractKing(AbstractPiece):
    '''
    a class representing a king piece
    '''
    def __init__(self, board, color, pos):
        '''
        initialise a king with its colour and position
        white color is represented as 1
        black color is represented as 0
        both castling rights start as available
        '''
        if color == 1:
            super().__init__(board, 'K', color, pos)
        if color == 0:
            super().__init__(board, 'k', color, pos)
        self.left_castle = True
        self.right_castle = True
    def possible_moves(self):
        '''
        return a list of all possible moves for piece as names of cells a-h 1-8
        '''
        possible_moves = []
        x, y = convert_position(self.pos)
        # the 3x3 neighbourhood (includes the current square, which is
        # occupied by this king itself and thus filtered out below)
        for i in range(3):
            for j in range(3):
                try:
                    pos = (x - 1 + i, y - 1 + j)
                    if (pos[0] >= 0 and pos[0] <= 7) and (pos[1] >= 0 and pos[1] <= 7):
                        if self.game_board[convert_position_to_str(pos)] == 0:
                            if not self.is_checked(convert_position_to_str(pos)):
                                possible_moves.append(convert_position_to_str(pos))
                        elif self.game_board[convert_position_to_str(pos)].color != self.color:
                            if not self.is_checked(convert_position_to_str(pos)):
                                possible_moves.append(convert_position_to_str(pos))
                except (IndexError, KeyError):
                    pass
        # queenside castling: unmoved rook on file a, 3 empty squares between
        if self.left_castle and not self.is_checked(self.pos):
            try:
                x, y = convert_position(self.pos)
                if self.game_board.content[y][0].castle:
                    castle = True
                    for i in range(3):
                        if self.game_board.content[y][x-i-1] != 0:
                            castle = False
                    # NOTE(review): 'pos' here is whatever the 3x3 loop
                    # above last assigned, not the castling destination —
                    # looks like a latent bug; confirm before relying on it
                    if not self.is_checked(convert_position_to_str(pos)) and castle:
                        possible_moves.append(convert_position_to_str((x - 2, y)))
            except (IndexError, KeyError, AttributeError):
                pass
        # kingside castling: unmoved rook on file h, 2 empty squares between
        if self.right_castle and not self.is_checked(self.pos):
            try:
                x, y = convert_position(self.pos)
                if self.game_board.content[y][7].castle:
                    castle = True
                    for i in range(2):
                        if self.game_board.content[y][x+i+1] != 0:
                            castle = False
                    # NOTE(review): same leaked-'pos' issue as above
                    if not self.is_checked(convert_position_to_str(pos)) and castle:
                        possible_moves.append(convert_position_to_str((x + 2, y)))
            except (IndexError, KeyError, AttributeError):
                pass
        return possible_moves
    def is_checked(self, positionn):
        '''
        returns True if king is checked, False if not

        positionn is a square in chess notation; knight jumps, diagonals
        and ranks/files radiating from it are scanned for attackers
        '''
        x, y = convert_position(positionn)
        possible_moves = []
        # knight attacks: offsets with i**2 + j**2 == 5 are exactly the 8
        # knight moves (values 3 and 4 from the range can never satisfy it)
        for i in range(-2, 5):
            for j in range(-2, 5):
                try:
                    if i ** 2 + j ** 2 == 5:
                        pos = (x + i, y + j)
                        if pos[0] <= 7 and pos[1] <= 7 and pos[0] >= 0 and pos[1] >= 0:
                            if self.game_board[convert_position_to_str(pos)] == 0:
                                possible_moves.append(convert_position_to_str(pos))
                            elif self.game_board[convert_position_to_str(pos)].color != self.color:
                                possible_moves.append(convert_position_to_str(pos))
                except (IndexError, KeyError):
                    pass
        for poss in possible_moves:
            if isinstance(self.game_board[poss], AbstractKnight):
                return True
        # diagonal attacks (bishop / queen): walk each ray until blocked
        possible_moves = []
        x, y = convert_position(positionn)
        diagonals = [[[x + i, y + i] for i in range(1, 8)],
                     [[x + i, y - i] for i in range(1, 8)],
                     [[x - i, y + i] for i in range(1, 8)],
                     [[x - i, y - i] for i in range(1, 8)]]
        for direction in diagonals:
            for position in direction:
                try:
                    if position[0] < 0 or position[1] < 0 or position[0] > 7 or position[1] > 7:
                        break
                    pos = (position[0], position[1])
                    if self.game_board[convert_position_to_str(pos)] == 0:
                        possible_moves.append(convert_position_to_str(pos))
                    elif self.game_board[convert_position_to_str(pos)].color != self.color:
                        possible_moves.append(convert_position_to_str(pos))
                        break
                    else:
                        break
                except (IndexError, KeyError):
                    pass
        for poss in possible_moves:
            x, y = convert_position(poss)
            if isinstance(self.game_board[poss], AbstractBishop) or isinstance(self.game_board[poss], AbstractQueen):
                return True
        # rank/file attacks (rook / queen): rays pre-clipped to board edges
        possible_moves = []
        x, y = convert_position(positionn)
        cross = [[[x + i, y] for i in range(1, 8 - x)],
                 [[x - i, y] for i in range(1, x + 1)],
                 [[x, y + i] for i in range(1, 8 - y)],
                 [[x, y - i] for i in range(1, y + 1)]]
        for direction in cross:
            for position in direction:
                try:
                    if position[0] < 0 or position[1] < 0 or position[0] > 7 or position[1] > 7:
                        break
                    pos = (position[0], position[1])
                    if self.game_board[convert_position_to_str(pos)] == 0:
                        possible_moves.append(convert_position_to_str(pos))
                    elif self.game_board[convert_position_to_str(pos)].color != self.color:
                        possible_moves.append(convert_position_to_str(pos))
                        break
                    else:
                        break
                except (IndexError, KeyError):
                    pass
        for poss in possible_moves:
            x, y = convert_position(poss)
            if isinstance(self.game_board[poss], AbstractRook) or isinstance(self.game_board[poss], AbstractQueen):
                return True
        return False
class AbstractKnight(AbstractPiece):
    '''
    a class representing a knight piece
    '''
    def __init__(self, board, color, pos):
        '''
        initialise a knight with its colour and position
        white color is represented as 1
        black color is represented as 0
        '''
        if color == 1:
            super().__init__(board, 'N', color, pos)
        if color == 0:
            super().__init__(board, 'n', color, pos)
    def possible_moves(self):
        '''
        return a list of all possible moves for piece as names of cells a-h 1-8
        '''
        possible_moves = []
        x, y = convert_position(self.pos)
        # offsets with i**2 + j**2 == 5 are exactly the 8 knight moves
        # (the extra 3 and 4 values produced by the range never qualify)
        for i in range(-2, 5):
            for j in range(-2, 5):
                try:
                    if i ** 2 + j ** 2 == 5:
                        pos = (x + i, y + j)
                        if pos[0] <= 7 and pos[1] <= 7 and pos[0] >= 0 and pos[1] >= 0:
                            # empty square or enemy piece: both are legal
                            if self.game_board[convert_position_to_str(pos)] == 0:
                                possible_moves.append(convert_position_to_str(pos))
                            elif self.game_board[convert_position_to_str(pos)].color != self.color:
                                possible_moves.append(convert_position_to_str(pos))
                except (IndexError, KeyError):
                    pass
        return possible_moves
class AbstractBishop(AbstractPiece):
    '''
    a class representing a bishop piece
    '''
    def __init__(self, board, color, pos):
        '''
        initialise a bishop with its colour and position
        white color is represented as 1
        black color is represented as 0
        '''
        if color == 1:
            super().__init__(board, 'B', color, pos)
        if color == 0:
            super().__init__(board, 'b', color, pos)
    def possible_moves(self):
        '''
        return a list of all possible moves for piece as names of cells a-h 1-8
        '''
        possible_moves = []
        x, y = convert_position(self.pos)
        # four diagonal rays, walked outwards from the bishop
        diagonals = [[[x + i, y + i] for i in range(1, 8)],
                     [[x + i, y - i] for i in range(1, 8)],
                     [[x - i, y + i] for i in range(1, 8)],
                     [[x - i, y - i] for i in range(1, 8)]]
        for direction in diagonals:
            for position in direction:
                try:
                    # stop at the board edge
                    if position[0] < 0 or position[1] < 0 or position[0] > 7 or position[1] > 7:
                        break
                    pos = (position[0], position[1])
                    if self.game_board[convert_position_to_str(pos)] == 0:
                        # empty square: keep walking along the ray
                        possible_moves.append(convert_position_to_str(pos))
                    elif self.game_board[convert_position_to_str(pos)].color != self.color:
                        # enemy piece: capturable, but blocks the ray
                        possible_moves.append(convert_position_to_str(pos))
                        break
                    else:
                        # own piece blocks the ray
                        break
                except (IndexError, KeyError):
                    pass
        return possible_moves
class AbstractRook(AbstractPiece):
    '''
    a class representing a rook piece
    '''
    def __init__(self, board, color, pos):
        '''
        initialise a rook with its colour and position
        white color is represented as 1
        black color is represented as 0
        the rook stays eligible for castling until it first moves
        '''
        if color == 1:
            super().__init__(board, 'R', color, pos)
        if color == 0:
            super().__init__(board, 'r', color, pos)
        # cleared by AbstractPiece.move(); read by AbstractKing.possible_moves()
        self.castle = True
    def possible_moves(self):
        '''
        return a list of all possible moves for piece as names of cells a-h 1-8
        '''
        possible_moves = []
        x, y = convert_position(self.pos)
        # four orthogonal rays, pre-clipped to the board edges
        cross = [[[x + i, y] for i in range(1, 8 - x)],
                 [[x - i, y] for i in range(1, x + 1)],
                 [[x, y + i] for i in range(1, 8 - y)],
                 [[x, y - i] for i in range(1, y + 1)]]
        for direction in cross:
            for position in direction:
                try:
                    if position[0] < 0 or position[1] < 0 or position[0] > 7 or position[1] > 7:
                        break
                    pos = (position[0], position[1])
                    if self.game_board[convert_position_to_str(pos)] == 0:
                        # empty square: keep walking along the ray
                        possible_moves.append(convert_position_to_str(pos))
                    elif self.game_board[convert_position_to_str(pos)].color != self.color:
                        # enemy piece: capturable, but blocks the ray
                        possible_moves.append(convert_position_to_str(pos))
                        break
                    else:
                        # own piece blocks the ray
                        break
                except (IndexError, KeyError):
                    pass
        return possible_moves
class AbstractQueen(AbstractPiece):
    '''
    a class representing a queen piece
    '''
    def __init__(self, board, color, pos):
        '''
        initialise a queen with its colour and position
        white color is represented as 1
        black color is represented as 0
        '''
        if color == 1:
            super().__init__(board, 'Q', color, pos)
        if color == 0:
            super().__init__(board, 'q', color, pos)
    def possible_moves(self):
        '''
        return a list of all possible moves for piece as names of cells a-h 1-8
        (union of rook-style rank/file rays and bishop-style diagonal rays)
        '''
        possible_moves = []
        x, y = convert_position(self.pos)
        # orthogonal rays, pre-clipped to the board edges
        cross = [[[x + i, y] for i in range(1, 8 - x)],
                 [[x - i, y] for i in range(1, x + 1)],
                 [[x, y + i] for i in range(1, 8 - y)],
                 [[x, y - i] for i in range(1, y + 1)]]
        for direction in cross:
            for position in direction:
                try:
                    if position[0] < 0 or position[1] < 0 or position[0] > 7 or position[1] > 7:
                        break
                    pos = (position[0], position[1])
                    if self.game_board[convert_position_to_str(pos)] == 0:
                        possible_moves.append(convert_position_to_str(pos))
                    elif self.game_board[convert_position_to_str(pos)].color != self.color:
                        # enemy piece: capturable, but blocks the ray
                        possible_moves.append(convert_position_to_str(pos))
                        break
                    else:
                        # own piece blocks the ray
                        break
                except (IndexError, KeyError):
                    pass
        # diagonal rays
        diagonals = [[[x + i, y + i] for i in range(1, 8)],
                     [[x + i, y - i] for i in range(1, 8)],
                     [[x - i, y + i] for i in range(1, 8)],
                     [[x - i, y - i] for i in range(1, 8)]]
        for direction in diagonals:
            for position in direction:
                try:
                    if position[0] < 0 or position[1] < 0 or position[0] > 7 or position[1] > 7:
                        break
                    pos = (position[0], position[1])
                    if self.game_board[convert_position_to_str(pos)] == 0:
                        possible_moves.append(convert_position_to_str(pos))
                    elif self.game_board[convert_position_to_str(pos)].color != self.color:
                        possible_moves.append(convert_position_to_str(pos))
                        break
                    else:
                        break
                except (IndexError, KeyError):
                    pass
        return possible_moves
if __name__ == "__main__":
    # manual smoke test: build the full starting position, make one pawn
    # move, then revert it and dump the board
    board1 = AbstractBoardADT()
    # White
    AbstractPawn(board1, 1, "a2")
    AbstractPawn(board1, 1, "b2")
    AbstractPawn(board1, 1, "c2")
    AbstractPawn(board1, 1, "d2")
    AbstractPawn(board1, 1, "e2")
    AbstractPawn(board1, 1, "f2")
    AbstractPawn(board1, 1, "g2")
    AbstractPawn(board1, 1, "h2")
    AbstractKing(board1, 1, "e1")
    AbstractQueen(board1, 1, "d1")
    AbstractKnight(board1, 1, "b1")
    AbstractKnight(board1, 1, "g1")
    AbstractBishop(board1, 1, "c1")
    AbstractBishop(board1, 1, "f1")
    AbstractRook(board1, 1, "a1")
    AbstractRook(board1, 1, "h1")
    # Black
    p1 = AbstractPawn(board1, 0, "a7")
    AbstractPawn(board1, 0, "b7")
    AbstractPawn(board1, 0, "c7")
    AbstractPawn(board1, 0, "d7")
    AbstractPawn(board1, 0, "e7")
    AbstractPawn(board1, 0, "f7")
    AbstractPawn(board1, 0, "g7")
    AbstractPawn(board1, 0, "h7")
    AbstractKing(board1, 0, "e8")
    AbstractQueen(board1, 0, "d8")
    AbstractKnight(board1, 0, "b8")
    AbstractKnight(board1, 0, "g8")
    b1 = AbstractBishop(board1, 0, "c8")
    AbstractBishop(board1, 0, "f8")
    r1 = AbstractRook(board1, 0, "a8")
    AbstractRook(board1, 0, "h8")
    print(board1)
    # move the a7 pawn to (0, 3) == "a5" and show the board
    p1.move((0, 3))
    print(board1)
    # undo the move via the snapshot stack
    board1.revert_last_move()
    print(board1)
    pprint(board1.content)
| '''
a module representing BoardADT and abstract pieces for bot
'''
from pprint import pprint
from copy import deepcopy
from settings import LETTERS
letters = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7}
letters2 = {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f', 6: 'g', 7: 'h'}
old_boards = []
def convert_position(string):
'''
convert position in chess notation to standart list indexes
'''
x, y = string[0], string[1]
x = letters[x]
y = 8 - int(y)
return x, y
def convert_position_to_str(pos):
'''
convert standart list indexes to the chess notation
'''
x, y = pos[0], pos[1]
x = letters2[x]
y = 8-y
res = str(x) + str(y)
return res
class AbstractBoardADT:
'''
board data type which contains all the information about the chess game
'''
def __init__(self, copy=None):
'''
initialise an empty board
'''
self.content = [
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]
]
self.moves = 0
if copy:
self.init_from_copy(copy)
def init_from_copy(self, copy):
# print("here")
self.moves, content = copy
for row in content:
for piece in row:
try:
old_type = str(type(piece)).split(".")[1].split("\'")[0]
# print(piece.pos, piece.color)
# print("Abstract" + old_type + "(self, piece.color, piece.pos)")
copied_piece = eval("Abstract" + old_type + "(self, piece.color, piece.pos)")
except IndexError:
pass
# print("Copied to an abstract board:")
# pprint(self.content)
def __getitem__(self, pos):
'''
get item from ADT
pos must be in chess notation: "a1" - "h8"
'''
x, y = convert_position(pos)
return self.content[y][x]
def add_piece(self, piece, position):
'''
add a piece to the board
piece instance must be Piece
'''
x, y = convert_position(position)
self.content[y][x] = piece
def remove_piece(self, position):
'''
remove piece from chess board by it's position
'''
x, y = convert_position(position)
self.content[y][x] = 0
def __str__(self):
'''
return a chess board with pieces being represented as letters
capital letter means white piece, lower case - black piece
'''
result = ""
for i in self.content:
for j in i:
result += str(j) + " "
result += "\n"
return result
def possible_computer_moves(self):
possible_computer_moves = []
for row in self.content:
for piece in row:
if str(piece) == str(piece).lower():
try:
# print(f"{piece} can make following moves: {piece.possible_moves()}")
for move in piece.possible_moves():
# print(f"{piece} at {piece.pos} can move to {move}")
possible_computer_moves.append((piece.pos, move))
except AttributeError:
pass
# print(f"All possible moves for computer: {possible_computer_moves}")
return possible_computer_moves
def revert_last_move(self):
global old_boards
# print("Reverting the following board:")
# print(self)
self.content, self.moves = old_boards[-1]
# print("Reverted:")
# print(self)
for n_row, row in enumerate(self.content):
for n_piece, piece in enumerate(row):
try:
piece.pos = LETTERS[n_piece] + str(8 - n_row)
except AttributeError:
pass
old_boards = old_boards[:-1]
# print(self.content[0][1].pos)
def is_game_over(self):
k_in, K_in = False, False
for row in self.content:
for piece in row:
if str(piece) == "k":
k_in = True
elif str(piece) == "K":
K_in = True
if not all((k_in, K_in)):
return True
return False
class AbstractPiece:
'''
parent class for all pieces
'''
def __init__(self, board, tipe, color, pos):
'''
initialise a piece with its type, one of the following
'p' for pawn
'r' for rook
'q' for queen
'k' for king
'n' for knight
'b' for bishop
white pieces get their tipe as capital letter, black as lower case letter
'''
self.game_board = board
# self.position = position
self.tipe = tipe
self.pos = pos
self.color = color
self.game_board.add_piece(self, self.pos)
def __repr__(self):
return self.tipe
def move(self, next_pos):
"""
Move the figure to the next position
"""
# print("Updating the old_boards with the content:")
# pprint(self.game_board.content)
global old_boards
old_boards.append((deepcopy(self.game_board.content), deepcopy(self.game_board.moves)))
# print("Updated.")
# print(old_boards[-1][0])
# print(repr(self.game_board))
if next_pos:
# Get the next pos
next_pos = convert_position_to_str(next_pos)
# print(next_pos)
# print(isinstance(self, AbstractKing))
if isinstance(self, AbstractRook):
self.castle = False
self.game_board.add_piece(self, next_pos)
self.game_board.remove_piece(self.pos)
self.game_board.moves += 1
elif isinstance(self, AbstractKing):
self.left_castle = False
self.right_castle = False
y = convert_position(self.pos)[1]
if abs(convert_position(self.pos)[0] - convert_position(next_pos)[0]) == 2:
self.game_board.add_piece(self, next_pos)
# print(convert_position(next_pos)[0])
try:
if convert_position(next_pos)[0] <= 4:
self.game_board.content[y][0].move((3, y))
else:
self.game_board.content[y][7].move((5, y))
except AttributeError:
pass
self.game_board.remove_piece(self.pos)
else:
self.game_board.add_piece(self, next_pos)
self.game_board.remove_piece(self.pos)
self.game_board.moves += 1
elif isinstance(self, AbstractPawn):
# print(self.color, next_pos[1])
if int(next_pos[1]) == 8 or int(next_pos[1]) == 1:
self.game_board.add_piece(AbstractQueen(self.game_board, self.color, next_pos), next_pos)
self.game_board.remove_piece(self.pos)
# self.kill()
else:
self.game_board.add_piece(self, next_pos)
self.game_board.remove_piece(self.pos)
self.game_board.moves += 1
else:
self.game_board.add_piece(self, next_pos)
self.game_board.remove_piece(self.pos)
self.game_board.moves += 1
# Update the board
self.pos = next_pos
# print(self.game_board)
# Deselect and change turns
class AbstractPawn(AbstractPiece):
'''
a class representing a pown piece
'''
def __init__(self, board, color, pos):
'''
initialise a pawn with its colour and position
white color is represented as 1
black color is represented as 0
possibility of en passant capture is declared by default as False
'''
if color == 1:
super().__init__(board, 'P', color, pos)
if color == 0:
super().__init__(board, 'p', color, pos)
self.double_move = True
self.en_passant = False
def possible_moves(self):
'''
return a list of all possible moves for piece as names of cells a-h 1-8
'''
possible_moves = []
if self.color == 1:
mod = 1
if self.color == 0:
mod = -1
x, y = convert_position(self.pos)
try:
pos = self.pos[0] + str(int(self.pos[1]) + mod)
if self.game_board[pos] == 0:
possible_moves.append(pos)
except (IndexError, KeyError, AttributeError):
pass
if self.double_move:
try:
if (int(self.pos[1]) == 7 and self.color == 0) or (int(self.pos[1]) == 2 and self.color == 1):
pos = self.pos[0] + str(int(self.pos[1]) + 2*mod)
if self.game_board[pos] == 0:
possible_moves.append(pos)
except (IndexError, KeyError, AttributeError):
pass
try:
pos = letters2[letters[self.pos[0]]-1] + str(int(self.pos[1]) + mod)
if self.game_board[pos].color != self.color:
possible_moves.append(pos)
except (IndexError, KeyError, AttributeError):
pass
try:
pos = letters2[letters[self.pos[0]]+1] + str(int(self.pos[1]) + mod)
if self.game_board[pos].color != self.color:
possible_moves.append(pos)
except (IndexError, KeyError, AttributeError):
pass
return possible_moves
class AbstractKing(AbstractPiece):
'''
a class representing a pown piece
'''
def __init__(self, board, color, pos):
'''
initialise a pawn with its colour and position
white color is represented as 1
black color is represented as 0
possibility of en passant capture is declared by default as False
'''
if color == 1:
super().__init__(board, 'K', color, pos)
if color == 0:
super().__init__(board, 'k', color, pos)
self.left_castle = True
self.right_castle = True
def possible_moves(self):
'''
return a list of all possible moves for piece as names of cells a-h 1-8
'''
possible_moves = []
x, y = convert_position(self.pos)
for i in range(3):
for j in range(3):
try:
pos = (x - 1 + i, y - 1 + j)
if (pos[0] >= 0 and pos[0] <= 7) and (pos[1] >= 0 and pos[1] <= 7):
if self.game_board[convert_position_to_str(pos)] == 0:
if not self.is_checked(convert_position_to_str(pos)):
possible_moves.append(convert_position_to_str(pos))
elif self.game_board[convert_position_to_str(pos)].color != self.color:
if not self.is_checked(convert_position_to_str(pos)):
possible_moves.append(convert_position_to_str(pos))
except (IndexError, KeyError):
pass
if self.left_castle and not self.is_checked(self.pos):
try:
x, y = convert_position(self.pos)
if self.game_board.content[y][0].castle:
castle = True
for i in range(3):
if self.game_board.content[y][x-i-1] != 0:
castle = False
if not self.is_checked(convert_position_to_str(pos)) and castle:
possible_moves.append(convert_position_to_str((x - 2, y)))
except (IndexError, KeyError, AttributeError):
pass
if self.right_castle and not self.is_checked(self.pos):
try:
x, y = convert_position(self.pos)
if self.game_board.content[y][7].castle:
castle = True
for i in range(2):
if self.game_board.content[y][x+i+1] != 0:
castle = False
if not self.is_checked(convert_position_to_str(pos)) and castle:
possible_moves.append(convert_position_to_str((x + 2, y)))
except (IndexError, KeyError, AttributeError):
pass
return possible_moves
def is_checked(self, positionn):
'''
returns True if king is checked, False if not
'''
x, y = convert_position(positionn)
possible_moves = []
for i in range(-2, 5):
for j in range(-2, 5):
try:
if i ** 2 + j ** 2 == 5:
pos = (x + i, y + j)
if pos[0] <= 7 and pos[1] <= 7 and pos[0] >= 0 and pos[1] >= 0:
if self.game_board[convert_position_to_str(pos)] == 0:
possible_moves.append(convert_position_to_str(pos))
elif self.game_board[convert_position_to_str(pos)].color != self.color:
possible_moves.append(convert_position_to_str(pos))
except (IndexError, KeyError):
pass
for poss in possible_moves:
if isinstance(self.game_board[poss], AbstractKnight):
return True
possible_moves = []
x, y = convert_position(positionn)
diagonals = [[[x + i, y + i] for i in range(1, 8)],
[[x + i, y - i] for i in range(1, 8)],
[[x - i, y + i] for i in range(1, 8)],
[[x - i, y - i] for i in range(1, 8)]]
for direction in diagonals:
for position in direction:
try:
if position[0] < 0 or position[1] < 0 or position[0] > 7 or position[1] > 7:
break
pos = (position[0], position[1])
if self.game_board[convert_position_to_str(pos)] == 0:
possible_moves.append(convert_position_to_str(pos))
elif self.game_board[convert_position_to_str(pos)].color != self.color:
possible_moves.append(convert_position_to_str(pos))
break
else:
break
except (IndexError, KeyError):
pass
for poss in possible_moves:
x, y = convert_position(poss)
if isinstance(self.game_board[poss], AbstractBishop) or isinstance(self.game_board[poss], AbstractQueen):
return True
possible_moves = []
x, y = convert_position(positionn)
cross = [[[x + i, y] for i in range(1, 8 - x)],
[[x - i, y] for i in range(1, x + 1)],
[[x, y + i] for i in range(1, 8 - y)],
[[x, y - i] for i in range(1, y + 1)]]
for direction in cross:
for position in direction:
try:
if position[0] < 0 or position[1] < 0 or position[0] > 7 or position[1] > 7:
break
pos = (position[0], position[1])
if self.game_board[convert_position_to_str(pos)] == 0:
possible_moves.append(convert_position_to_str(pos))
elif self.game_board[convert_position_to_str(pos)].color != self.color:
possible_moves.append(convert_position_to_str(pos))
break
else:
break
except (IndexError, KeyError):
pass
for poss in possible_moves:
x, y = convert_position(poss)
if isinstance(self.game_board[poss], AbstractRook) or isinstance(self.game_board[poss], AbstractQueen):
return True
return False
class AbstractKnight(AbstractPiece):
'''
a class representing a pown piece
'''
def __init__(self, board, color, pos):
'''
initialise a pawn with its colour and position
white color is represented as 1
black color is represented as 0
possibility of en passant capture is declared by default as False
'''
if color == 1:
super().__init__(board, 'N', color, pos)
if color == 0:
super().__init__(board, 'n', color, pos)
def possible_moves(self):
'''
return a list of all possible moves for piece as names of cells a-h 1-8
'''
possible_moves = []
x, y = convert_position(self.pos)
for i in range(-2, 5):
for j in range(-2, 5):
try:
if i ** 2 + j ** 2 == 5:
pos = (x + i, y + j)
if pos[0] <= 7 and pos[1] <= 7 and pos[0] >= 0 and pos[1] >= 0:
if self.game_board[convert_position_to_str(pos)] == 0:
possible_moves.append(convert_position_to_str(pos))
elif self.game_board[convert_position_to_str(pos)].color != self.color:
possible_moves.append(convert_position_to_str(pos))
except (IndexError, KeyError):
pass
return possible_moves
class AbstractBishop(AbstractPiece):
'''
a class representing a pown piece
'''
def __init__(self, board, color, pos):
'''
initialise a pawn with its colour and position
white color is represented as 1
black color is represented as 0
possibility of en passant capture is declared by default as False
'''
if color == 1:
super().__init__(board, 'B', color, pos)
if color == 0:
super().__init__(board, 'b', color, pos)
def possible_moves(self):
'''
return a list of all possible moves for piece as names of cells a-h 1-8
'''
possible_moves = []
x, y = convert_position(self.pos)
diagonals = [[[x + i, y + i] for i in range(1, 8)],
[[x + i, y - i] for i in range(1, 8)],
[[x - i, y + i] for i in range(1, 8)],
[[x - i, y - i] for i in range(1, 8)]]
for direction in diagonals:
for position in direction:
try:
if position[0] < 0 or position[1] < 0 or position[0] > 7 or position[1] > 7:
break
pos = (position[0], position[1])
if self.game_board[convert_position_to_str(pos)] == 0:
possible_moves.append(convert_position_to_str(pos))
elif self.game_board[convert_position_to_str(pos)].color != self.color:
possible_moves.append(convert_position_to_str(pos))
break
else:
break
except (IndexError, KeyError):
pass
return possible_moves
class AbstractRook(AbstractPiece):
    '''
    a class representing a rook piece
    '''

    def __init__(self, board, color, pos):
        '''
        initialise a rook with its colour and position
        white color is represented as 1 (shown as 'R')
        black color is represented as 0 (shown as 'r')
        the rook starts with castling still available
        '''
        if color == 1:
            super().__init__(board, 'R', color, pos)
        if color == 0:
            super().__init__(board, 'r', color, pos)
        self.castle = True

    def possible_moves(self):
        '''
        return a list of all possible moves for piece as names of cells a-h 1-8
        '''
        moves = []
        col, row = convert_position(self.pos)
        # Slide along ranks and files: empty squares are collected and the
        # walk continues; the first enemy piece is collected and stops the
        # walk; a friendly piece stops the walk without being collected.
        for step_col, step_row in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            for distance in range(1, 8):
                cell_col = col + step_col * distance
                cell_row = row + step_row * distance
                if cell_col < 0 or cell_row < 0 or cell_col > 7 or cell_row > 7:
                    break
                try:
                    cell_name = convert_position_to_str((cell_col, cell_row))
                    occupant = self.game_board[cell_name]
                    if occupant == 0:
                        moves.append(cell_name)
                    elif occupant.color != self.color:
                        moves.append(cell_name)
                        break
                    else:
                        break
                except (IndexError, KeyError):
                    pass
        return moves
class AbstractQueen(AbstractPiece):
    '''
    a class representing a queen piece
    '''

    def __init__(self, board, color, pos):
        '''
        initialise a queen with its colour and position
        white color is represented as 1 (shown as 'Q')
        black color is represented as 0 (shown as 'q')
        '''
        if color == 1:
            super().__init__(board, 'Q', color, pos)
        if color == 0:
            super().__init__(board, 'q', color, pos)

    def _line_moves(self, x, y, step_x, step_y):
        '''
        walk from (x, y) in one direction and collect reachable cell names

        Empty cells are collected and the walk continues; the first enemy
        piece is collected and stops the walk; a friendly piece stops the
        walk without being collected.
        '''
        moves = []
        for distance in range(1, 8):
            nx, ny = x + step_x * distance, y + step_y * distance
            if nx < 0 or ny < 0 or nx > 7 or ny > 7:
                break
            try:
                cell = convert_position_to_str((nx, ny))
                occupant = self.game_board[cell]
                if occupant == 0:
                    moves.append(cell)
                elif occupant.color != self.color:
                    moves.append(cell)
                    break
                else:
                    break
            except (IndexError, KeyError):
                # Tolerate unexpected board lookups, as the other pieces do.
                pass
        return moves

    def possible_moves(self):
        '''
        return a list of all possible moves for piece as names of cells a-h 1-8
        '''
        x, y = convert_position(self.pos)
        # Rook-like directions first, then bishop-like ones, preserving the
        # move ordering of the original cross + diagonals scans. The shared
        # sliding logic lives in _line_moves instead of being written twice.
        directions = [(1, 0), (-1, 0), (0, 1), (0, -1),
                      (1, 1), (1, -1), (-1, 1), (-1, -1)]
        possible_moves = []
        for step_x, step_y in directions:
            possible_moves.extend(self._line_moves(x, y, step_x, step_y))
        return possible_moves
if __name__ == "__main__":
    # Manual smoke test: set up a full starting chess position, make one
    # pawn move, then revert it and dump the raw board contents.
    board1 = AbstractBoardADT()
    # White pieces on their starting squares.
    AbstractPawn(board1, 1, "a2")
    AbstractPawn(board1, 1, "b2")
    AbstractPawn(board1, 1, "c2")
    AbstractPawn(board1, 1, "d2")
    AbstractPawn(board1, 1, "e2")
    AbstractPawn(board1, 1, "f2")
    AbstractPawn(board1, 1, "g2")
    AbstractPawn(board1, 1, "h2")
    AbstractKing(board1, 1, "e1")
    AbstractQueen(board1, 1, "d1")
    AbstractKnight(board1, 1, "b1")
    AbstractKnight(board1, 1, "g1")
    AbstractBishop(board1, 1, "c1")
    AbstractBishop(board1, 1, "f1")
    AbstractRook(board1, 1, "a1")
    AbstractRook(board1, 1, "h1")
    # Black
    p1 = AbstractPawn(board1, 0, "a7")
    AbstractPawn(board1, 0, "b7")
    AbstractPawn(board1, 0, "c7")
    AbstractPawn(board1, 0, "d7")
    AbstractPawn(board1, 0, "e7")
    AbstractPawn(board1, 0, "f7")
    AbstractPawn(board1, 0, "g7")
    AbstractPawn(board1, 0, "h7")
    AbstractKing(board1, 0, "e8")
    AbstractQueen(board1, 0, "d8")
    AbstractKnight(board1, 0, "b8")
    AbstractKnight(board1, 0, "g8")
    b1 = AbstractBishop(board1, 0, "c8")
    AbstractBishop(board1, 0, "f8")
    r1 = AbstractRook(board1, 0, "a8")
    AbstractRook(board1, 0, "h8")
    # b1.move(())
    # print(p1.possible_moves())
    print(board1)
    # Move the a7 pawn; the tuple is a board coordinate — presumably
    # (column, row) per convert_position — TODO confirm against move().
    p1.move((0, 3))
    print(board1)
    # print(r1.possible_moves())
    # Undo the pawn move and show the board is restored.
    board1.revert_last_move()
    print(board1)
    pprint(board1.content)
| en | 0.869091 | a module representing BoardADT and abstract pieces for bot convert position in chess notation to standart list indexes convert standart list indexes to the chess notation board data type which contains all the information about the chess game initialise an empty board # print("here") # print(piece.pos, piece.color) # print("Abstract" + old_type + "(self, piece.color, piece.pos)") # print("Copied to an abstract board:") # pprint(self.content) get item from ADT pos must be in chess notation: "a1" - "h8" add a piece to the board piece instance must be Piece remove piece from chess board by it's position return a chess board with pieces being represented as letters capital letter means white piece, lower case - black piece # print(f"{piece} can make following moves: {piece.possible_moves()}") # print(f"{piece} at {piece.pos} can move to {move}") # print(f"All possible moves for computer: {possible_computer_moves}") # print("Reverting the following board:") # print(self) # print("Reverted:") # print(self) # print(self.content[0][1].pos) parent class for all pieces initialise a piece with its type, one of the following 'p' for pawn 'r' for rook 'q' for queen 'k' for king 'n' for knight 'b' for bishop white pieces get their tipe as capital letter, black as lower case letter # self.position = position Move the figure to the next position # print("Updating the old_boards with the content:") # pprint(self.game_board.content) # print("Updated.") # print(old_boards[-1][0]) # print(repr(self.game_board)) # Get the next pos # print(next_pos) # print(isinstance(self, AbstractKing)) # print(convert_position(next_pos)[0]) # print(self.color, next_pos[1]) # self.kill() # Update the board # print(self.game_board) # Deselect and change turns a class representing a pown piece initialise a pawn with its colour and position white color is represented as 1 black color is represented as 0 possibility of en passant capture is declared by default as False return a list of 
all possible moves for piece as names of cells a-h 1-8 a class representing a pown piece initialise a pawn with its colour and position white color is represented as 1 black color is represented as 0 possibility of en passant capture is declared by default as False return a list of all possible moves for piece as names of cells a-h 1-8 returns True if king is checked, False if not a class representing a pown piece initialise a pawn with its colour and position white color is represented as 1 black color is represented as 0 possibility of en passant capture is declared by default as False return a list of all possible moves for piece as names of cells a-h 1-8 a class representing a pown piece initialise a pawn with its colour and position white color is represented as 1 black color is represented as 0 possibility of en passant capture is declared by default as False return a list of all possible moves for piece as names of cells a-h 1-8 a class representing a pown piece initialise a pawn with its colour and position white color is represented as 1 black color is represented as 0 possibility of en passant capture is declared by default as False return a list of all possible moves for piece as names of cells a-h 1-8 a class representing a pown piece initialise a pawn with its colour and position white color is represented as 1 black color is represented as 0 possibility of en passant capture is declared by default as False return a list of all possible moves for piece as names of cells a-h 1-8 # Black # b1.move(()) # print(p1.possible_moves()) # print(r1.possible_moves()) | 3.800457 | 4 |
useless_data.py | pan93412/tgggbot | 3 | 6621392 | '''
這裡放置一些已經廢棄的程式碼,如果您有需要您可以
從這個 useless_data 倉庫取走可能對你而言有用的程
式碼。
您可以標註借用此處代碼,或者不標註。但若您標註了
,我將因此而開心。
當你決定開始取用此處的程式碼時,本資料庫不擔保程式
碼皆可使用,使用前請測試,而非盲目直接套用(除非你
只是打算用來增加程式碼行數,而不是實際拿來使用)。
'''
# 預計移除
"""
def answerInlineQuery(self, getUpdates, results):
'''
如果不是 Inline 訊息,回傳 False。
若是,回傳 True 並發送 results 陣列。
getUpdates
botHandler.getUpdates() 得到的內容
results
InlineQueryResult 的陣列,請參閱 TG Bot API
'''
if len(getUpdates) != 0:
if 'inline_query' in getUpdates[-1]:
print("[INFO] 發送 results!")
requests.get(
self.url + "answerInlineQuery",
params={
'inline_query_id': getUpdates[-1]['inline_query']['id'],
'results': results
}
)
return True
else:
print("[INFO] 不是 Inline 訊息!")
return False
"""
# 預計移除
'''
# 變數
aiq_InlineQueryResult = {
'type': 'InputTextMessageContent',
'id': 'gogo',
'title': '咕咕咕',
'input_message_content': {
'message_text': randomText()
}
}
'''
| '''
這裡放置一些已經廢棄的程式碼,如果您有需要您可以
從這個 useless_data 倉庫取走可能對你而言有用的程
式碼。
您可以標註借用此處代碼,或者不標註。但若您標註了
,我將因此而開心。
當你決定開始取用此處的程式碼時,本資料庫不擔保程式
碼皆可使用,使用前請測試,而非盲目直接套用(除非你
只是打算用來增加程式碼行數,而不是實際拿來使用)。
'''
# 預計移除
"""
def answerInlineQuery(self, getUpdates, results):
'''
如果不是 Inline 訊息,回傳 False。
若是,回傳 True 並發送 results 陣列。
getUpdates
botHandler.getUpdates() 得到的內容
results
InlineQueryResult 的陣列,請參閱 TG Bot API
'''
if len(getUpdates) != 0:
if 'inline_query' in getUpdates[-1]:
print("[INFO] 發送 results!")
requests.get(
self.url + "answerInlineQuery",
params={
'inline_query_id': getUpdates[-1]['inline_query']['id'],
'results': results
}
)
return True
else:
print("[INFO] 不是 Inline 訊息!")
return False
"""
# 預計移除
'''
# 變數
aiq_InlineQueryResult = {
'type': 'InputTextMessageContent',
'id': 'gogo',
'title': '咕咕咕',
'input_message_content': {
'message_text': randomText()
}
}
'''
| zh | 0.791775 | 這裡放置一些已經廢棄的程式碼,如果您有需要您可以 從這個 useless_data 倉庫取走可能對你而言有用的程 式碼。 您可以標註借用此處代碼,或者不標註。但若您標註了 ,我將因此而開心。 當你決定開始取用此處的程式碼時,本資料庫不擔保程式 碼皆可使用,使用前請測試,而非盲目直接套用(除非你 只是打算用來增加程式碼行數,而不是實際拿來使用)。 # 預計移除 def answerInlineQuery(self, getUpdates, results): ''' 如果不是 Inline 訊息,回傳 False。 若是,回傳 True 並發送 results 陣列。 getUpdates botHandler.getUpdates() 得到的內容 results InlineQueryResult 的陣列,請參閱 TG Bot API ''' if len(getUpdates) != 0: if 'inline_query' in getUpdates[-1]: print("[INFO] 發送 results!") requests.get( self.url + "answerInlineQuery", params={ 'inline_query_id': getUpdates[-1]['inline_query']['id'], 'results': results } ) return True else: print("[INFO] 不是 Inline 訊息!") return False # 預計移除 # 變數 aiq_InlineQueryResult = { 'type': 'InputTextMessageContent', 'id': 'gogo', 'title': '咕咕咕', 'input_message_content': { 'message_text': randomText() } } | 2.944703 | 3 |
server/api/preprocessing/file_preprocessor.py | JBris/dolphin_segmentation | 1 | 6621393 | import os
import shutil
import tarfile
import zipfile
from decouple import config
from pathlib import Path
from api.services.file_select import FileType
from api.services.validation.file import check_valid_image
#zip | tar | dir | images
def _add_preprocessing_metadata(data, files):
return {
"name": data["name"],
"out": data["out"],
"type": data["type"],
"files": files,
"data": data
}
def preprocess_archive(data, extracted_path):
    """Flatten a freshly extracted archive and collect its image files.

    If the archive extracted into a single wrapper directory, its contents
    are moved up into *extracted_path* first. Every entry of
    *extracted_path* that passes check_valid_image is collected.
    """
    extracted_path_files = os.listdir(extracted_path)
    if len(extracted_path_files) == 1 and os.path.isdir(f"{extracted_path}/{extracted_path_files[0]}"):
        # Materialise the glob before moving anything: Path.glob is a lazy
        # iterator over the directory, and mutating the directory while it
        # is being scanned can skip or repeat entries.
        for file in list(Path(f"{extracted_path}/{extracted_path_files[0]}").glob("*")):
            shutil.move(str(file), extracted_path)

    files = [str(file) for file in Path(extracted_path).glob("*") if check_valid_image(str(file))]
    return _add_preprocessing_metadata(data, files)
def preprocess_zip(data):
    """Extract the first file in *data* as a zip archive, then preprocess it."""
    archive = data['files'][0]
    destination = data['out']
    with zipfile.ZipFile(archive, 'r') as zip_file:
        zip_file.extractall(destination)
    return preprocess_archive(data, f"{destination}/{Path(archive).stem}")
def preprocess_tar(data):
    """Extract the first file in *data* as a tar archive, then preprocess it.

    Handles double extensions such as ``x.tar.gz``: ``Path.stem`` strips only
    the outermost suffix, so a remaining ``.tar`` suffix is stripped as well.

    NOTE(security): tarfile.extractall on untrusted archives is vulnerable to
    path traversal; only feed archives from trusted sources here.
    """
    with tarfile.open(data['files'][0], 'r') as f:
        f.extractall(data['out'])
    out_dir = f"{data['out']}/{Path(data['files'][0]).stem}"
    if Path(out_dir).suffix == ".tar":
        # Bug fix: the previous code used Path(out_dir).stem here, which
        # returned only the final path component and silently dropped the
        # data['out'] directory prefix.
        out_dir = str(Path(out_dir).with_suffix(""))
    return preprocess_archive(data, out_dir)
def preprocess_dir(data):
    """Collect every valid image file directly inside the given directory."""
    directory = Path(data["files"][0])
    image_files = [str(entry) for entry in directory.glob("*") if check_valid_image(str(entry))]
    return _add_preprocessing_metadata(data, image_files)
def preprocess_images(data):
    """Wrap an explicit list of image paths in the preprocessing metadata."""
    # Copy the list idiomatically instead of a manual append loop, so the
    # returned metadata does not alias the caller's list.
    files = list(data["files"])
    return _add_preprocessing_metadata(data, files)
class FilePreprocessor:
    """Dispatches a preprocessing request to the handler for its file type."""

    def preprocess(self, data):
        """Ensure the output directory exists, then run the type-specific preprocessor.

        Raises NotImplementedError for unsupported file types.
        """
        Path(data['out']).mkdir(parents = True, exist_ok = True)
        handlers = {
            FileType.ZIP.value: preprocess_zip,
            FileType.TAR.value: preprocess_tar,
            FileType.DIR.value: preprocess_dir,
            FileType.IMAGES.value: preprocess_images,
        }
        handler = handlers.get(data["type"])
        if handler is None:
            raise NotImplementedError(f"FilePreprocessor does not support preprocessing of type: {data['type']}")
        return handler(data)
import shutil
import tarfile
import zipfile
from decouple import config
from pathlib import Path
from api.services.file_select import FileType
from api.services.validation.file import check_valid_image
#zip | tar | dir | images
def _add_preprocessing_metadata(data, files):
return {
"name": data["name"],
"out": data["out"],
"type": data["type"],
"files": files,
"data": data
}
def preprocess_archive(data, extracted_path):
extracted_path_files = os.listdir(extracted_path)
if len(extracted_path_files) == 1 and os.path.isdir(f"{extracted_path}/{extracted_path_files[0]}"):
for file in Path(f"{extracted_path}/{extracted_path_files[0]}").glob("*"): shutil.move(str(file), extracted_path)
files = []
for file in Path(extracted_path).glob("*"):
file = str(file)
if check_valid_image(file): files.append(file)
return _add_preprocessing_metadata(data, files)
def preprocess_zip(data):
with zipfile.ZipFile(data['files'][0], 'r') as f: f.extractall(data['out'])
return preprocess_archive(data, f"{data['out']}/{Path(data['files'][0]).stem}")
def preprocess_tar(data):
with tarfile.open(data['files'][0], 'r') as f: f.extractall(data['out'])
out_dir = f"{data['out']}/{Path(data['files'][0]).stem}"
if Path(out_dir).suffix == ".tar": out_dir = Path(out_dir).stem
# while not os.path.isdir(out_dir): out_dir = Path(out_dir).stem
return preprocess_archive(data, out_dir)
def preprocess_dir(data):
full_path = Path(data["files"][0])
files = []
for file in full_path.glob("*"):
file = str(file)
if check_valid_image(file): files.append(file)
return _add_preprocessing_metadata(data, files)
def preprocess_images(data):
files = []
for file in data["files"]: files.append(file)
return _add_preprocessing_metadata(data, files)
class FilePreprocessor:
def preprocess(self, data):
Path(data['out']).mkdir(parents = True, exist_ok = True)
if data["type"] == FileType.ZIP.value: return preprocess_zip(data)
if data["type"] == FileType.TAR.value: return preprocess_tar(data)
if data["type"] == FileType.DIR.value: return preprocess_dir(data)
if data["type"] == FileType.IMAGES.value: return preprocess_images(data)
raise NotImplementedError(f"FilePreprocessor does not support preprocessing of type: {data['type']}") | ky | 0.130193 | #zip | tar | dir | images # while not os.path.isdir(out_dir): out_dir = Path(out_dir).stem | 2.573249 | 3 |
bites/analyse/analyse_utils.py | sschrod/BITES | 3 | 6621394 | <reponame>sschrod/BITES<filename>bites/analyse/analyse_utils.py
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
from bites.model.BITES_base import BITES
from bites.model.CFRNet_base import CFRNet
from bites.model.DeepSurv_base import DeepSurv
from bites.utils.eval_surv import EvalSurv
from lifelines import KaplanMeierFitter
from lifelines.statistics import logrank_test
from ray.tune import Analysis
def get_best_model(path_to_experiment="./ray_results/test_hydra", assign_treatment=None):
    """Load the best model found by a Ray Tune experiment.

    Selects the trial with the lowest validation loss, rebuilds the network
    described by that trial's config, and restores its checkpointed weights
    on CPU.

    :param path_to_experiment: directory containing the Ray Tune results
    :param assign_treatment: treatment index attached to DeepSurv models
    :return: tuple ``(best_net, best_config)``
    :raises NotImplementedError: if the config names an unknown method
    """
    analysis = Analysis(path_to_experiment, default_metric="val_loss", default_mode="min")
    best_config = analysis.get_best_config()
    best_checkpoint_dir = analysis.get_best_checkpoint(analysis.get_best_logdir())

    if best_config["Method"] == 'BITES' or best_config["Method"] == 'ITES':
        best_net = BITES(best_config["num_covariates"], best_config["shared_layer"], best_config["individual_layer"],
                         out_features=1,
                         dropout=best_config["dropout"])

    elif best_config["Method"] == 'DeepSurv' or best_config["Method"] == 'DeepSurvT':
        best_net = DeepSurv(best_config["num_covariates"], best_config["shared_layer"], out_features=1,
                            dropout=best_config["dropout"])
        best_net.treatment = assign_treatment

    elif best_config["Method"] == 'CFRNet':
        best_net = CFRNet(best_config["num_covariates"], best_config["shared_layer"], best_config["individual_layer"],
                          out_features=1,
                          dropout=best_config["dropout"])
    else:
        # Fail loudly instead of printing and returning None, which made
        # callers crash later with an opaque tuple-unpacking TypeError.
        raise NotImplementedError(f"Method not implemented yet: {best_config['Method']}")

    # The optimizer state is checkpointed alongside the weights but is not
    # needed for inference.
    model_state, _optimizer_state = torch.load(os.path.join(
        best_checkpoint_dir, "checkpoint"), map_location=torch.device('cpu'))
    best_net.load_state_dict(model_state)

    return best_net, best_config
def get_C_Index_BITES(model, X, time, event, treatment):
    """Print and return concordance indices of a fitted BITES model.

    Returns (overall C-index, C-index for treatment 0, C-index for
    treatment 1); returns None if the baseline hazards were not computed.
    """
    if not model.baseline_hazards_:
        print('Compute Baseline Hazards before running get_C_index')
        return

    surv0, surv1 = model.predict_surv_df(X, treatment)
    combined = pd.concat([surv0, surv1], axis=1).interpolate('index')

    control = treatment == 0
    treated = treatment == 1
    C_index0 = EvalSurv(surv0, time[control], event[control], censor_surv='km').concordance_td()
    C_index1 = EvalSurv(surv1, time[treated], event[treated], censor_surv='km').concordance_td()
    C_index = EvalSurv(combined,
                       np.append(time[control], time[treated]),
                       np.append(event[control], event[treated]),
                       censor_surv='km').concordance_td()

    print('Time dependent C-Index: ' + str(C_index)[:5])
    print('Treatment 0 C-Index: ' + str(C_index0)[:5])
    print('Treatment 1 C-Index: ' + str(C_index1)[:5])

    return C_index, C_index0, C_index1
def get_C_Index_DeepSurvT(model0, model1, X, time, event, treatment):
    """Print and return concordance indices of two treatment-specific DeepSurv models.

    model0 scores the treatment==0 patients, model1 the treatment==1
    patients; the combined survival frame yields the overall C-index.
    """
    control = treatment == 0
    treated = treatment == 1

    surv0 = model0.predict_surv_df(X[control])
    surv1 = model1.predict_surv_df(X[treated])
    combined = pd.concat([surv0, surv1], axis=1).interpolate('index')

    C_index = EvalSurv(combined, np.append(time[control], time[treated]),
                       np.append(event[control], event[treated]), censor_surv='km').concordance_td()
    C_index0 = EvalSurv(surv0, time[control], event[control], censor_surv='km').concordance_td()
    C_index1 = EvalSurv(surv1, time[treated], event[treated], censor_surv='km').concordance_td()

    print('Time dependent C-Index: ' + str(C_index)[:5])
    print('Treatment 0 C-Index: ' + str(C_index0)[:5])
    print('Treatment 1 C-Index: ' + str(C_index1)[:5])

    return C_index, C_index0, C_index1
def get_C_Index_DeepSurv(model, X, time, event, treatment=None):
    """Print and return the concordance index of a single DeepSurv model.

    When *treatment* is given it is prepended to X as an extra covariate
    column before prediction. The last two tuple slots are always None so
    the return shape matches the other get_C_Index_* helpers.
    """
    features = X if treatment is None else np.c_[treatment, X]
    surv = model.predict_surv_df(features)
    C_index = EvalSurv(surv, time, event, censor_surv='km').concordance_td()
    print('Time dependent C-Index: ' + str(C_index)[:5])
    return C_index, None, None
def get_ITE_BITES(model, X, treatment, best_treatment=None, death_probability=0.5):
    """Individual treatment effects from a fitted BITES model.

    For each patient, the factual and counterfactual survival curves are
    reduced to the time point whose survival probability is closest to
    *death_probability*; the ITE is the time gained by treatment 1 over
    treatment 0. Returns (ITE array, fraction of correct recommendations
    or None if *best_treatment* is not given).
    """
    if not model.baseline_hazards_:
        print('Compute Baseline Hazards before running get_ITE()')
        return

    def find_nearest_index(array, value):
        # Index of the entry in `array` closest to `value`.
        idx = (np.abs(array - value)).argmin()
        return idx

    # Factual curves per treatment arm, and curves under the flipped treatment.
    surv0, surv1 = model.predict_surv_df(X, treatment)
    surv0_cf, surv1_cf = model.predict_surv_counterfactual_df(X, treatment)

    """Find factual and counterfactual prediction: Value at 50% survival probability"""
    pred0 = np.zeros(surv0.shape[1])
    pred0_cf = np.zeros(surv0.shape[1])
    for i in range(surv0.shape[1]):
        # axes[0] is the time grid of the survival DataFrame.
        pred0[i] = surv0.axes[0][find_nearest_index(surv0.iloc[:, i].values, death_probability)]
        pred0_cf[i] = surv0_cf.axes[0][find_nearest_index(surv0_cf.iloc[:, i].values, death_probability)]
    # Control patients: effect = time under treatment (counterfactual) - time under control.
    ITE0 = pred0_cf - pred0

    pred1 = np.zeros(surv1.shape[1])
    pred1_cf = np.zeros(surv1.shape[1])
    for i in range(surv1.shape[1]):
        pred1[i] = surv1.axes[0][find_nearest_index(surv1.iloc[:, i].values, death_probability)]
        pred1_cf[i] = surv1_cf.axes[0][find_nearest_index(surv1_cf.iloc[:, i].values, death_probability)]
    # Treated patients: effect = factual time - time under control (counterfactual).
    ITE1 = pred1 - pred1_cf

    # Interleave the per-arm effects back into the original patient order;
    # k and j walk through the control and treated arrays respectively.
    ITE = np.zeros(X.shape[0])
    k, j = 0, 0
    for i in range(X.shape[0]):
        if treatment[i] == 0:
            ITE[i] = ITE0[k]
            k = k + 1
        else:
            ITE[i] = ITE1[j]
            j = j + 1

    correct_predicted_probability=None
    if best_treatment is not None:
        # A positive ITE recommends treatment 1; compare against the known best arm.
        correct_predicted_probability=np.sum(best_treatment==(ITE>0)*1)/best_treatment.shape[0]
        print('Fraction best choice: ' + str(correct_predicted_probability))

    return ITE, correct_predicted_probability
def get_ITE_CFRNet(model, X, treatment, best_treatment=None):
    """Individual treatment effects from a fitted CFRNet model.

    ITE is the factual-vs-counterfactual prediction difference oriented so
    that positive values favour treatment 1. Returns (ITE array, fraction
    of correct recommendations or None if *best_treatment* is not given).
    """
    # Predictions under the observed and the flipped treatment assignment.
    # NOTE(review): assumes predict_numpy returns per-sample values indexable
    # by patient — confirm the output shape against CFRNet_base.
    pred,_ = model.predict_numpy(X, treatment)
    pred_cf,_ = model.predict_numpy(X, 1-treatment)

    ITE = np.zeros(X.shape[0])
    for i in range(X.shape[0]):
        if treatment[i] == 0:
            # Control patient: effect of switching to treatment 1.
            ITE[i] = pred_cf[i]-pred[i]
        else:
            # Treated patient: effect of having received treatment 1.
            ITE[i] = pred[i]-pred_cf[i]

    correct_predicted_probability=None
    if best_treatment is not None:
        # A positive ITE recommends treatment 1; compare against the known best arm.
        correct_predicted_probability=np.sum(best_treatment==(ITE>0)*1)/best_treatment.shape[0]
        print('Fraction best choice: ' + str(correct_predicted_probability))

    return ITE, correct_predicted_probability
def get_ITE_DeepSurvT(model0, model1, X, treatment, best_treatment=None, death_probability=0.5):
    """Individual treatment effects from two treatment-specific DeepSurv models.

    model0 predicts outcomes under treatment 0, model1 under treatment 1;
    swapping models yields the counterfactual curve for each patient. The
    curves are reduced to the time point whose survival probability is
    closest to *death_probability*. Returns (ITE array, fraction of correct
    recommendations or None if *best_treatment* is not given).
    """
    def find_nearest_index(array, value):
        # Index of the entry in `array` closest to `value`.
        idx = (np.abs(array - value)).argmin()
        return idx

    mask0 = treatment == 0
    mask1 = treatment == 1
    X0 = X[mask0]
    X1 = X[mask1]

    # Factual curve from the model of the observed arm, counterfactual from
    # the other arm's model.
    surv0 = model0.predict_surv_df(X0)
    surv0_cf = model1.predict_surv_df(X0)
    surv1 = model1.predict_surv_df(X1)
    surv1_cf = model0.predict_surv_df(X1)

    """Find factual and counterfactual prediction: Value at 50% survival probability"""
    pred0 = np.zeros(surv0.shape[1])
    pred0_cf = np.zeros(surv0.shape[1])
    for i in range(surv0.shape[1]):
        # axes[0] is the time grid of the survival DataFrame.
        pred0[i] = surv0.axes[0][find_nearest_index(surv0.iloc[:, i].values, death_probability)]
        pred0_cf[i] = surv0_cf.axes[0][find_nearest_index(surv0_cf.iloc[:, i].values, death_probability)]
    # Control patients: effect = time under treatment (counterfactual) - time under control.
    ITE0 = pred0_cf - pred0

    pred1 = np.zeros(surv1.shape[1])
    pred1_cf = np.zeros(surv1.shape[1])
    for i in range(surv1.shape[1]):
        pred1[i] = surv1.axes[0][find_nearest_index(surv1.iloc[:, i].values, death_probability)]
        pred1_cf[i] = surv1_cf.axes[0][find_nearest_index(surv1_cf.iloc[:, i].values, death_probability)]
    # Treated patients: effect = factual time - time under control (counterfactual).
    ITE1 = pred1 - pred1_cf

    # Interleave the per-arm effects back into the original patient order;
    # k and j walk through the control and treated arrays respectively.
    ITE = np.zeros(X.shape[0])
    k, j = 0, 0
    for i in range(X.shape[0]):
        if treatment[i] == 0:
            ITE[i] = ITE0[k]
            k = k + 1
        else:
            ITE[i] = ITE1[j]
            j = j + 1

    correct_predicted_probability=None
    if best_treatment is not None:
        # A positive ITE recommends treatment 1; compare against the known best arm.
        correct_predicted_probability=np.sum(best_treatment==(ITE>0)*1)/best_treatment.shape[0]
        print('Fraction best choice: ' + str(correct_predicted_probability))

    return ITE, correct_predicted_probability
def get_ITE_DeepSurv(model, X, treatment, best_treatment=None, death_probability=0.5):
    """Individual treatment effects from a single DeepSurv model.

    Assumes the treatment indicator is column 0 of X (see the `1-X0[:,0]`
    flips below); the counterfactual curve is obtained by inverting that
    column. Curves are reduced to the time point whose survival probability
    is closest to *death_probability*. Returns (ITE array, fraction of
    correct recommendations or None if *best_treatment* is not given).
    """
    def find_nearest_index(array, value):
        # Index of the entry in `array` closest to `value`.
        idx = (np.abs(array - value)).argmin()
        return idx

    mask0 = treatment == 0
    mask1 = treatment == 1
    X0 = X[mask0]
    X1 = X[mask1]

    # Counterfactuals: same covariates with the treatment column flipped.
    surv0 = model.predict_surv_df(X0)
    surv0_cf = model.predict_surv_df(np.c_[1-X0[:,0],X0[:,1:]])
    surv1 = model.predict_surv_df(X1)
    surv1_cf = model.predict_surv_df(np.c_[1-X1[:,0],X1[:,1:]])

    """Find factual and counterfactual prediction: Value at 50% survival probability"""
    pred0 = np.zeros(surv0.shape[1])
    pred0_cf = np.zeros(surv0.shape[1])
    for i in range(surv0.shape[1]):
        # axes[0] is the time grid of the survival DataFrame.
        pred0[i] = surv0.axes[0][find_nearest_index(surv0.iloc[:, i].values, death_probability)]
        pred0_cf[i] = surv0_cf.axes[0][find_nearest_index(surv0_cf.iloc[:, i].values, death_probability)]
    # Control patients: effect = time under treatment (counterfactual) - time under control.
    ITE0 = pred0_cf - pred0

    pred1 = np.zeros(surv1.shape[1])
    pred1_cf = np.zeros(surv1.shape[1])
    for i in range(surv1.shape[1]):
        pred1[i] = surv1.axes[0][find_nearest_index(surv1.iloc[:, i].values, death_probability)]
        pred1_cf[i] = surv1_cf.axes[0][find_nearest_index(surv1_cf.iloc[:, i].values, death_probability)]
    # Treated patients: effect = factual time - time under control (counterfactual).
    ITE1 = pred1 - pred1_cf

    # Interleave the per-arm effects back into the original patient order;
    # k and j walk through the control and treated arrays respectively.
    ITE = np.zeros(X.shape[0])
    k, j = 0, 0
    for i in range(X.shape[0]):
        if treatment[i] == 0:
            ITE[i] = ITE0[k]
            k = k + 1
        else:
            ITE[i] = ITE1[j]
            j = j + 1

    correct_predicted_probability=None
    if best_treatment is not None:
        # A positive ITE recommends treatment 1; compare against the known best arm.
        correct_predicted_probability=np.sum(best_treatment==(ITE>0)*1)/best_treatment.shape[0]
        print('Fraction best choice: ' + str(correct_predicted_probability))

    return ITE, correct_predicted_probability
def analyse_randomized_test_set(pred_ite, Y_test, event_test, treatment_test, C_index=None, method_name='set_name', save_path=None,new_figure=True,annotate=True):
    """Kaplan-Meier comparison of patients treated in vs against the model's recommendation.

    Splits the test set into patients whose observed treatment matches the
    sign of *pred_ite* (recommended) and those treated opposite to it
    (anti-recommended), runs a log-rank test between the two groups, and
    plots both Kaplan-Meier curves on the current matplotlib figure.

    :param pred_ite: predicted individual treatment effects (positive favours T=1)
    :param Y_test: observed survival times
    :param event_test: event indicators
    :param treatment_test: observed treatment assignments (0/1)
    :param C_index: concordance index shown in the annotation, if any
    :param method_name: legend prefix; None switches to 'Treated'/'Control' labels
    :param save_path: if set, the figure is saved there as PDF
    :param new_figure: use the primary colour pair; False uses the secondary pair
                       for overlaying onto an existing plot
    :param annotate: add p-value, C-index and recommendation-fraction text
    """
    # A positive ITE recommends treatment 1, a negative one treatment 0.
    mask_recommended = (pred_ite > 0) == treatment_test
    mask_antirecommended = (pred_ite < 0) == treatment_test

    recommended_times = Y_test[mask_recommended]
    recommended_event = event_test[mask_recommended]
    antirecommended_times = Y_test[mask_antirecommended]
    antirecommended_event = event_test[mask_antirecommended]

    logrank_result = logrank_test(recommended_times, antirecommended_times, recommended_event, antirecommended_event, alpha=0.95)

    colors = sns.color_palette()
    kmf = KaplanMeierFitter()
    kmf_cf = KaplanMeierFitter()
    if method_name==None:
        kmf.fit(recommended_times, recommended_event, label='Treated')
        kmf_cf.fit(antirecommended_times, antirecommended_event, label='Control')
    else:
        kmf.fit(recommended_times, recommended_event, label=method_name + ' Recommendation')
        kmf_cf.fit(antirecommended_times, antirecommended_event, label=method_name + ' Anti-Recommendation')

    if new_figure:
        #plt.figure(figsize=(8, 2.7))
        #kmf.plot(c=colors[0])
        #kmf_cf.plot(c=colors[1])
        # Without a method name the confidence bands are hidden.
        if method_name==None:
            kmf.plot(c=colors[0],ci_show=False)
            kmf_cf.plot(c=colors[1],ci_show=False)
        else:
            kmf.plot(c=colors[0])
            kmf_cf.plot(c=colors[1])
    else:
        # Secondary colour pair so an overlay is distinguishable.
        kmf.plot(c=colors[2])
        kmf_cf.plot(c=colors[3])

    if annotate:
        # Calculate p-value text position and display.
        y_pos = 0.4
        plt.text(1 * 3, y_pos, f"$p$ = {logrank_result.p_value:.6f}", fontsize='small')
        # Fraction of patients the model would send to treatment 1.
        fraction2 = np.sum((pred_ite > 0)) / pred_ite.shape[0]
        plt.text(1 * 3, 0.3, 'C-Index=' + str(C_index)[:5], fontsize='small')
        plt.text(1 * 3, 0.2, f"{fraction2 * 100:.1f}% recommended for T=1", fontsize='small')

    plt.xlabel('Survival Time [month]')
    plt.ylabel('Survival Probability')
    plt.tight_layout()
    if save_path:
        plt.savefig(save_path, format='pdf')
def plot_ITE_correlation(pred_ITE, y_true,y_cf,treatment):
    """Scatter plot of true vs predicted individual treatment effects.

    Requires known counterfactual outcomes (*y_cf*), i.e. simulated data.
    The true ITE is oriented so that positive values favour treatment 1,
    matching the convention of the get_ITE_* helpers.
    """
    ITE = np.zeros(pred_ITE.shape[0])
    # Control patients: true effect = counterfactual - factual outcome.
    true_ITE0 = -(y_true[treatment == 0] - y_cf[treatment == 0])
    # Treated patients: true effect = factual - counterfactual outcome.
    true_ITE1 = y_true[treatment == 1] - y_cf[treatment == 1]
    # Interleave per-arm effects back into the original patient order;
    # k and j walk through the control and treated arrays respectively.
    k, j = 0, 0
    for i in range(pred_ITE.shape[0]):
        if treatment[i] == 0:
            ITE[i] = true_ITE0[k]
            k = k + 1
        else:
            ITE[i] = true_ITE1[j]
            j = j + 1

    ax=sns.scatterplot(x=ITE,y=pred_ITE)
    ax.set(xlabel='ITE', ylabel='pred_ITE')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
from bites.model.BITES_base import BITES
from bites.model.CFRNet_base import CFRNet
from bites.model.DeepSurv_base import DeepSurv
from bites.utils.eval_surv import EvalSurv
from lifelines import KaplanMeierFitter
from lifelines.statistics import logrank_test
from ray.tune import Analysis
def get_best_model(path_to_experiment="./ray_results/test_hydra", assign_treatment=None):
analysis = Analysis(path_to_experiment, default_metric="val_loss", default_mode="min")
best_config = analysis.get_best_config()
best_checkpoint_dir = analysis.get_best_checkpoint(analysis.get_best_logdir())
if best_config["Method"] == 'BITES' or best_config["Method"] == 'ITES':
best_net = BITES(best_config["num_covariates"], best_config["shared_layer"], best_config["individual_layer"],
out_features=1,
dropout=best_config["dropout"])
elif best_config["Method"] == 'DeepSurv' or best_config["Method"] == 'DeepSurvT':
best_net = DeepSurv(best_config["num_covariates"], best_config["shared_layer"], out_features=1,
dropout=best_config["dropout"])
best_net.treatment = assign_treatment
elif best_config["Method"] == 'CFRNet':
best_net = CFRNet(best_config["num_covariates"], best_config["shared_layer"], best_config["individual_layer"],
out_features=1,
dropout=best_config["dropout"])
else:
print('Method not implemented yet!')
return
model_state, optimizer_state = torch.load(os.path.join(
best_checkpoint_dir, "checkpoint"), map_location=torch.device('cpu'))
best_net.load_state_dict(model_state)
return best_net, best_config
def get_C_Index_BITES(model, X, time, event, treatment):
if not model.baseline_hazards_:
print('Compute Baseline Hazards before running get_C_index')
return
surv0, surv1 = model.predict_surv_df(X, treatment)
surv = pd.concat([surv0, surv1], axis=1)
surv = surv.interpolate('index')
C_index0 = EvalSurv(surv0, time[treatment == 0], event[treatment == 0], censor_surv='km').concordance_td()
C_index1 = EvalSurv(surv1, time[treatment == 1], event[treatment == 1], censor_surv='km').concordance_td()
C_index = EvalSurv(surv, np.append(time[treatment == 0], time[treatment == 1]),
np.append(event[treatment == 0], event[treatment == 1]),
censor_surv='km').concordance_td()
print('Time dependent C-Index: ' + str(C_index)[:5])
print('Treatment 0 C-Index: ' + str(C_index0)[:5])
print('Treatment 1 C-Index: ' + str(C_index1)[:5])
return C_index, C_index0, C_index1
def get_C_Index_DeepSurvT(model0, model1, X, time, event, treatment):
mask0 = treatment == 0
mask1 = treatment == 1
X0, time0, event0 = X[mask0], time[mask0], event[mask0]
X1, time1, event1 = X[mask1], time[mask1], event[mask1]
surv0 = model0.predict_surv_df(X0)
surv1 = model1.predict_surv_df(X1)
surv = pd.concat([surv0, surv1], axis=1)
surv = surv.interpolate('index')
C_index = EvalSurv(surv, np.append(time0, time1),
np.append(event0, event1), censor_surv='km').concordance_td()
C_index0 = EvalSurv(surv0, time0, event0, censor_surv='km').concordance_td()
C_index1 = EvalSurv(surv1, time1, event1, censor_surv='km').concordance_td()
print('Time dependent C-Index: ' + str(C_index)[:5])
print('Treatment 0 C-Index: ' + str(C_index0)[:5])
print('Treatment 1 C-Index: ' + str(C_index1)[:5])
return C_index, C_index0, C_index1
def get_C_Index_DeepSurv(model, X, time, event, treatment=None):
if treatment is not None:
surv = model.predict_surv_df(np.c_[treatment,X])
C_index = EvalSurv(surv, time, event, censor_surv='km').concordance_td()
print('Time dependent C-Index: ' + str(C_index)[:5])
else:
surv = model.predict_surv_df(X)
C_index = EvalSurv(surv, time, event, censor_surv='km').concordance_td()
print('Time dependent C-Index: ' + str(C_index)[:5])
return C_index, None, None
def get_ITE_BITES(model, X, treatment, best_treatment=None, death_probability=0.5):
if not model.baseline_hazards_:
print('Compute Baseline Hazards before running get_ITE()')
return
def find_nearest_index(array, value):
idx = (np.abs(array - value)).argmin()
return idx
surv0, surv1 = model.predict_surv_df(X, treatment)
surv0_cf, surv1_cf = model.predict_surv_counterfactual_df(X, treatment)
"""Find factual and counterfactual prediction: Value at 50% survival probability"""
pred0 = np.zeros(surv0.shape[1])
pred0_cf = np.zeros(surv0.shape[1])
for i in range(surv0.shape[1]):
pred0[i] = surv0.axes[0][find_nearest_index(surv0.iloc[:, i].values, death_probability)]
pred0_cf[i] = surv0_cf.axes[0][find_nearest_index(surv0_cf.iloc[:, i].values, death_probability)]
ITE0 = pred0_cf - pred0
pred1 = np.zeros(surv1.shape[1])
pred1_cf = np.zeros(surv1.shape[1])
for i in range(surv1.shape[1]):
pred1[i] = surv1.axes[0][find_nearest_index(surv1.iloc[:, i].values, death_probability)]
pred1_cf[i] = surv1_cf.axes[0][find_nearest_index(surv1_cf.iloc[:, i].values, death_probability)]
ITE1 = pred1 - pred1_cf
ITE = np.zeros(X.shape[0])
k, j = 0, 0
for i in range(X.shape[0]):
if treatment[i] == 0:
ITE[i] = ITE0[k]
k = k + 1
else:
ITE[i] = ITE1[j]
j = j + 1
correct_predicted_probability=None
if best_treatment is not None:
correct_predicted_probability=np.sum(best_treatment==(ITE>0)*1)/best_treatment.shape[0]
print('Fraction best choice: ' + str(correct_predicted_probability))
return ITE, correct_predicted_probability
def get_ITE_CFRNet(model, X, treatment, best_treatment=None):
pred,_ = model.predict_numpy(X, treatment)
pred_cf,_ = model.predict_numpy(X, 1-treatment)
ITE = np.zeros(X.shape[0])
for i in range(X.shape[0]):
if treatment[i] == 0:
ITE[i] = pred_cf[i]-pred[i]
else:
ITE[i] = pred[i]-pred_cf[i]
correct_predicted_probability=None
if best_treatment is not None:
correct_predicted_probability=np.sum(best_treatment==(ITE>0)*1)/best_treatment.shape[0]
print('Fraction best choice: ' + str(correct_predicted_probability))
return ITE, correct_predicted_probability
def get_ITE_DeepSurvT(model0, model1, X, treatment, best_treatment=None, death_probability=0.5):
def find_nearest_index(array, value):
idx = (np.abs(array - value)).argmin()
return idx
mask0 = treatment == 0
mask1 = treatment == 1
X0 = X[mask0]
X1 = X[mask1]
surv0 = model0.predict_surv_df(X0)
surv0_cf = model1.predict_surv_df(X0)
surv1 = model1.predict_surv_df(X1)
surv1_cf = model0.predict_surv_df(X1)
"""Find factual and counterfactual prediction: Value at 50% survival probability"""
pred0 = np.zeros(surv0.shape[1])
pred0_cf = np.zeros(surv0.shape[1])
for i in range(surv0.shape[1]):
pred0[i] = surv0.axes[0][find_nearest_index(surv0.iloc[:, i].values, death_probability)]
pred0_cf[i] = surv0_cf.axes[0][find_nearest_index(surv0_cf.iloc[:, i].values, death_probability)]
ITE0 = pred0_cf - pred0
pred1 = np.zeros(surv1.shape[1])
pred1_cf = np.zeros(surv1.shape[1])
for i in range(surv1.shape[1]):
pred1[i] = surv1.axes[0][find_nearest_index(surv1.iloc[:, i].values, death_probability)]
pred1_cf[i] = surv1_cf.axes[0][find_nearest_index(surv1_cf.iloc[:, i].values, death_probability)]
ITE1 = pred1 - pred1_cf
ITE = np.zeros(X.shape[0])
k, j = 0, 0
for i in range(X.shape[0]):
if treatment[i] == 0:
ITE[i] = ITE0[k]
k = k + 1
else:
ITE[i] = ITE1[j]
j = j + 1
correct_predicted_probability=None
if best_treatment is not None:
correct_predicted_probability=np.sum(best_treatment==(ITE>0)*1)/best_treatment.shape[0]
print('Fraction best choice: ' + str(correct_predicted_probability))
return ITE, correct_predicted_probability
def get_ITE_DeepSurv(model, X, treatment, best_treatment=None, death_probability=0.5):
def find_nearest_index(array, value):
idx = (np.abs(array - value)).argmin()
return idx
mask0 = treatment == 0
mask1 = treatment == 1
X0 = X[mask0]
X1 = X[mask1]
surv0 = model.predict_surv_df(X0)
surv0_cf = model.predict_surv_df(np.c_[1-X0[:,0],X0[:,1:]])
surv1 = model.predict_surv_df(X1)
surv1_cf = model.predict_surv_df(np.c_[1-X1[:,0],X1[:,1:]])
"""Find factual and counterfactual prediction: Value at 50% survival probability"""
pred0 = np.zeros(surv0.shape[1])
pred0_cf = np.zeros(surv0.shape[1])
for i in range(surv0.shape[1]):
pred0[i] = surv0.axes[0][find_nearest_index(surv0.iloc[:, i].values, death_probability)]
pred0_cf[i] = surv0_cf.axes[0][find_nearest_index(surv0_cf.iloc[:, i].values, death_probability)]
ITE0 = pred0_cf - pred0
pred1 = np.zeros(surv1.shape[1])
pred1_cf = np.zeros(surv1.shape[1])
for i in range(surv1.shape[1]):
pred1[i] = surv1.axes[0][find_nearest_index(surv1.iloc[:, i].values, death_probability)]
pred1_cf[i] = surv1_cf.axes[0][find_nearest_index(surv1_cf.iloc[:, i].values, death_probability)]
ITE1 = pred1 - pred1_cf
ITE = np.zeros(X.shape[0])
k, j = 0, 0
for i in range(X.shape[0]):
if treatment[i] == 0:
ITE[i] = ITE0[k]
k = k + 1
else:
ITE[i] = ITE1[j]
j = j + 1
correct_predicted_probability=None
if best_treatment is not None:
correct_predicted_probability=np.sum(best_treatment==(ITE>0)*1)/best_treatment.shape[0]
print('Fraction best choice: ' + str(correct_predicted_probability))
return ITE, correct_predicted_probability
def analyse_randomized_test_set(pred_ite, Y_test, event_test, treatment_test, C_index=None, method_name='set_name', save_path=None,new_figure=True,annotate=True):
mask_recommended = (pred_ite > 0) == treatment_test
mask_antirecommended = (pred_ite < 0) == treatment_test
recommended_times = Y_test[mask_recommended]
recommended_event = event_test[mask_recommended]
antirecommended_times = Y_test[mask_antirecommended]
antirecommended_event = event_test[mask_antirecommended]
logrank_result = logrank_test(recommended_times, antirecommended_times, recommended_event, antirecommended_event, alpha=0.95)
colors = sns.color_palette()
kmf = KaplanMeierFitter()
kmf_cf = KaplanMeierFitter()
if method_name==None:
kmf.fit(recommended_times, recommended_event, label='Treated')
kmf_cf.fit(antirecommended_times, antirecommended_event, label='Control')
else:
kmf.fit(recommended_times, recommended_event, label=method_name + ' Recommendation')
kmf_cf.fit(antirecommended_times, antirecommended_event, label=method_name + ' Anti-Recommendation')
if new_figure:
#plt.figure(figsize=(8, 2.7))
#kmf.plot(c=colors[0])
#kmf_cf.plot(c=colors[1])
if method_name==None:
kmf.plot(c=colors[0],ci_show=False)
kmf_cf.plot(c=colors[1],ci_show=False)
else:
kmf.plot(c=colors[0])
kmf_cf.plot(c=colors[1])
else:
kmf.plot(c=colors[2])
kmf_cf.plot(c=colors[3])
if annotate:
# Calculate p-value text position and display.
y_pos = 0.4
plt.text(1 * 3, y_pos, f"$p$ = {logrank_result.p_value:.6f}", fontsize='small')
fraction2 = np.sum((pred_ite > 0)) / pred_ite.shape[0]
plt.text(1 * 3, 0.3, 'C-Index=' + str(C_index)[:5], fontsize='small')
plt.text(1 * 3, 0.2, f"{fraction2 * 100:.1f}% recommended for T=1", fontsize='small')
plt.xlabel('Survival Time [month]')
plt.ylabel('Survival Probability')
plt.tight_layout()
if save_path:
plt.savefig(save_path, format='pdf')
def plot_ITE_correlation(pred_ITE, y_true,y_cf,treatment):
ITE = np.zeros(pred_ITE.shape[0])
true_ITE0 = -(y_true[treatment == 0] - y_cf[treatment == 0])
true_ITE1 = y_true[treatment == 1] - y_cf[treatment == 1]
k, j = 0, 0
for i in range(pred_ITE.shape[0]):
if treatment[i] == 0:
ITE[i] = true_ITE0[k]
k = k + 1
else:
ITE[i] = true_ITE1[j]
j = j + 1
ax=sns.scatterplot(x=ITE,y=pred_ITE)
ax.set(xlabel='ITE', ylabel='pred_ITE') | en | 0.573885 | Find factual and counterfactual prediction: Value at 50% survival probability Find factual and counterfactual prediction: Value at 50% survival probability Find factual and counterfactual prediction: Value at 50% survival probability #plt.figure(figsize=(8, 2.7)) #kmf.plot(c=colors[0]) #kmf_cf.plot(c=colors[1]) # Calculate p-value text position and display. | 1.925095 | 2 |
amin_function.py | Infernolia/Computational_Statistics | 0 | 6621395 | import numpy as np
a = np.array(
[
[50,10,30],
[60,5,40],
[70,80,90]
]
)
# return minimum element from all the dimension
print(np.amin(a))
#minimum from each column (axis=0 means column-wise operation)
print(np.amin(a,axis=0))
#minimum from each row (axis=1 means row-wise operation)
print(np.amin(a,axis=1))
| import numpy as np
a = np.array(
[
[50,10,30],
[60,5,40],
[70,80,90]
]
)
# return minimum element from all the dimension
print(np.amin(a))
#minimum from each column (axis=0 means column-wise operation)
print(np.amin(a,axis=0))
#minimum from each row (axis=1 means row-wise operation)
print(np.amin(a,axis=1))
| en | 0.850133 | # return minimum element from all the dimension #minimum from each column (axis=0 means column-wise operation) #minimum from each row (axis=1 means row-wise operation) | 4.194852 | 4 |
mjdir/commands/turbomole.py | aimat-lab/jobdir_queue_submit | 0 | 6621396 | <filename>mjdir/commands/turbomole.py
import os
# import ase
# Example Commands
TURBOMOLE_SLURM_HEADERS = {"int-nano": ''.join(['module purge\n',
# 'export PARA_ARCH=SMP\n',
'export PARNODES=$SLURM_NPROCS\n',
'module load turbomole/7.4.1\n',
'cd $SLURM_SUBMIT_DIR\n'
# 'export TURBODIR=/shared/software/chem/TURBOMOLE/TURBOMOLE-V7.4.1\n',
# 'export PATH=$TURBODIR/scripts:$PATH\n',
# 'export PATH=$TURBODIR/bin/`sysname`:$PATH\n',
# 'export OMP_NUM_THREADS=$SLURM_NPROCS\n'
]),
"for-hlr": ''.join(['module purge\n',
# 'export PARA_ARCH=SMP\n',
'export PARNODES=$SLURM_NPROCS\n'
'module load chem/turbomole/7.3\n',
'cd $SLURM_SUBMIT_DIR\n'
# 'export TURBODIR=/shared/software/chem/TURBOMOLE/TURBOMOLE-V7.4.1\n',
# 'export PATH=$TURBODIR/scripts:$PATH\n',
# 'export PATH=$TURBODIR/bin/`sysname`:$PATH\n',
# 'export OMP_NUM_THREADS=$SLURM_NPROCS\n'
])
}
TURBOMOLE_SLURM_COMMANDS = {"energy": 'cd {path} && ridft > ridft.out',
"eiger": 'cd {path} && eiger > atomic.levels.dat',
"gradient": "",
"optimize": "cd {path} && jobex -ri > add_jobex.out",
"frequencies": ""
}
def write_turbomole_input(filepath, calc, at):
"""
Write input files for turbomole
Args:
filepath (str,path): File path to destination folder.
calc (TYPE): ASE Turbomole object.
at (TYPE): ASE atoms object.
Returns:
None.
"""
workdir = os.getcwd()
os.chdir(filepath)
try:
calc.set_atoms(at)
calc.initialize()
except:
print("Error: cant make input for: ", filepath)
os.chdir(workdir)
else:
os.chdir(workdir)
def read_turbomole_output(filepath, calc):
"""
Read turbomole output in ASE turbomole object.
Args:
filepath (str,path): File path to destination folder.
calc (TYPE): ASE Turbomole object.
Returns:
calc (TYPE): ASE Turbomole object.
"""
workdir = os.getcwd()
os.chdir(filepath)
try:
calc.read_results()
except:
print("Error: cant read input for: ", filepath)
os.chdir(workdir)
else:
os.chdir(workdir)
return calc
def read_turbomole_eiger_file(path):
"""
Read the ouput of eiger files.
Args:
path (str,path): File path to destination folder.
Returns:
tuple: homo,lumo,toteng.
"""
homo = None
lumo = None
toteng = None
with open(os.path.join(path, "atomic.levels.dat"), "r") as f:
for line in f.readlines():
if line.find('HOMO:') > 0:
line_list = line.split(' ')
homo = line_list[-2]
if line.find('LUMO:') > 0:
line_list = line.split(' ')
lumo = line_list[-2]
if line.find('Total energy') >= 0:
line_list = line.split(' ')
toteng = line_list[-2]
return float(homo), float(lumo), float(toteng)
| <filename>mjdir/commands/turbomole.py
import os
# import ase
# Example Commands
TURBOMOLE_SLURM_HEADERS = {"int-nano": ''.join(['module purge\n',
# 'export PARA_ARCH=SMP\n',
'export PARNODES=$SLURM_NPROCS\n',
'module load turbomole/7.4.1\n',
'cd $SLURM_SUBMIT_DIR\n'
# 'export TURBODIR=/shared/software/chem/TURBOMOLE/TURBOMOLE-V7.4.1\n',
# 'export PATH=$TURBODIR/scripts:$PATH\n',
# 'export PATH=$TURBODIR/bin/`sysname`:$PATH\n',
# 'export OMP_NUM_THREADS=$SLURM_NPROCS\n'
]),
"for-hlr": ''.join(['module purge\n',
# 'export PARA_ARCH=SMP\n',
'export PARNODES=$SLURM_NPROCS\n'
'module load chem/turbomole/7.3\n',
'cd $SLURM_SUBMIT_DIR\n'
# 'export TURBODIR=/shared/software/chem/TURBOMOLE/TURBOMOLE-V7.4.1\n',
# 'export PATH=$TURBODIR/scripts:$PATH\n',
# 'export PATH=$TURBODIR/bin/`sysname`:$PATH\n',
# 'export OMP_NUM_THREADS=$SLURM_NPROCS\n'
])
}
TURBOMOLE_SLURM_COMMANDS = {"energy": 'cd {path} && ridft > ridft.out',
"eiger": 'cd {path} && eiger > atomic.levels.dat',
"gradient": "",
"optimize": "cd {path} && jobex -ri > add_jobex.out",
"frequencies": ""
}
def write_turbomole_input(filepath, calc, at):
"""
Write input files for turbomole
Args:
filepath (str,path): File path to destination folder.
calc (TYPE): ASE Turbomole object.
at (TYPE): ASE atoms object.
Returns:
None.
"""
workdir = os.getcwd()
os.chdir(filepath)
try:
calc.set_atoms(at)
calc.initialize()
except:
print("Error: cant make input for: ", filepath)
os.chdir(workdir)
else:
os.chdir(workdir)
def read_turbomole_output(filepath, calc):
"""
Read turbomole output in ASE turbomole object.
Args:
filepath (str,path): File path to destination folder.
calc (TYPE): ASE Turbomole object.
Returns:
calc (TYPE): ASE Turbomole object.
"""
workdir = os.getcwd()
os.chdir(filepath)
try:
calc.read_results()
except:
print("Error: cant read input for: ", filepath)
os.chdir(workdir)
else:
os.chdir(workdir)
return calc
def read_turbomole_eiger_file(path):
"""
Read the ouput of eiger files.
Args:
path (str,path): File path to destination folder.
Returns:
tuple: homo,lumo,toteng.
"""
homo = None
lumo = None
toteng = None
with open(os.path.join(path, "atomic.levels.dat"), "r") as f:
for line in f.readlines():
if line.find('HOMO:') > 0:
line_list = line.split(' ')
homo = line_list[-2]
if line.find('LUMO:') > 0:
line_list = line.split(' ')
lumo = line_list[-2]
if line.find('Total energy') >= 0:
line_list = line.split(' ')
toteng = line_list[-2]
return float(homo), float(lumo), float(toteng)
| en | 0.337835 | # import ase # Example Commands # 'export PARA_ARCH=SMP\n', # 'export TURBODIR=/shared/software/chem/TURBOMOLE/TURBOMOLE-V7.4.1\n', # 'export PATH=$TURBODIR/scripts:$PATH\n', # 'export PATH=$TURBODIR/bin/`sysname`:$PATH\n', # 'export OMP_NUM_THREADS=$SLURM_NPROCS\n' # 'export PARA_ARCH=SMP\n', # 'export TURBODIR=/shared/software/chem/TURBOMOLE/TURBOMOLE-V7.4.1\n', # 'export PATH=$TURBODIR/scripts:$PATH\n', # 'export PATH=$TURBODIR/bin/`sysname`:$PATH\n', # 'export OMP_NUM_THREADS=$SLURM_NPROCS\n' Write input files for turbomole
Args:
filepath (str,path): File path to destination folder.
calc (TYPE): ASE Turbomole object.
at (TYPE): ASE atoms object.
Returns:
None. Read turbomole output in ASE turbomole object.
Args:
filepath (str,path): File path to destination folder.
calc (TYPE): ASE Turbomole object.
Returns:
calc (TYPE): ASE Turbomole object. Read the ouput of eiger files.
Args:
path (str,path): File path to destination folder.
Returns:
tuple: homo,lumo,toteng. | 1.702964 | 2 |
students/forms.py | estudeplus/perfil | 0 | 6621397 | <gh_stars>0
from django import forms
from .models import Student
class StudentForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput)
class Meta:
model = Student
fields = ['name', 'student_id', 'email', 'password'] | from django import forms
from .models import Student
class StudentForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput)
class Meta:
model = Student
fields = ['name', 'student_id', 'email', 'password'] | none | 1 | 2.3682 | 2 | |
pirates/quest/QuestIndicatorNodeItem.py | itsyaboyrocket/pirates | 3 | 6621398 | # uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.quest.QuestIndicatorNodeItem
from pirates.quest.QuestIndicatorNode import QuestIndicatorNode
class QuestIndicatorNodeItem(QuestIndicatorNode):
__module__ = __name__
def __init__(self, questStep):
self.pendingStepObj = None
QuestIndicatorNode.__init__(self, 'ItemIndicator', [], questStep)
return
def delete(self):
if self.pendingStepObj:
base.cr.relatedObjectMgr.abortRequest(self.pendingStepObj)
self.pendingStepObj = None
QuestIndicatorNode.delete(self)
return | # uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.quest.QuestIndicatorNodeItem
from pirates.quest.QuestIndicatorNode import QuestIndicatorNode
class QuestIndicatorNodeItem(QuestIndicatorNode):
__module__ = __name__
def __init__(self, questStep):
self.pendingStepObj = None
QuestIndicatorNode.__init__(self, 'ItemIndicator', [], questStep)
return
def delete(self):
if self.pendingStepObj:
base.cr.relatedObjectMgr.abortRequest(self.pendingStepObj)
self.pendingStepObj = None
QuestIndicatorNode.delete(self)
return | en | 0.524772 | # uncompyle6 version 3.2.0 # Python bytecode 2.4 (62061) # Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)] # Embedded file name: pirates.quest.QuestIndicatorNodeItem | 2.14559 | 2 |
handlers/groups.py | ghowland/deployman | 0 | 6621399 | <filename>handlers/groups.py
"""
sysync: handlers: groups
Module installing groups
"""
from utility.log import Log
from utility.error import Error
from utility.run import Run, RunOnCommit
from utility.os_compatibility import GetFileStatInfo
# Commands we will use to manage groups
COMMAND_GROUP_EXISTS = 'egrep -i "^%s:" /etc/group'
COMMAND_ADD_GROUP = '/usr/sbin/groupadd'
def GetKeys(section_item, options):
"""Returns the key used for the Work List/Data work_key"""
if 'name' not in section_item or section_item['name'] == None:
Error('Section Item does not have a "name" key: %s' % section_item, options)
# Returns List, always a single item for this handler
return [section_item['name']]
def Install(section_item, config, options):
Log('Group: %s' % section_item)
# Does this group exist?
(status, output) = Run(COMMAND_GROUP_EXISTS % section_item['name'])
# If this group doesnt exist, add it
if status != 0:
cmd = COMMAND_ADD_GROUP
# -- Add Options --
# UID
if section_item['gid'] != None:
try:
int(section_item['gid'])
except ValueError, e:
Error('Group GID is not a number: %s: %s' % (section_item['name'], e), options)
cmd += ' -g %s' % section_item['gid']
# Add the group name
cmd += ' %s' % section_item['name']
RunOnCommit(cmd, 'Failed to add group: %s' % section_item['name'], options)
| <filename>handlers/groups.py
"""
sysync: handlers: groups
Module installing groups
"""
from utility.log import Log
from utility.error import Error
from utility.run import Run, RunOnCommit
from utility.os_compatibility import GetFileStatInfo
# Commands we will use to manage groups
COMMAND_GROUP_EXISTS = 'egrep -i "^%s:" /etc/group'
COMMAND_ADD_GROUP = '/usr/sbin/groupadd'
def GetKeys(section_item, options):
"""Returns the key used for the Work List/Data work_key"""
if 'name' not in section_item or section_item['name'] == None:
Error('Section Item does not have a "name" key: %s' % section_item, options)
# Returns List, always a single item for this handler
return [section_item['name']]
def Install(section_item, config, options):
Log('Group: %s' % section_item)
# Does this group exist?
(status, output) = Run(COMMAND_GROUP_EXISTS % section_item['name'])
# If this group doesnt exist, add it
if status != 0:
cmd = COMMAND_ADD_GROUP
# -- Add Options --
# UID
if section_item['gid'] != None:
try:
int(section_item['gid'])
except ValueError, e:
Error('Group GID is not a number: %s: %s' % (section_item['name'], e), options)
cmd += ' -g %s' % section_item['gid']
# Add the group name
cmd += ' %s' % section_item['name']
RunOnCommit(cmd, 'Failed to add group: %s' % section_item['name'], options)
| en | 0.648859 | sysync: handlers: groups Module installing groups # Commands we will use to manage groups Returns the key used for the Work List/Data work_key # Returns List, always a single item for this handler # Does this group exist? # If this group doesnt exist, add it # -- Add Options -- # UID # Add the group name | 2.272938 | 2 |
problems/prob7/prob7.py | w1ldy0uth/Project-Euler | 1 | 6621400 | #!/usr/bin/env python3
# -*- coding: UTF=8 -*-
"""
By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that
the 6th prime is 13.
What is the 10 001st prime number?
"""
def isPrime(n):
if n % 2 == 0:
return n == 2
d = 3
while d * d <= n and n % d != 0:
d += 2
return d * d > n
def primes():
prime = 13
num = 6
while num != 10001:
prime += 2
if isPrime(prime):
num += 1
return prime
if __name__ == '__main__':
print(primes())
| #!/usr/bin/env python3
# -*- coding: UTF=8 -*-
"""
By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that
the 6th prime is 13.
What is the 10 001st prime number?
"""
def isPrime(n):
if n % 2 == 0:
return n == 2
d = 3
while d * d <= n and n % d != 0:
d += 2
return d * d > n
def primes():
prime = 13
num = 6
while num != 10001:
prime += 2
if isPrime(prime):
num += 1
return prime
if __name__ == '__main__':
print(primes())
| en | 0.893213 | #!/usr/bin/env python3 # -*- coding: UTF=8 -*- By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13. What is the 10 001st prime number? | 4.334476 | 4 |
holiday/objoriented.py | daveshed/Euler | 0 | 6621401 | <filename>holiday/objoriented.py
import random
import sys
TOLERANCE = 0.00001
class Day:
def __init__(self, date):
self.date = date
class Holiday(Day):
holiday = True
class WorkDay(Day):
holiday = False
class Calendar:
def __init__(self, n_days):
self.n_days = n_days
self.days = [WorkDay(date) for date in range(n_days)]
def get_day(self, date):
return self.days[self._normalise_date(date)]
def is_holiday(self, date):
day = self.get_day(date)
return day.holiday
def is_single_workday(self, date):
day = self.get_day(date)
# look out for index error
return self.is_holiday(date + 1) and self.is_holiday(date - 1)
def to_holiday(self, date):
self.days[self._normalise_date(date)] = Holiday(date)
def to_workday(self, date):
self.days[self._normalise_date(date)] = WorkDay(date)
@property
def party_on(self):
return all([self.is_holiday(date) for date in range(self.n_days)])
def _normalise_date(self, date):
return date % self.n_days
class Emperor:
def spawn(calendar):
birthdate = random.randrange(calendar.n_days)
# print("spawning Emperor with birthdate {}".format(birthdate))
if calendar.is_holiday(birthdate):
return
else:
calendar.to_holiday(birthdate)
if calendar.is_single_workday(birthdate + 1):
calendar.to_holiday(birthdate + 1)
if calendar.is_single_workday(birthdate - 1):
calendar.to_holiday(birthdate - 1)
class World:
def __init__(self, n_days):
self.calendar = Calendar(n_days)
def run(self):
counter = 0
while not self.calendar.party_on:
Emperor.spawn(self.calendar)
counter += 1
return counter
def expected_emperors(days):
results = []
new_result = 1
old_result = new_result + TOLERANCE * 2
n_runs = 0
while abs(new_result - old_result) > TOLERANCE:
n_runs += 1
old_result = new_result
world = World(days)
n_emperors = world.run()
results.append(n_emperors)
new_result = sum(results) / n_runs
print("year of {} days needs {} emperors. result is {}".format(days, n_emperors, new_result))
return sum(results) / n_runs
| <filename>holiday/objoriented.py
import random
import sys
TOLERANCE = 0.00001
class Day:
def __init__(self, date):
self.date = date
class Holiday(Day):
holiday = True
class WorkDay(Day):
holiday = False
class Calendar:
def __init__(self, n_days):
self.n_days = n_days
self.days = [WorkDay(date) for date in range(n_days)]
def get_day(self, date):
return self.days[self._normalise_date(date)]
def is_holiday(self, date):
day = self.get_day(date)
return day.holiday
def is_single_workday(self, date):
day = self.get_day(date)
# look out for index error
return self.is_holiday(date + 1) and self.is_holiday(date - 1)
def to_holiday(self, date):
self.days[self._normalise_date(date)] = Holiday(date)
def to_workday(self, date):
self.days[self._normalise_date(date)] = WorkDay(date)
@property
def party_on(self):
return all([self.is_holiday(date) for date in range(self.n_days)])
def _normalise_date(self, date):
return date % self.n_days
class Emperor:
def spawn(calendar):
birthdate = random.randrange(calendar.n_days)
# print("spawning Emperor with birthdate {}".format(birthdate))
if calendar.is_holiday(birthdate):
return
else:
calendar.to_holiday(birthdate)
if calendar.is_single_workday(birthdate + 1):
calendar.to_holiday(birthdate + 1)
if calendar.is_single_workday(birthdate - 1):
calendar.to_holiday(birthdate - 1)
class World:
def __init__(self, n_days):
self.calendar = Calendar(n_days)
def run(self):
counter = 0
while not self.calendar.party_on:
Emperor.spawn(self.calendar)
counter += 1
return counter
def expected_emperors(days):
results = []
new_result = 1
old_result = new_result + TOLERANCE * 2
n_runs = 0
while abs(new_result - old_result) > TOLERANCE:
n_runs += 1
old_result = new_result
world = World(days)
n_emperors = world.run()
results.append(n_emperors)
new_result = sum(results) / n_runs
print("year of {} days needs {} emperors. result is {}".format(days, n_emperors, new_result))
return sum(results) / n_runs
| en | 0.700591 | # look out for index error # print("spawning Emperor with birthdate {}".format(birthdate)) | 3.448435 | 3 |
mysql_cluster_manager/src/mysql_cluster_manager.py | jnidzwetzki/mysql-ha | 8 | 6621402 | <filename>mysql_cluster_manager/src/mysql_cluster_manager.py<gh_stars>1-10
#!/usr/bin/env python3
"""This file is part of the MySQL cluster manager"""
import os
import sys
import logging
import argparse
from mcm.actions import Actions
from mcm.consul import Consul
from mcm.mysql import Mysql
from mcm.proxysql import Proxysql
parser = argparse.ArgumentParser(
description="MySQL cluster manager",
epilog="For more info, please see: https://github.com/jnidzwetzki/mysql-ha-cloud")
AVAILABLE_OPERATIONS = "(join_or_bootstrap, mysql_backup, mysql_restore, mysql_start, mysql_stop)"
parser.add_argument('operation', metavar='operation',
help=f'Operation to be executed {AVAILABLE_OPERATIONS}')
log_levels = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
parser.add_argument('--log-level', default='INFO', choices=log_levels)
# Parse args
args = parser.parse_args()
# Configure logging
logging.basicConfig(level=args.log_level,
format='%(asctime)-15s %(levelname)s %(name)s %(message)s')
# Check for all needed env vars
required_envvars = ['CONSUL_BIND_INTERFACE', 'CONSUL_BOOTSTRAP_SERVER',
'MINIO_ACCESS_KEY', 'MINIO_SECRET_KEY', 'MINIO_URL',
'MYSQL_ROOT_PASSWORD', 'MYSQL_BACKUP_USER', 'MYSQL_BACKUP_PASSWORD',
'MYSQL_REPLICATION_USER', 'MYSQL_REPLICATION_PASSWORD']
for required_var in required_envvars:
if not required_var in os.environ:
logging.error("Required environment %s not found, exiting", required_var)
sys.exit(1)
# Perform operations
if args.operation == 'join_or_bootstrap':
Actions.join_or_bootstrap()
elif args.operation == 'mysql_backup':
Mysql.backup_data()
elif args.operation == 'mysql_restore':
Mysql.restore_backup()
elif args.operation == 'mysql_start':
Mysql.server_start()
elif args.operation == 'mysql_stop':
Mysql.server_stop()
elif args.operation == 'mysql_autobackup':
Mysql.create_backup_if_needed()
elif args.operation == 'proxysql_init':
Proxysql.inital_setup()
nodes = Consul.get_instance().get_all_registered_nodes()
Proxysql.set_mysql_server(nodes)
else:
logging.error("Unknown operation: %s", {args.operation})
sys.exit(1)
| <filename>mysql_cluster_manager/src/mysql_cluster_manager.py<gh_stars>1-10
#!/usr/bin/env python3
"""This file is part of the MySQL cluster manager"""
import os
import sys
import logging
import argparse
from mcm.actions import Actions
from mcm.consul import Consul
from mcm.mysql import Mysql
from mcm.proxysql import Proxysql
parser = argparse.ArgumentParser(
description="MySQL cluster manager",
epilog="For more info, please see: https://github.com/jnidzwetzki/mysql-ha-cloud")
AVAILABLE_OPERATIONS = "(join_or_bootstrap, mysql_backup, mysql_restore, mysql_start, mysql_stop)"
parser.add_argument('operation', metavar='operation',
help=f'Operation to be executed {AVAILABLE_OPERATIONS}')
log_levels = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
parser.add_argument('--log-level', default='INFO', choices=log_levels)
# Parse args
args = parser.parse_args()
# Configure logging
logging.basicConfig(level=args.log_level,
format='%(asctime)-15s %(levelname)s %(name)s %(message)s')
# Check for all needed env vars
required_envvars = ['CONSUL_BIND_INTERFACE', 'CONSUL_BOOTSTRAP_SERVER',
'MINIO_ACCESS_KEY', 'MINIO_SECRET_KEY', 'MINIO_URL',
'MYSQL_ROOT_PASSWORD', 'MYSQL_BACKUP_USER', 'MYSQL_BACKUP_PASSWORD',
'MYSQL_REPLICATION_USER', 'MYSQL_REPLICATION_PASSWORD']
for required_var in required_envvars:
if not required_var in os.environ:
logging.error("Required environment %s not found, exiting", required_var)
sys.exit(1)
# Perform operations
if args.operation == 'join_or_bootstrap':
Actions.join_or_bootstrap()
elif args.operation == 'mysql_backup':
Mysql.backup_data()
elif args.operation == 'mysql_restore':
Mysql.restore_backup()
elif args.operation == 'mysql_start':
Mysql.server_start()
elif args.operation == 'mysql_stop':
Mysql.server_stop()
elif args.operation == 'mysql_autobackup':
Mysql.create_backup_if_needed()
elif args.operation == 'proxysql_init':
Proxysql.inital_setup()
nodes = Consul.get_instance().get_all_registered_nodes()
Proxysql.set_mysql_server(nodes)
else:
logging.error("Unknown operation: %s", {args.operation})
sys.exit(1)
| en | 0.683134 | #!/usr/bin/env python3 This file is part of the MySQL cluster manager # Parse args # Configure logging # Check for all needed env vars # Perform operations | 1.763867 | 2 |
src/roslaunch_monitor/launch_widget.py | nilsbore/roslaunch_monitor | 17 | 6621403 | #!/usr/bin/env python
# Copyright (c) 2011, <NAME>, TU Darmstadt
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the TU Darmstadt nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import division
import os
from python_qt_binding import loadUi
from python_qt_binding.QtCore import Signal, Slot
from python_qt_binding.QtGui import QIcon
from python_qt_binding.QtWidgets import QWidget
import roslib
import rosmsg
import rospkg
import rospy
from qt_gui_py_common.worker_thread import WorkerThread
from rqt_py_common.extended_combo_box import ExtendedComboBox
from rqt_py_common.rqt_roscomm_util import RqtRoscommUtil
from roslaunch_monitor.launch_tree_widget import LaunchTreeWidget
# main class inherits from the ui window class
class LaunchWidget(QWidget):
add_launch = Signal(str, str, str, bool)
change_launch = Signal(int, str, str, str, object)
#publish_once = Signal(int)
remove_launch = Signal(int)
clean_up_launches = Signal()
def __init__(self, parent=None):
super(LaunchWidget, self).__init__(parent)
self._topic_dict = {}
self._update_thread = WorkerThread(self._update_thread_run, self._update_finished)
self._rospack = rospkg.RosPack()
ui_file = os.path.join(self._rospack.get_path('roslaunch_monitor'), 'resource', 'rqt_monitor_plugin.ui')
loadUi(ui_file, self, {'ExtendedComboBox': ExtendedComboBox, 'LaunchTreeWidget': LaunchTreeWidget})
self.refresh_button.setIcon(QIcon.fromTheme('view-refresh'))
self.refresh_button.clicked.connect(self.refresh_combo_boxes)
self.add_launch_button.setIcon(QIcon.fromTheme('add'))
self.remove_launch_button.setIcon(QIcon.fromTheme('remove'))
self.clear_button.setIcon(QIcon.fromTheme('edit-clear'))
self.refresh_combo_boxes()
self.launch_tree_widget.model().item_value_changed.connect(self.change_launch)
self.launch_tree_widget.remove_publisher.connect(self.remove_launch)
#self.publisher_tree_widget.publish_once.connect(self.publish_once)
self.remove_launch_button.clicked.connect(self.launch_tree_widget.remove_selected_publishers)
self.clear_button.clicked.connect(self.clean_up_launches)
def shutdown_plugin(self):
self._update_thread.kill()
@Slot()
def refresh_combo_boxes(self):
self._update_thread.kill()
self.file_combo_box.setEnabled(False)
self.package_combo_box.setEnabled(False)
self.file_combo_box.setEditText('updating...')
self.package_combo_box.setEditText('updating...')
self._update_thread.start()
# this runs in a non-gui thread, so don't access widgets here directly
def _update_thread_run(self):
# update type_combo_box
message_type_names = []
try:
# this only works on fuerte and up
packages = sorted([pkg_tuple[0] for pkg_tuple in rosmsg.iterate_packages(self._rospack, rosmsg.MODE_MSG)])
except:
# this works up to electric
packages = sorted(rosmsg.list_packages())
for package in packages:
for base_type_str in rosmsg.list_msgs(package, rospack=self._rospack):
message_class = roslib.message.get_message_class(base_type_str)
if message_class is not None:
message_type_names.append(base_type_str)
# TODO: get all ROS packages and launch files here instead
#self.type_combo_box.setItems.emit(sorted(message_type_names))
packages = sorted([pkg_tuple[0]
for pkg_tuple
in RqtRoscommUtil.iterate_packages('launch')])
self.package_combo_box.setItems.emit(packages)
# update topic_combo_box
#_, _, topic_types = rospy.get_master().getTopicTypes()
#self._topic_dict = dict(topic_types)
#self.topic_combo_box.setItems.emit(sorted(self._topic_dict.keys()))
@Slot()
def _update_finished(self):
self.package_combo_box.setEnabled(True)
self.file_combo_box.setEnabled(True)
@Slot(str)
def on_package_combo_box_currentIndexChanged(self, package):
#if topic_name in self._topic_dict:
# self.type_combo_box.setEditText(self._topic_dict[topic_name])
#pass
_launch_instance_list = RqtRoscommUtil.list_files(package, 'launch')
_launchfile_instances = [x.split('/')[1] for x in _launch_instance_list]
self.file_combo_box.setItems.emit(_launchfile_instances)
#self.file_combo_box.clear()
#self.file_combo_box.addItems(_launchfile_instances)
@Slot()
def on_add_launch_button_clicked(self):
package_name = str(self.package_combo_box.currentText())
file_name = str(self.file_combo_box.currentText())
arguments = str(self.arguments_combo_box.currentText())
enabled = False
self.add_launch.emit(package_name, file_name, arguments, enabled)
| #!/usr/bin/env python
# Copyright (c) 2011, <NAME>, TU Darmstadt
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the TU Darmstadt nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import division
import os
from python_qt_binding import loadUi
from python_qt_binding.QtCore import Signal, Slot
from python_qt_binding.QtGui import QIcon
from python_qt_binding.QtWidgets import QWidget
import roslib
import rosmsg
import rospkg
import rospy
from qt_gui_py_common.worker_thread import WorkerThread
from rqt_py_common.extended_combo_box import ExtendedComboBox
from rqt_py_common.rqt_roscomm_util import RqtRoscommUtil
from roslaunch_monitor.launch_tree_widget import LaunchTreeWidget
# main class inherits from the ui window class
class LaunchWidget(QWidget):
    """Main widget of the roslaunch monitor plugin.

    Lets the user pick a package / launch file pair and forwards actions to the
    owning plugin through Qt signals; the combo boxes are populated by a
    background worker thread so the GUI never blocks on ROS queries.
    """
    # Signals consumed by the owning plugin.
    add_launch = Signal(str, str, str, bool)
    change_launch = Signal(int, str, str, str, object)
    #publish_once = Signal(int)
    remove_launch = Signal(int)
    clean_up_launches = Signal()

    def __init__(self, parent=None):
        super(LaunchWidget, self).__init__(parent)
        self._topic_dict = {}
        # Worker that gathers package/launch names off the GUI thread.
        self._update_thread = WorkerThread(self._update_thread_run, self._update_finished)
        self._rospack = rospkg.RosPack()
        ui_file = os.path.join(self._rospack.get_path('roslaunch_monitor'), 'resource', 'rqt_monitor_plugin.ui')
        loadUi(ui_file, self, {'ExtendedComboBox': ExtendedComboBox, 'LaunchTreeWidget': LaunchTreeWidget})
        self.refresh_button.setIcon(QIcon.fromTheme('view-refresh'))
        self.refresh_button.clicked.connect(self.refresh_combo_boxes)
        self.add_launch_button.setIcon(QIcon.fromTheme('add'))
        self.remove_launch_button.setIcon(QIcon.fromTheme('remove'))
        self.clear_button.setIcon(QIcon.fromTheme('edit-clear'))
        self.refresh_combo_boxes()
        self.launch_tree_widget.model().item_value_changed.connect(self.change_launch)
        self.launch_tree_widget.remove_publisher.connect(self.remove_launch)
        #self.publisher_tree_widget.publish_once.connect(self.publish_once)
        self.remove_launch_button.clicked.connect(self.launch_tree_widget.remove_selected_publishers)
        self.clear_button.clicked.connect(self.clean_up_launches)

    def shutdown_plugin(self):
        """Stop the background update thread on plugin teardown."""
        self._update_thread.kill()

    @Slot()
    def refresh_combo_boxes(self):
        """Restart the worker that repopulates the package/file combo boxes."""
        self._update_thread.kill()
        self.file_combo_box.setEnabled(False)
        self.package_combo_box.setEnabled(False)
        self.file_combo_box.setEditText('updating...')
        self.package_combo_box.setEditText('updating...')
        self._update_thread.start()

    # this runs in a non-gui thread, so don't access widgets here directly
    def _update_thread_run(self):
        """Collect package and launch-file names; publish them via setItems signals."""
        # update type_combo_box
        message_type_names = []
        try:
            # this only works on fuerte and up
            packages = sorted([pkg_tuple[0] for pkg_tuple in rosmsg.iterate_packages(self._rospack, rosmsg.MODE_MSG)])
        except Exception:
            # this works up to electric (narrowed from a bare `except:` so that
            # KeyboardInterrupt/SystemExit are not swallowed in the worker)
            packages = sorted(rosmsg.list_packages())
        for package in packages:
            for base_type_str in rosmsg.list_msgs(package, rospack=self._rospack):
                message_class = roslib.message.get_message_class(base_type_str)
                if message_class is not None:
                    message_type_names.append(base_type_str)
        # TODO: get all ROS packages and launch files here instead
        #self.type_combo_box.setItems.emit(sorted(message_type_names))
        packages = sorted([pkg_tuple[0]
                           for pkg_tuple
                           in RqtRoscommUtil.iterate_packages('launch')])
        self.package_combo_box.setItems.emit(packages)
        # update topic_combo_box
        #_, _, topic_types = rospy.get_master().getTopicTypes()
        #self._topic_dict = dict(topic_types)
        #self.topic_combo_box.setItems.emit(sorted(self._topic_dict.keys()))

    @Slot()
    def _update_finished(self):
        """Worker finished: re-enable the combo boxes."""
        self.package_combo_box.setEnabled(True)
        self.file_combo_box.setEnabled(True)

    @Slot(str)
    def on_package_combo_box_currentIndexChanged(self, package):
        """Refresh the launch-file combo box for the newly selected package."""
        _launch_instance_list = RqtRoscommUtil.list_files(package, 'launch')
        # Entries are '<package>/<path>'; keep the second path segment
        # (the launch file name for flat launch/ directory layouts).
        _launchfile_instances = [x.split('/')[1] for x in _launch_instance_list]
        self.file_combo_box.setItems.emit(_launchfile_instances)

    @Slot()
    def on_add_launch_button_clicked(self):
        """Emit add_launch with the current selections; new launches start disabled."""
        package_name = str(self.package_combo_box.currentText())
        file_name = str(self.file_combo_box.currentText())
        arguments = str(self.arguments_combo_box.currentText())
        enabled = False
        self.add_launch.emit(package_name, file_name, arguments, enabled)
| en | 0.611497 | #!/usr/bin/env python # Copyright (c) 2011, <NAME>, TU Darmstadt # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of the TU Darmstadt nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
# main class inherits from the ui window class #publish_once = Signal(int) #self.publisher_tree_widget.publish_once.connect(self.publish_once) # this runs in a non-gui thread, so don't access widgets here directly # update type_combo_box # this only works on fuerte and up # this works up to electric # TODO: get all ROS packages and launch files here instead #self.type_combo_box.setItems.emit(sorted(message_type_names)) # update topic_combo_box #_, _, topic_types = rospy.get_master().getTopicTypes() #self._topic_dict = dict(topic_types) #self.topic_combo_box.setItems.emit(sorted(self._topic_dict.keys())) #if topic_name in self._topic_dict: # self.type_combo_box.setEditText(self._topic_dict[topic_name]) #pass #self.file_combo_box.clear() #self.file_combo_box.addItems(_launchfile_instances) | 1.152571 | 1 |
dqo/query_generator/guided.py | danield137/deep_query_optimzation | 0 | 6621404 | <gh_stars>0
import logging
import os
import time
from collections import defaultdict
from queue import Queue
from typing import Tuple, Callable, Dict, Optional
import numpy as np
from dqo import log_utils
from dqo.db.clients import DatabaseClient
from dqo.db.models import Database
from dqo.lab.query_executor import QueryExecutor
from dqo.query_generator import RandomQueryGen
from dqo.query_generator.query_builder import QueryBuilder
from dqo.query_generator.rl import rargmin
from dqo.query_generator.rl.partitioner import Partitioner, Log2Partitioner
from dqo.relational import Query, SQLParser
# Shared module-level logger for the generator classes below.
logger = logging.getLogger('query_generator.guided')
logger.setLevel(logging.INFO)
# Global counter used by GuidedQueryGen to assign default instance names.
indexer = 0
class QueryGenError(Exception):
    """Raised when the guided generator cannot take any further action."""
# todo: add cardinality considerations
class GuidedQueryGen(QueryExecutor):
    """Mutates a query step by step until its runtime lands in a target window.

    Each step measures the current query's runtime, then narrows (cheaper),
    broadens (more expensive) or stays, until the runtime falls inside
    ``target`` or ``max_steps`` steps have been taken.
    """
    rqg: RandomQueryGen
    scheme: Database
    qb: QueryBuilder = None
    # NOTE(review): declared at class level, so the runtime memo is shared by
    # ALL instances — presumably intentional (dedupes identical queries across
    # generators); confirm before changing to a per-instance dict.
    mem: Dict[Query, float] = defaultdict(float)
    cb: Callable[[Query, float], None]

    def __init__(
            self,
            db_client: DatabaseClient,
            target: Tuple[float, float],
            stop_early: bool = False,
            max_steps: int = 100,
            name: str = None,
            cb: Callable[[Query, float], None] = None,
            query_logger: logging.Logger = None,
            seed: int = None,
            extended: bool = False
    ):
        '''
        :param db_client: database client used to execute and time queries
        :param target: a tuple with (min, max) values to consider a "hit"
        :param stop_early: if true, returns done on first occurrence of a "hit"
        :param max_steps: limit number of steps
        :param name: display name used in log lines (defaults to a global counter)
        :param cb: callback to allow extra work on each (query, runtime) pair
        :param query_logger: logger that records executed queries
        :param seed: random seed forwarded to the query builder
        :param extended: if true, always collect the execution plan
        '''
        super().__init__(db_client, query_logger=query_logger, extended=extended)
        global indexer
        indexer += 1
        self.cb = cb
        self.stop_early = stop_early
        self.target = target
        self.steps = 0
        self.max_steps = max_steps if max_steps is not None else 100
        self.name = name or indexer
        # Lazily-built collaborators, see the properties below.
        self._rqg = None
        self._qb = None
        self._scheme = None
        self.seed = seed
        self.extended = extended

    @property
    def scheme(self) -> Database:
        """Database model, fetched from the client on first access."""
        if self._scheme is None:
            self._scheme = self.dbc.model()
        return self._scheme

    @property
    def qb(self) -> QueryBuilder:
        """Lazily created query builder bound to the database scheme."""
        if self._qb is None:
            self._qb = QueryBuilder(self.scheme, seed=self.seed)
        return self._qb

    @property
    def rqg(self) -> RandomQueryGen:
        """Lazily created random generator used for the initial query."""
        if self._rqg is None:
            self._rqg = RandomQueryGen(self.scheme)
        return self._rqg

    def run_query(self, analyze=True) -> Tuple[float, bool]:
        """Execute the current query (memoized) and return (runtime, hit)."""
        if self.qb.q not in self.mem:
            if analyze or self.extended:
                # The execution plan itself is not needed here, only the timings.
                plan_time, exec_time, _ = self.analyze(self.qb.q)
                took = plan_time + exec_time
            else:
                took = self.time(self.qb.q)
            if self.cb and callable(self.cb):
                self.cb(self.qb.q, took)
            self.mem[self.qb.q] = took
        runtime = self.mem[self.qb.q]
        return runtime, self.hit(runtime)

    @property
    def current_sql(self) -> str:
        """SQL text of the current query."""
        return self.qb.q.to_sql(pretty=False, alias=False)

    def randomize_initial(self):
        """Replace the current query with a fresh random one."""
        self.qb.q = self.rqg.randomize()
        self.qb.sync()

    def narrow(self):
        """Make the query cheaper: drop a projection/relation or add a condition."""
        actions = []
        if self.qb.can_remove_projection():
            actions.append(self.qb.remove_projection)
        if self.qb.can_remove_relation():
            actions.append(self.qb.remove_relation)
        actions.append(self.qb.add_condition)
        action = np.random.choice(actions)
        action()

    def stay(self):
        """Keep the runtime roughly unchanged by toggling a projection."""
        if self.qb.can_remove_projection():
            self.qb.remove_projection()
        else:
            self.qb.add_projection()

    def broaden(self):
        """Make the query more expensive; raises QueryGenError when impossible."""
        actions = []
        if self.qb.can_add_projection():
            actions.append(self.qb.add_projection)
        if self.qb.can_add_relation():
            actions.append(self.qb.add_relation)
        if self.qb.can_remove_condition():
            actions.append(self.qb.remove_condition)
        if self.qb.can_replace_join():
            actions.append(self.qb.replace_join)
        if not actions:
            raise QueryGenError('no more options to broaden')
        action = np.random.choice(actions)
        action()

    # todo: add a stupid condition, like for a range [0,1], add > 0.1 and < 0.09,
    # just to add another scan over the data
    # generally this may not have much of an effect, but, for joins, it can wreck havoc
    def select_next_action(self, runtime):
        """Pick narrow/broaden/stay depending on where runtime sits vs the target window."""
        _min, _max = self.target
        if runtime > _max:
            return self.narrow
        elif runtime < _min:
            return self.broaden
        else:
            return self.stay

    def step(self, prev_runtime: float) -> Tuple[float, str, bool]:
        """Apply one mutation and re-run; returns (runtime, action name, done)."""
        # BUG FIX: self.steps was never incremented, so the
        # `self.steps >= self.max_steps` termination condition could never fire.
        self.steps += 1
        action = self.select_next_action(prev_runtime)
        action()
        runtime, hit = self.run_query()
        done = (self.stop_early and hit) or self.steps >= self.max_steps
        return runtime, action.__name__, done

    def hit(self, runtime: float) -> bool:
        """True when runtime is inside the inclusive target window."""
        return self.target[0] <= runtime <= self.target[1]

    def guide(self):
        """Step until done, logging each transition."""
        self.steps += 1
        runtime, done = self.run_query()
        while not done:
            prev_runtime = runtime
            runtime, action_took, done = self.step(runtime)
            logger.info(f'step: {self.steps - 1}, prev: {prev_runtime}, action: {action_took}, runtime: {runtime}')
class BalancedQueryGen:
    """Drives a pool of GuidedQueryGen instances to build a runtime-balanced query set.

    Keeps a queue of generators, each aimed at the currently least-populated
    runtime partition, and steps them round-robin until ``n`` distinct queries
    have been collected.
    """

    def __init__(
            self,
            db_client: DatabaseClient,
            # NOTE(review): this default instance is created once at import time
            # and shared across calls — fine only if Log2Partitioner is stateless.
            partitioner: Partitioner = Log2Partitioner(min_value=1, max_value=2 ** 8),
            cb: Callable[[Query, float], None] = None,
            q_depth: int = 10,
            checkpoint: bool = False,
            patience: Optional[int] = 10,
            name_postfix: Optional[str] = None,
            extended=False
    ):
        self.partitioner = partitioner
        self.partitions = [0] * partitioner.k
        self.user_cb = cb
        self.checkpoint_path: str = f'{db_client.humanize_target()}.qcp'
        # use queue depth to counter postgres's caching, by checking last
        self.q: Queue[Tuple[GuidedQueryGen, float]] = Queue(q_depth)
        self.mem: Dict[Query, float] = defaultdict(float)
        self.patience = patience
        self.extended = extended

        def wrapped_cb(q: Query, runtime: float):
            # Record every new successfully-timed query and update the
            # partition histogram before forwarding to the user callback.
            if runtime > 0 and q not in self.mem:
                self.mem[q] = runtime
                partition = self.partitioner.partition(runtime)
                self.partitions[partition] += 1
                if cb is not None:
                    cb(q, runtime)
        self.db_client = db_client
        self.wrapped_cb = wrapped_cb
        # BUG FIX: the conditional expression binds looser than `or`, so the
        # original `name_postfix or "_extended" if self.extended else ""`
        # parsed as `(name_postfix or "_extended") if self.extended else ""`
        # and discarded a caller-supplied postfix whenever extended was False.
        name_postfix = name_postfix or ("_extended" if self.extended else "")
        filename = os.path.join(
            'runtimes',
            f'{db_client.humanize_target()}_{str(int(time.time()))}{name_postfix}.csv'
        )
        self.query_logger = log_utils.rotating_logger(filename=filename)
        self.checkpoint = checkpoint

    def load_checkpoint(self):
        """Re-seed the generator queue from a previously saved checkpoint file."""
        if not os.path.exists(self.checkpoint_path):
            return
        with open(self.checkpoint_path) as cp:
            lines = cp.readlines()
        logger.info(f'loading {len(lines)} queries')
        if len(lines) > self.q.maxsize:
            self.q = Queue(maxsize=len(lines))
        for idx, line in enumerate(lines):
            try:
                query = SQLParser.to_query(line)
                # TODO: this is somewhat pointless, as we don't save the old distribution
                min_partition = rargmin(self.partitions)
                gqg = GuidedQueryGen(
                    db_client=self.db_client,
                    cb=self.wrapped_cb,
                    target=self.partitioner.bounds(min_partition),
                    # share logger
                    query_logger=self.query_logger,
                    extended=self.extended
                )
                gqg.qb.q = query
                gqg.qb.sync()
                self.q.put((gqg, 0))
            except Exception:
                # Skip unparsable checkpoint lines (was a bare except).
                pass

    def save_checkpoint(self):
        """Persist the SQL of every queued generator so a later run can resume."""
        with open(self.checkpoint_path, 'w+') as cp:
            for qgq, _ in list(self.q.queue):
                cp.write(qgq.qb.q.to_sql(pretty=False, alias=False) + '\n')

    def generate(self, n: int = 100000):
        """Produce queries until *n* distinct runtimes have been recorded."""
        i = 0
        if self.checkpoint:
            self.load_checkpoint()
        # Per-generator (partition, consecutive-count) used by the patience cutoff.
        # BUG FIX: the factory was `lambda x: (0, 0)`, but default_factory is
        # called with no arguments, so the first lookup raised TypeError (then
        # silently swallowed below) and patience never worked.
        tracking: Dict[int, Tuple[int, int]] = defaultdict(lambda: (0, 0))
        logger.info('starting query generation loop')
        while len(self.mem) <= n:
            if self.checkpoint and i > 0 and i % self.q.maxsize == 0:
                self.save_checkpoint()
            if not self.q.full():
                # Top the pool up with a generator aimed at the emptiest partition.
                min_partition = rargmin(self.partitions)
                self.q.put((
                    GuidedQueryGen(
                        db_client=self.db_client,
                        cb=self.wrapped_cb,
                        target=self.partitioner.bounds(min_partition),
                        # share logger
                        query_logger=self.query_logger,
                        extended=self.extended  # consistency with load_checkpoint
                    ), 0)
                )
                continue
            gqn, prev = self.q.get()
            i += 1
            done = True
            runtime = -1
            if prev == 0:
                # First run for this generator: seed it with a random query.
                if len(gqn.qb.q) == 0:
                    gqn.randomize_initial()
                try:
                    runtime, done = gqn.run_query()
                except Exception:
                    # Best effort: a failing query simply retires the generator.
                    pass
            else:
                try:
                    runtime, _, done = gqn.step(prev)
                    if self.patience is not None:
                        prev_partition, seq_length = tracking[id(gqn)]
                        current_partition = self.partitioner.partition(runtime)
                        reset_count = current_partition != prev_partition
                        seq_length = 0 if reset_count else seq_length + 1
                        if seq_length > self.patience:
                            # Stuck in one partition for too long: retire it.
                            done = True
                            del tracking[id(gqn)]
                        else:
                            tracking[id(gqn)] = current_partition, seq_length
                except Exception:
                    # Was a bare except; narrowed so Ctrl-C still stops the loop.
                    pass
            logger.info(f'[{gqn.name}:{gqn.steps}] - {runtime}')
            if not done:
                self.q.put((gqn, runtime))
            logger.info(f'BalancedQueryGen.generate | i:{i} - partitions: {self.partitions}')
| import logging
import os
import time
from collections import defaultdict
from queue import Queue
from typing import Tuple, Callable, Dict, Optional
import numpy as np
from dqo import log_utils
from dqo.db.clients import DatabaseClient
from dqo.db.models import Database
from dqo.lab.query_executor import QueryExecutor
from dqo.query_generator import RandomQueryGen
from dqo.query_generator.query_builder import QueryBuilder
from dqo.query_generator.rl import rargmin
from dqo.query_generator.rl.partitioner import Partitioner, Log2Partitioner
from dqo.relational import Query, SQLParser
# Shared module-level logger for the generator classes below.
logger = logging.getLogger('query_generator.guided')
logger.setLevel(logging.INFO)
# Global counter used by GuidedQueryGen to assign default instance names.
indexer = 0
class QueryGenError(Exception):
    """Raised when the guided generator cannot take any further action."""
# todo: add cardinality considerations
class GuidedQueryGen(QueryExecutor):
    """Mutates a query step by step until its runtime lands in a target window.

    Each step measures the current query's runtime, then narrows (cheaper),
    broadens (more expensive) or stays, until the runtime falls inside
    ``target`` or ``max_steps`` steps have been taken.
    """
    rqg: RandomQueryGen
    scheme: Database
    qb: QueryBuilder = None
    # NOTE(review): declared at class level, so the runtime memo is shared by
    # ALL instances — presumably intentional (dedupes identical queries across
    # generators); confirm before changing to a per-instance dict.
    mem: Dict[Query, float] = defaultdict(float)
    cb: Callable[[Query, float], None]

    def __init__(
            self,
            db_client: DatabaseClient,
            target: Tuple[float, float],
            stop_early: bool = False,
            max_steps: int = 100,
            name: str = None,
            cb: Callable[[Query, float], None] = None,
            query_logger: logging.Logger = None,
            seed: int = None,
            extended: bool = False
    ):
        '''
        :param db_client: database client used to execute and time queries
        :param target: a tuple with (min, max) values to consider a "hit"
        :param stop_early: if true, returns done on first occurrence of a "hit"
        :param max_steps: limit number of steps
        :param name: display name used in log lines (defaults to a global counter)
        :param cb: callback to allow extra work on each (query, runtime) pair
        :param query_logger: logger that records executed queries
        :param seed: random seed forwarded to the query builder
        :param extended: if true, always collect the execution plan
        '''
        super().__init__(db_client, query_logger=query_logger, extended=extended)
        global indexer
        indexer += 1
        self.cb = cb
        self.stop_early = stop_early
        self.target = target
        self.steps = 0
        self.max_steps = max_steps if max_steps is not None else 100
        self.name = name or indexer
        # Lazily-built collaborators, see the properties below.
        self._rqg = None
        self._qb = None
        self._scheme = None
        self.seed = seed
        self.extended = extended

    @property
    def scheme(self) -> Database:
        """Database model, fetched from the client on first access."""
        if self._scheme is None:
            self._scheme = self.dbc.model()
        return self._scheme

    @property
    def qb(self) -> QueryBuilder:
        """Lazily created query builder bound to the database scheme."""
        if self._qb is None:
            self._qb = QueryBuilder(self.scheme, seed=self.seed)
        return self._qb

    @property
    def rqg(self) -> RandomQueryGen:
        """Lazily created random generator used for the initial query."""
        if self._rqg is None:
            self._rqg = RandomQueryGen(self.scheme)
        return self._rqg

    def run_query(self, analyze=True) -> Tuple[float, bool]:
        """Execute the current query (memoized) and return (runtime, hit)."""
        if self.qb.q not in self.mem:
            if analyze or self.extended:
                # The execution plan itself is not needed here, only the timings.
                plan_time, exec_time, _ = self.analyze(self.qb.q)
                took = plan_time + exec_time
            else:
                took = self.time(self.qb.q)
            if self.cb and callable(self.cb):
                self.cb(self.qb.q, took)
            self.mem[self.qb.q] = took
        runtime = self.mem[self.qb.q]
        return runtime, self.hit(runtime)

    @property
    def current_sql(self) -> str:
        """SQL text of the current query."""
        return self.qb.q.to_sql(pretty=False, alias=False)

    def randomize_initial(self):
        """Replace the current query with a fresh random one."""
        self.qb.q = self.rqg.randomize()
        self.qb.sync()

    def narrow(self):
        """Make the query cheaper: drop a projection/relation or add a condition."""
        actions = []
        if self.qb.can_remove_projection():
            actions.append(self.qb.remove_projection)
        if self.qb.can_remove_relation():
            actions.append(self.qb.remove_relation)
        actions.append(self.qb.add_condition)
        action = np.random.choice(actions)
        action()

    def stay(self):
        """Keep the runtime roughly unchanged by toggling a projection."""
        if self.qb.can_remove_projection():
            self.qb.remove_projection()
        else:
            self.qb.add_projection()

    def broaden(self):
        """Make the query more expensive; raises QueryGenError when impossible."""
        actions = []
        if self.qb.can_add_projection():
            actions.append(self.qb.add_projection)
        if self.qb.can_add_relation():
            actions.append(self.qb.add_relation)
        if self.qb.can_remove_condition():
            actions.append(self.qb.remove_condition)
        if self.qb.can_replace_join():
            actions.append(self.qb.replace_join)
        if not actions:
            raise QueryGenError('no more options to broaden')
        action = np.random.choice(actions)
        action()

    # todo: add a stupid condition, like for a range [0,1], add > 0.1 and < 0.09,
    # just to add another scan over the data
    # generally this may not have much of an effect, but, for joins, it can wreck havoc
    def select_next_action(self, runtime):
        """Pick narrow/broaden/stay depending on where runtime sits vs the target window."""
        _min, _max = self.target
        if runtime > _max:
            return self.narrow
        elif runtime < _min:
            return self.broaden
        else:
            return self.stay

    def step(self, prev_runtime: float) -> Tuple[float, str, bool]:
        """Apply one mutation and re-run; returns (runtime, action name, done)."""
        # BUG FIX: self.steps was never incremented, so the
        # `self.steps >= self.max_steps` termination condition could never fire.
        self.steps += 1
        action = self.select_next_action(prev_runtime)
        action()
        runtime, hit = self.run_query()
        done = (self.stop_early and hit) or self.steps >= self.max_steps
        return runtime, action.__name__, done

    def hit(self, runtime: float) -> bool:
        """True when runtime is inside the inclusive target window."""
        return self.target[0] <= runtime <= self.target[1]

    def guide(self):
        """Step until done, logging each transition."""
        self.steps += 1
        runtime, done = self.run_query()
        while not done:
            prev_runtime = runtime
            runtime, action_took, done = self.step(runtime)
            logger.info(f'step: {self.steps - 1}, prev: {prev_runtime}, action: {action_took}, runtime: {runtime}')
class BalancedQueryGen:
    """Drives a pool of GuidedQueryGen instances to build a runtime-balanced query set.

    Keeps a queue of generators, each aimed at the currently least-populated
    runtime partition, and steps them round-robin until ``n`` distinct queries
    have been collected.
    """

    def __init__(
            self,
            db_client: DatabaseClient,
            # NOTE(review): this default instance is created once at import time
            # and shared across calls — fine only if Log2Partitioner is stateless.
            partitioner: Partitioner = Log2Partitioner(min_value=1, max_value=2 ** 8),
            cb: Callable[[Query, float], None] = None,
            q_depth: int = 10,
            checkpoint: bool = False,
            patience: Optional[int] = 10,
            name_postfix: Optional[str] = None,
            extended=False
    ):
        self.partitioner = partitioner
        self.partitions = [0] * partitioner.k
        self.user_cb = cb
        self.checkpoint_path: str = f'{db_client.humanize_target()}.qcp'
        # use queue depth to counter postgres's caching, by checking last
        self.q: Queue[Tuple[GuidedQueryGen, float]] = Queue(q_depth)
        self.mem: Dict[Query, float] = defaultdict(float)
        self.patience = patience
        self.extended = extended

        def wrapped_cb(q: Query, runtime: float):
            # Record every new successfully-timed query and update the
            # partition histogram before forwarding to the user callback.
            if runtime > 0 and q not in self.mem:
                self.mem[q] = runtime
                partition = self.partitioner.partition(runtime)
                self.partitions[partition] += 1
                if cb is not None:
                    cb(q, runtime)
        self.db_client = db_client
        self.wrapped_cb = wrapped_cb
        # BUG FIX: the conditional expression binds looser than `or`, so the
        # original `name_postfix or "_extended" if self.extended else ""`
        # parsed as `(name_postfix or "_extended") if self.extended else ""`
        # and discarded a caller-supplied postfix whenever extended was False.
        name_postfix = name_postfix or ("_extended" if self.extended else "")
        filename = os.path.join(
            'runtimes',
            f'{db_client.humanize_target()}_{str(int(time.time()))}{name_postfix}.csv'
        )
        self.query_logger = log_utils.rotating_logger(filename=filename)
        self.checkpoint = checkpoint

    def load_checkpoint(self):
        """Re-seed the generator queue from a previously saved checkpoint file."""
        if not os.path.exists(self.checkpoint_path):
            return
        with open(self.checkpoint_path) as cp:
            lines = cp.readlines()
        logger.info(f'loading {len(lines)} queries')
        if len(lines) > self.q.maxsize:
            self.q = Queue(maxsize=len(lines))
        for idx, line in enumerate(lines):
            try:
                query = SQLParser.to_query(line)
                # TODO: this is somewhat pointless, as we don't save the old distribution
                min_partition = rargmin(self.partitions)
                gqg = GuidedQueryGen(
                    db_client=self.db_client,
                    cb=self.wrapped_cb,
                    target=self.partitioner.bounds(min_partition),
                    # share logger
                    query_logger=self.query_logger,
                    extended=self.extended
                )
                gqg.qb.q = query
                gqg.qb.sync()
                self.q.put((gqg, 0))
            except Exception:
                # Skip unparsable checkpoint lines (was a bare except).
                pass

    def save_checkpoint(self):
        """Persist the SQL of every queued generator so a later run can resume."""
        with open(self.checkpoint_path, 'w+') as cp:
            for qgq, _ in list(self.q.queue):
                cp.write(qgq.qb.q.to_sql(pretty=False, alias=False) + '\n')

    def generate(self, n: int = 100000):
        """Produce queries until *n* distinct runtimes have been recorded."""
        i = 0
        if self.checkpoint:
            self.load_checkpoint()
        # Per-generator (partition, consecutive-count) used by the patience cutoff.
        # BUG FIX: the factory was `lambda x: (0, 0)`, but default_factory is
        # called with no arguments, so the first lookup raised TypeError (then
        # silently swallowed below) and patience never worked.
        tracking: Dict[int, Tuple[int, int]] = defaultdict(lambda: (0, 0))
        logger.info('starting query generation loop')
        while len(self.mem) <= n:
            if self.checkpoint and i > 0 and i % self.q.maxsize == 0:
                self.save_checkpoint()
            if not self.q.full():
                # Top the pool up with a generator aimed at the emptiest partition.
                min_partition = rargmin(self.partitions)
                self.q.put((
                    GuidedQueryGen(
                        db_client=self.db_client,
                        cb=self.wrapped_cb,
                        target=self.partitioner.bounds(min_partition),
                        # share logger
                        query_logger=self.query_logger,
                        extended=self.extended  # consistency with load_checkpoint
                    ), 0)
                )
                continue
            gqn, prev = self.q.get()
            i += 1
            done = True
            runtime = -1
            if prev == 0:
                # First run for this generator: seed it with a random query.
                if len(gqn.qb.q) == 0:
                    gqn.randomize_initial()
                try:
                    runtime, done = gqn.run_query()
                except Exception:
                    # Best effort: a failing query simply retires the generator.
                    pass
            else:
                try:
                    runtime, _, done = gqn.step(prev)
                    if self.patience is not None:
                        prev_partition, seq_length = tracking[id(gqn)]
                        current_partition = self.partitioner.partition(runtime)
                        reset_count = current_partition != prev_partition
                        seq_length = 0 if reset_count else seq_length + 1
                        if seq_length > self.patience:
                            # Stuck in one partition for too long: retire it.
                            done = True
                            del tracking[id(gqn)]
                        else:
                            tracking[id(gqn)] = current_partition, seq_length
                except Exception:
                    # Was a bare except; narrowed so Ctrl-C still stops the loop.
                    pass
            logger.info(f'[{gqn.name}:{gqn.steps}] - {runtime}')
            if not done:
                self.q.put((gqn, runtime))
logger.info(f'BalancedQueryGen.generate | i:{i} - partitions: {self.partitions}') | en | 0.839311 | # todo: add cardinality considerations :param db_client: :param target: a tuple with (min, max) values to consider a "hit" :param stop_early: if true, returns done on first occurrence of a "hit" :param max_steps: limit number of steps :param cb: ball back to allow extra work on query, runtime tuple # todo: add a stupid condition, like for a range [0,1], add > 0.1 and < 0.09, # just to add another scan over the data # generally this may not have much of an effect, but, for joins, it can wreck havoc # use queue depth to counter postgres's caching, by checking last # TODO: this is somewhat pointless, as we don't save the old distribution # share logger # share logger | 2.106559 | 2 |
fixture/session.py | technodeath/pyth | 0 | 6621405 | <filename>fixture/session.py
class SessionHelper:
    """Fixture helper for signing in and out of the application under test."""
    def __init__(self, app):
        # `app` is the application fixture that owns the selenium webdriver (`app.wd`).
        self.app = app
    def logout(self):
        """Sign the current user out via the header link."""
        wd = self.app.wd
        wd.find_element_by_link_text("Sign out").click()
    def login(self, username, password):
        """Open the home page and submit the sign-in form with the given credentials."""
        wd = self.app.wd
        self.app.open_home_page()
        wd.find_element_by_link_text("Sign in").click()
        wd.find_element_by_id("email").click()
        wd.find_element_by_id("email").clear()
        wd.find_element_by_id("email").send_keys(username)
        wd.find_element_by_id("passwd").clear()
        wd.find_element_by_id("passwd").send_keys(password)
        wd.find_element_by_xpath("//button[@id='SubmitLogin']/span").click() | <filename>fixture/session.py
class SessionHelper:
    """Fixture helper for signing in and out of the application under test."""
    def __init__(self, app):
        # `app` is the application fixture that owns the selenium webdriver (`app.wd`).
        self.app = app
    def logout(self):
        """Sign the current user out via the header link."""
        wd = self.app.wd
        wd.find_element_by_link_text("Sign out").click()
    def login(self, username, password):
        """Open the home page and submit the sign-in form with the given credentials."""
        wd = self.app.wd
        self.app.open_home_page()
        wd.find_element_by_link_text("Sign in").click()
        wd.find_element_by_id("email").click()
        wd.find_element_by_id("email").clear()
        wd.find_element_by_id("email").send_keys(username)
        wd.find_element_by_id("passwd").clear()
        wd.find_element_by_id("passwd").send_keys(password)
        wd.find_element_by_xpath("//button[@id='SubmitLogin']/span").click() | none | 1 | 2.528485 | 3 |
index.py | Pedroh1510/Video-maker-python | 2 | 6621406 | <gh_stars>1-10
import os
from robots.userInput import user
from robots.text import robotText
# # from robots.voice import robotVoice
from robots.images import robotImages
from robots.userInputEnv import userEnv
from robots.video import robotVideo
from robots.youtube import robotYoutube
from robots.state import loadContent
import json
def start():
    """Run the whole video-generation pipeline.

    Tries to read the configuration from environment variables first and
    falls back to interactive input, then runs each robot in order.
    """
    try:
        userEnv()
    except Exception:
        # No/invalid environment configuration: ask the user interactively.
        # (Was a bare except; narrowed so Ctrl-C still aborts the script.)
        user()
    robotText()
    robotImages()
    robotVideo()
    robotYoutube()
    # robotVoice()
if __name__ == "__main__":
    # Script entry point: run the pipeline and report start/end on stdout.
    print('> Start!')
    start()
    print('> Terminated')
| import os
from robots.userInput import user
from robots.text import robotText
# # from robots.voice import robotVoice
from robots.images import robotImages
from robots.userInputEnv import userEnv
from robots.video import robotVideo
from robots.youtube import robotYoutube
from robots.state import loadContent
import json
def start():
    """Run the whole video-generation pipeline.

    Tries to read the configuration from environment variables first and
    falls back to interactive input, then runs each robot in order.
    """
    try:
        userEnv()
    except Exception:
        # No/invalid environment configuration: ask the user interactively.
        # (Was a bare except; narrowed so Ctrl-C still aborts the script.)
        user()
    robotText()
    robotImages()
    robotVideo()
    robotYoutube()
    # robotVoice()
# Script entry point: run the pipeline and report start/end on stdout.
if __name__ == "__main__":
    print('> Start!')
    start()
    # Leftover debug output of the generated sentences:
    # print(json.dumps(loadContent()['sentences'], indent=2))
    # print(json.dumps(loadContent()['sentences']))
print('> Terminated') | en | 0.482346 | # # from robots.voice import robotVoice # robotVoice() # print(json.dumps(loadContent()['sentences'], indent=2)) # print(json.dumps(loadContent()['sentences'])) | 2.174827 | 2 |
tests/haskell/partialParallelSimpleAdd.py | David-Durst/aetherling | 10 | 6621407 | from aetherling.modules.reduce import DefineReduceSequential, DefineReduceParallelWithIdentity, renameCircuitForReduce
from aetherling.modules.register_any_type import DefineRegisterAnyType
from aetherling.modules.term_any_type import TermAnyType
from aetherling.modules.noop import DefineNoop
from magma.backend.coreir_ import CoreIRBackend
from magma.bitutils import *
from coreir.context import *
from magma.simulator.coreir_simulator import CoreIRSimulator
import coreir
from magma.scope import Scope
from mantle.coreir.arith import *
from mantle.coreir.logic import *
from mantle.coreir.compare import *
from mantle.coreir import DefineCoreirConst
from mantle.coreir.LUT import *
from aetherling.modules.upsample import *
from aetherling.modules.downsample import *
from aetherling.modules.reduce import *
from aetherling.modules.native_linebuffer.two_dimensional_native_linebuffer import DefineTwoDimensionalLineBuffer
args = ['I0', Array[8, In(Bit)], 'I1', Array[8, In(Bit)], 'O0', Array[8, Out(Bit)], 'O1', Array[8, Out(Bit)], 'valid_data_in', In(Bit), 'ready_data_in', Out(Bit), 'valid_data_out', Out(Bit), 'ready_data_out', In(Bit), ] + ClockInterface(has_ce=True)
partialParallelSimpleAdd = DefineCircuit('partialParallelSimpleAdd_Circuit', *args)
magmaInstance0 = DefineNoop(DefineCoreirConst(8, 1))()
magmaInstance1 = DefineNoop(DefineCoreirConst(8, 1))()
magmaInstance2 = DefineCoreirConst(8, 1)()
magmaInstance3 = DefineCoreirConst(8, 1)()
magmaInstance5 = DefineAdd(8)()
magmaInstance6 = DefineAdd(8)()
wire(magmaInstance0.O, magmaInstance5.I0)
wire(magmaInstance2.O, magmaInstance5.I1)
wire(magmaInstance1.O, magmaInstance6.I0)
wire(magmaInstance3.O, magmaInstance6.I1)
wire(partialParallelSimpleAdd.I0, magmaInstance0.in_O)
wire(partialParallelSimpleAdd.I1, magmaInstance1.in_O)
wire(partialParallelSimpleAdd.O0, magmaInstance5.O)
wire(partialParallelSimpleAdd.O1, magmaInstance6.O)
wire(partialParallelSimpleAdd.ready_data_out, partialParallelSimpleAdd.ready_data_in)
wire(partialParallelSimpleAdd.valid_data_in, partialParallelSimpleAdd.valid_data_out)
ceTerm = TermAnyType(Enable)
wire(ceTerm.I, partialParallelSimpleAdd.CE)
EndCircuit()
| from aetherling.modules.reduce import DefineReduceSequential, DefineReduceParallelWithIdentity, renameCircuitForReduce
from aetherling.modules.register_any_type import DefineRegisterAnyType
from aetherling.modules.term_any_type import TermAnyType
from aetherling.modules.noop import DefineNoop
from magma.backend.coreir_ import CoreIRBackend
from magma.bitutils import *
from coreir.context import *
from magma.simulator.coreir_simulator import CoreIRSimulator
import coreir
from magma.scope import Scope
from mantle.coreir.arith import *
from mantle.coreir.logic import *
from mantle.coreir.compare import *
from mantle.coreir import DefineCoreirConst
from mantle.coreir.LUT import *
from aetherling.modules.upsample import *
from aetherling.modules.downsample import *
from aetherling.modules.reduce import *
from aetherling.modules.native_linebuffer.two_dimensional_native_linebuffer import DefineTwoDimensionalLineBuffer
# Circuit interface: two 8-bit data inputs (I0/I1), two 8-bit outputs (O0/O1),
# a ready/valid handshake pair in each direction, and a clock with clock-enable.
args = ['I0', Array[8, In(Bit)], 'I1', Array[8, In(Bit)], 'O0', Array[8, Out(Bit)], 'O1', Array[8, Out(Bit)], 'valid_data_in', In(Bit), 'ready_data_in', Out(Bit), 'valid_data_out', Out(Bit), 'ready_data_out', In(Bit), ] + ClockInterface(has_ce=True)
partialParallelSimpleAdd = DefineCircuit('partialParallelSimpleAdd_Circuit', *args)
# Instances 0/1 are Noop-wrapped constants used as pass-throughs for the
# external inputs (each circuit input is wired to in_O, its output taken from O);
# instances 2/3 are plain constant-1 sources feeding the adders' second operands.
magmaInstance0 = DefineNoop(DefineCoreirConst(8, 1))()
magmaInstance1 = DefineNoop(DefineCoreirConst(8, 1))()
magmaInstance2 = DefineCoreirConst(8, 1)()
magmaInstance3 = DefineCoreirConst(8, 1)()
# Two parallel 8-bit adder lanes.
magmaInstance5 = DefineAdd(8)()
magmaInstance6 = DefineAdd(8)()
wire(magmaInstance0.O, magmaInstance5.I0)
wire(magmaInstance2.O, magmaInstance5.I1)
wire(magmaInstance1.O, magmaInstance6.I0)
wire(magmaInstance3.O, magmaInstance6.I1)
wire(partialParallelSimpleAdd.I0, magmaInstance0.in_O)
wire(partialParallelSimpleAdd.I1, magmaInstance1.in_O)
wire(partialParallelSimpleAdd.O0, magmaInstance5.O)
wire(partialParallelSimpleAdd.O1, magmaInstance6.O)
# The handshake is combinational: ready/valid are passed straight through.
wire(partialParallelSimpleAdd.ready_data_out, partialParallelSimpleAdd.ready_data_in)
wire(partialParallelSimpleAdd.valid_data_in, partialParallelSimpleAdd.valid_data_out)
# Clock-enable is unused; terminate it so the netlist has no dangling input.
ceTerm = TermAnyType(Enable)
wire(ceTerm.I, partialParallelSimpleAdd.CE)
EndCircuit()
| none | 1 | 1.545103 | 2 | |
src/service/ssm.py | neovasili/aws-codebuild | 4 | 6621408 | <gh_stars>1-10
import boto3
import logging
class SSMService:
    """Thin wrapper around the AWS SSM Parameter Store client."""

    def __init__(self, region: str, logger: logging.Logger):
        """
        Args:
            region: AWS region used to build the boto3 SSM client.
            logger: Logger used for diagnostics.
        """
        self.__logger = logger
        self.ssm_client = boto3.client("ssm", region_name=region)

    def get_buildspec_override_feature_flag(self):
        """
        Return True when the '/github/buildspec/override' SSM parameter holds
        the literal string "True"; False otherwise (including when the
        parameter is missing or the lookup fails).
        """
        try:
            response = self.ssm_client.get_parameter(
                Name="/github/buildspec/override",
            )
            # Read the value once instead of doing the nested lookup twice.
            value = response["Parameter"]["Value"]
            self.__logger.info(value)
            if value == "True":
                return True
        except Exception:
            # Log the actual failure instead of silently assuming the flag is
            # off: a credentials/permission error would otherwise be
            # indistinguishable from "parameter not set".
            self.__logger.debug("Buildspec override is not enabled", exc_info=True)
        return False

    def get_override_image(self, ssm_parameter: str, commit_id: str, tag: str = None, tag_prefix: str = None):
        """
        Resolve the container image repo stored in ``ssm_parameter`` and
        append a tag.

        Precedence: explicit ``tag`` > ``tag_prefix`` + commit id > commit id.

        Args:
            ssm_parameter: Name of the SSM parameter holding the image repo.
            commit_id: Commit SHA used as (part of) the tag.
            tag: Explicit tag; takes precedence over everything else.
            tag_prefix: Optional prefix combined with commit_id as
                "<prefix>_<commit_id>".
        Returns:
            Fully qualified "repo:tag" image reference.
        """
        response = self.ssm_client.get_parameter(
            Name=ssm_parameter,
        )
        image = response["Parameter"]["Value"]
        if tag is not None:
            self.__logger.debug(f"Going to use image with tag {tag}")
            return f"{image}:{tag}"
        if tag_prefix is not None:
            self.__logger.debug(f"Going to use image with tag prefix {tag_prefix} and commit id {commit_id}")
            return f"{image}:{tag_prefix}_{commit_id}"
        self.__logger.debug(f"Going to use image with commit id {commit_id}")
        return f"{image}:{commit_id}"
| import boto3
import logging
class SSMService:
    """Thin wrapper around the AWS SSM Parameter Store client."""
    def __init__(self, region: str, logger: logging.Logger):
        self.__logger = logger
        self.ssm_client = boto3.client("ssm", region_name=region)
    def get_buildspec_override_feature_flag(self):
        """Return True iff the '/github/buildspec/override' parameter is the string "True"."""
        try:
            response = self.ssm_client.get_parameter(
                Name="/github/buildspec/override",
            )
            self.__logger.info(response["Parameter"]["Value"])
            if response["Parameter"]["Value"] == "True":
                return True
        except Exception:
            # Any lookup failure (missing parameter, permissions, ...) is
            # treated as "flag disabled".
            self.__logger.debug("Buildspec override is not enabled")
        return False
    def get_override_image(self, ssm_parameter: str, commit_id: str, tag: str = None, tag_prefix: str = None):
        """Resolve the image repo stored in ``ssm_parameter`` and tag it.

        Precedence: explicit ``tag`` > ``tag_prefix`` + commit id > commit id.
        """
        response = self.ssm_client.get_parameter(
            Name=ssm_parameter,
        )
        image = response["Parameter"]["Value"]
        if tag is not None:
            self.__logger.debug(f"Going to use image with tag {tag}")
            return f"{image}:{tag}"
        if tag_prefix is not None:
            self.__logger.debug(f"Going to use image with tag prefix {tag_prefix} and commit id {commit_id}")
            return f"{image}:{tag_prefix}_{commit_id}"
        self.__logger.debug(f"Going to use image with commit id {commit_id}")
        return f"{image}:{commit_id}"
contrib/discodex/lib/discodex/restapi/serdes.py | kostis/disco | 1 | 6621409 | """
Serializers/Deserializers are the main tools for automatic RESTful APIs.
Users can use the builtins automagically or define their own.
"""
from django.core import serializers
class SerDes(object):
    """
    Base class for a serializer/deserializer. The default implementations are
    for demonstration only (``deserialize`` uses ``eval``); use a subclass.
    """
    # Delimiters used when joining/splitting composite serializations.
    opener, separator, closer = ('[', ', ', ']')

    def serialize(self, obj):
        """Serialize a single object to its repr()."""
        return '%r' % obj

    def serialize_all(self, obj_iter):
        """Serialize an iterable of objects into one delimited string."""
        return '%s%s%s' % (self.opener, self.separator.join([self.serialize(obj) for obj in obj_iter]), self.closer)

    def deserialize(self, string):
        # WARNING: eval() on untrusted input is arbitrary code execution;
        # subclasses should override this with a safe parser.
        return eval(string)

    def deserialize_all(self, composite_string):
        """Lazily deserialize a string produced by serialize_all()."""
        # Fixed: previously called the bare name ``deserialize`` (NameError at
        # iteration time); it must dispatch through self.
        return (self.deserialize(string) for string in composite_string.lstrip(self.opener).rstrip(self.closer).split(self.separator))
class BuiltinSerDes(SerDes):
    """SerDes backed by Django's built-in serializer framework."""

    def __init__(self, format, fields=None):
        """Remember a Django serialization format ('xml', 'json', ...) and an optional field whitelist."""
        self.format = format
        self.fields = fields

    def serialize(self, obj):
        """Serialize one object (Django serializers operate on iterables)."""
        return serializers.serialize(self.format, [obj], fields=self.fields)

    def serialize_all(self, obj_iter):
        """Serialize every object in the iterable in one pass."""
        return serializers.serialize(self.format, obj_iter, fields=self.fields)

    def deserialize(self, string):
        """Deserialize exactly one object; fails if the payload holds more."""
        objs = list(serializers.deserialize(self.format, string))
        assert len(objs) == 1
        return objs.pop()

    def deserialize_all(self, composite_string):
        """Return Django's lazy deserialization iterator."""
        return serializers.deserialize(self.format, composite_string)
# Convenience singletons
XMLSerDes = BuiltinSerDes('xml')
JSONSerDes = BuiltinSerDes('json')
| """
Serializers/Deserializers are the main tools for automatic RESTful APIs.
Users can use the builtins automagically or define their own.
"""
from django.core import serializers
class SerDes(object):
    """
    Base class for a serializer/deserializer. The default implementations are
    for demonstration only (``deserialize`` uses ``eval``); use a subclass.
    """
    # Delimiters used when joining/splitting composite serializations.
    opener, separator, closer = ('[', ', ', ']')

    def serialize(self, obj):
        """Serialize a single object to its repr()."""
        return '%r' % obj

    def serialize_all(self, obj_iter):
        """Serialize an iterable of objects into one delimited string."""
        return '%s%s%s' % (self.opener, self.separator.join([self.serialize(obj) for obj in obj_iter]), self.closer)

    def deserialize(self, string):
        # WARNING: eval() on untrusted input is arbitrary code execution;
        # subclasses should override this with a safe parser.
        return eval(string)

    def deserialize_all(self, composite_string):
        """Lazily deserialize a string produced by serialize_all()."""
        # Fixed: previously called the bare name ``deserialize`` (NameError at
        # iteration time); it must dispatch through self.
        return (self.deserialize(string) for string in composite_string.lstrip(self.opener).rstrip(self.closer).split(self.separator))
class BuiltinSerDes(SerDes):
    """SerDes backed by Django's built-in serializer framework."""
    def __init__(self, format, fields=None):
        # format: a Django serialization format name ('xml', 'json', ...).
        # fields: optional whitelist of model fields to serialize.
        self.format = format
        self.fields = fields
    def serialize(self, obj):
        """Serialize one object (Django serializers operate on iterables)."""
        return serializers.serialize(self.format, [obj], fields=self.fields)
    def serialize_all(self, obj_iter):
        """Serialize every object in the iterable in one pass."""
        return serializers.serialize(self.format, obj_iter, fields=self.fields)
    def deserialize(self, string):
        """Deserialize exactly one object; asserts the payload holds one."""
        deserialized_objects = list(serializers.deserialize(self.format, string))
        assert len(deserialized_objects) == 1
        return deserialized_objects.pop()
    def deserialize_all(self, composite_string):
        """Return Django's lazy deserialization iterator."""
        return serializers.deserialize(self.format, composite_string)
# Convenience singletons
XMLSerDes = BuiltinSerDes('xml')
JSONSerDes = BuiltinSerDes('json')
| en | 0.838493 | Serializers/Deserializers are the main tools for automatic RESTful APIs. Users can use the builtins automagically or define their own. Base class for a serializer/deserializer. Methods are implemented just for demonstration, should use a subclass. # Convenience singletons | 2.865008 | 3 |
Debugging/01 - exceptions.py | Piraato/Learn-Python | 0 | 6621410 | <gh_stars>0
# Throwing custom exceptions
#raise Exception('Now you really broke the program!')
'''
This results in the following Traceback:
Exception: Now you really broke the program!
'''
# Inside a function
def brokenCalculator(num1, operator, num2):
    """Validate a calculator expression and echo it when everything checks out.

    Raises Exception('Invalid operator') for a multi-character operator and
    Exception('Invalid first number') / Exception('Invalid second number')
    for operands that are not decimal-digit strings.
    """
    if len(operator) != 1:
        raise Exception('Invalid operator')
    # Operands are checked in order, so the first problem wins.
    for value, message in ((num1, 'Invalid first number'),
                           (num2, 'Invalid second number')):
        if not value.isdecimal():
            raise Exception(message)
    print(num1, operator, num2)
# Drive the validator with a mix of bad and good inputs; every failure
# (including the AttributeError from the non-string tuples) is caught and
# reported instead of stopping the script.
for n1, op, n2 in ((10, 'ok', 12), ('four', 'x', 'ten'), (1.002, 'x', '12'), ('13', 'x', 'a'), ('12', '*', '2')):
    try:
        brokenCalculator(n1, op, n2)
    except Exception as e:
        print('Whoops: ' + str(e)) # Prints the exception for every input given in the for loop
| # Throwing custom exceptions
#raise Exception('Now you really broke the program!')
'''
This results in the following Traceback:
Exception: Now you really broke the program!
'''
# Inside a function
def brokenCalculator(num1, operator, num2):
    """Validate and echo a calculator expression; raises on bad input.

    num1/num2 must be strings of decimal digits and operator a single
    character; otherwise an Exception with a descriptive message is raised.
    (Non-string operands crash earlier with AttributeError, which the demo
    loop below deliberately provokes.)
    """
    if len(operator) != 1:
        raise Exception('Invalid operator')
    if not num1.isdecimal():
        raise Exception('Invalid first number')
    if not num2.isdecimal():
        raise Exception('Invalid second number')
    print(num1, operator, num2)
# Drive the validator with a mix of bad and good inputs; every failure
# (including the AttributeError from the non-string tuples) is caught and
# reported instead of stopping the script.
for n1, op, n2 in ((10, 'ok', 12), ('four', 'x', 'ten'), (1.002, 'x', '12'), ('13', 'x', 'a'), ('12', '*', '2')):
    try:
        brokenCalculator(n1, op, n2)
    except Exception as e:
        print('Whoops: ' + str(e)) # Prints the exception for every input given in the for loop
flag_gen/to_string.py | benwernicke/crygen | 0 | 6621411 | with open("example.h", "r") as fh:
for line in fh:
line = line.strip()
print(f'"{line}\\n"')
# Emit each line of example.h as a quoted C string literal ('"...\n"') so the
# header's text can be pasted into source code as a string table.
with open("example.h", "r") as fh:
    for line in fh:
        line = line.strip()
        print(f'"{line}\\n"')
| none | 1 | 2.749815 | 3 | |
bootstrap_customizer/middleware.py | johnfraney/django-bootstrap-customizer | 46 | 6621412 | <gh_stars>10-100
from django.utils.deprecation import MiddlewareMixin
from bootstrap_customizer.models import BootstrapTheme
class BootstrapThemeMiddleware(MiddlewareMixin):
    """
    Middleware that sets the ``bootstrap_theme`` attribute on the request
    object to the BootstrapTheme linked to the request's site, if one exists.
    """
    def process_request(self, request):
        # First theme associated with the current site; the request is left
        # untouched when no theme is configured.
        theme = BootstrapTheme.objects.filter(sitebootstraptheme__site=request.site).first()
        if theme:
            request.bootstrap_theme = theme
| from django.utils.deprecation import MiddlewareMixin
from bootstrap_customizer.models import BootstrapTheme
class BootstrapThemeMiddleware(MiddlewareMixin):
    """
    Middleware that sets the ``bootstrap_theme`` attribute on the request
    object to the BootstrapTheme linked to the request's site, if one exists.
    """
    def process_request(self, request):
        # First theme associated with the current site; the request is left
        # untouched when no theme is configured.
        theme = BootstrapTheme.objects.filter(sitebootstraptheme__site=request.site).first()
        if theme:
            request.bootstrap_theme = theme
klee.py | TCatshoek/AISTRTestcaseEvaluator | 0 | 6621413 | from multiprocessing import Pool
from itertools import chain
import subprocess
import re
import os
import time
import datetime
# Use multiprocessing here since it is quite slow, and we can easily evaluate multiple testcases at the same time
def worker(args):
    """Execute a single KLEE test case and collect the error markers it hits.

    ``args`` is a (testcase, problem_path, mtime) tuple; returns a set of
    (error_name, mtime) pairs parsed from the program's stderr.
    """
    testcase, problem_path, mtime = args
    # KTEST_FILE tells the KLEE replay runtime which generated inputs to use.
    completed = subprocess.run(
        f"KTEST_FILE={testcase} {problem_path}",
        shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Instrumented binaries report reached bugs as "error_<n>" on stderr.
    return {(name, mtime)
            for name in re.findall(r"error_\d+", completed.stderr.decode())}
def klee(testcases_path, problem_path):
    """Replay all KLEE-generated test cases and collect the errors they hit.

    Returns (start_time, errors): start_time is the epoch timestamp KLEE
    started at (parsed from the "info" file) and errors is a set of
    (error_name, testcase_mtime) pairs.
    """
    # Figure out start timestamp
    lowest_mtime = None
    with testcases_path.joinpath("info").open('r') as file:
        for line in file.readlines():
            match = re.match(r'Started: (\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)', line)
            if match:
                year = int(match.group(1))
                month = int(match.group(2))
                day = int(match.group(3))
                hour = int(match.group(4))
                minutes = int(match.group(5))
                seconds = int(match.group(6))
                d = datetime.datetime(year, month, day, hour, minutes, seconds)
                # mktime interprets d in local time; callers compare against
                # testcase mtimes produced on the same machine.
                lowest_mtime = time.mktime(d.timetuple())
    assert lowest_mtime is not None, "Could not determine klee start time"
    # Each work item carries the testcase's mtime so results can be dated.
    testcases = [(x, problem_path, os.path.getmtime(x)) for x in testcases_path.glob("*.ktest")]
    n_testcases = len(testcases)
    print(f"Running {n_testcases} testcases, this might take a while...")
    # Replaying is slow and independent per testcase, so fan out over all cores.
    with Pool(os.cpu_count()) as p:
        all_errors = p.map(worker, testcases)
    all_errors = set(chain(*all_errors))
    return lowest_mtime, all_errors
| from multiprocessing import Pool
from itertools import chain
import subprocess
import re
import os
import time
import datetime
# Use multiprocessing here since it is quite slow, and we can easily evaluate multiple testcases at the same time
def worker(args):
    """Run one KLEE test case; return a set of (error_name, mtime) pairs.

    args: (testcase path, problem binary path, testcase mtime).
    """
    testcase, problem_path, mtime = args
    found_errors = set()
    # KTEST_FILE tells the KLEE replay runtime which generated inputs to use.
    output = subprocess.run([f"KTEST_FILE={testcase} {problem_path}"], shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    # Instrumented binaries report reached bugs as "error_<n>" on stderr.
    errors = re.findall(r"error_\d+", output.stderr.decode())
    for error in errors:
        found_errors.add((error, mtime))
    return found_errors
def klee(testcases_path, problem_path):
    """Replay all KLEE-generated test cases and collect the errors they hit.

    Returns (start_time, errors): start_time is the epoch timestamp KLEE
    started at (parsed from the "info" file) and errors is a set of
    (error_name, testcase_mtime) pairs.
    """
    # Figure out start timestamp
    lowest_mtime = None
    with testcases_path.joinpath("info").open('r') as file:
        for line in file.readlines():
            match = re.match(r'Started: (\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)', line)
            if match:
                year = int(match.group(1))
                month = int(match.group(2))
                day = int(match.group(3))
                hour = int(match.group(4))
                minutes = int(match.group(5))
                seconds = int(match.group(6))
                d = datetime.datetime(year, month, day, hour, minutes, seconds)
                # mktime interprets d in local time; callers compare against
                # testcase mtimes produced on the same machine.
                lowest_mtime = time.mktime(d.timetuple())
    assert lowest_mtime is not None, "Could not determine klee start time"
    # Each work item carries the testcase's mtime so results can be dated.
    testcases = [(x, problem_path, os.path.getmtime(x)) for x in testcases_path.glob("*.ktest")]
    n_testcases = len(testcases)
    print(f"Running {n_testcases} testcases, this might take a while...")
    # Replaying is slow and independent per testcase, so fan out over all cores.
    with Pool(os.cpu_count()) as p:
        all_errors = p.map(worker, testcases)
    all_errors = set(chain(*all_errors))
    return lowest_mtime, all_errors
| en | 0.890607 | # Use multiprocessing here since it is quite slow, and we can easily evaluate multiple testcases at the same time # Figure out start timestamp | 2.801365 | 3 |
cybo/data/dataset_readers/dataset_reader.py | bo-ke/cybo | 2 | 6621414 | <gh_stars>1-10
# -*- coding: utf-8 -*-
'''
@author: kebo
@contact: <EMAIL>
@version: 1.0
@file: dataset_reader.py
@time: 2020/12/16 22:25:09
这一行开始写关于本文件的说明与解释
'''
import json
import tensorflow as tf
from collections import Iterator
from pydantic import BaseModel
from typing import List, Dict
from cybo.common.logger import logger
from cybo.common.checks import ConfigurationError
from cybo.data.vocabulary import Vocabulary
from cybo.data.tokenizers import Tokenizer
class InputExample(BaseModel):
    """Base pydantic model for one raw dataset example.

    ``guid`` identifies the example and is excluded from vocabulary counting.
    """
    guid: int
    def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):
        """Tally every field value (or list element) into ``counter[field_name]``."""
        for k, v in self.dict(exclude={"guid": ...}).items():
            if isinstance(v, list):
                for i in v:
                    counter[k][i] += 1
            else:
                counter[k][v] += 1
class InputFeatures(BaseModel):
    """Model-ready features built from an InputExample by a DatasetReader."""
    # Token ids after vocabulary lookup.
    input_ids: List[int]
    @classmethod
    def output_types(cls):
        """Return the tf dtype of each feature field; subclasses must override."""
        # return {"input_ids": tf.int32, "label": tf.int32}
        raise NotImplementedError
class DatasetReader():
    """Base class: reads raw files into InputExamples and converts them to InputFeatures."""
    def __init__(self, tokenizer: Tokenizer = None) -> None:
        self._tokenizer = tokenizer
    def get_examples(self, filepath) -> List[InputExample]:
        """Parse the file at ``filepath`` into examples; subclasses must override."""
        raise NotImplementedError
    @classmethod
    def read_file(cls, filepath):
        """Return all lines of a UTF-8 text file."""
        lines = open(filepath, "r", encoding="utf-8").readlines()
        return lines
    def convert_examples_to_features(
            self, examples: List[InputExample],
            vocab: Vocabulary, max_seq_length: int = 32,
            verbose: bool = False):
        """Convert every example via _convert_example_to_features.

        When ``verbose``, the first few example/feature pairs are printed for
        debugging.
        """
        features = []
        for (ex_index, example) in enumerate(examples):
            features_item = self._convert_example_to_features(
                example, vocab, max_seq_length)
            if verbose:
                if ex_index <= 5:
                    print("*** Example ** *")
                    for k, v in example.dict().items():
                        print(f"{k}: {v}")
                    print("*** Features ** *")
                    for k, v in features_item.dict().items():
                        print(f"{k}: {v}")
            features.append(features_item)
        return features
    def _convert_example_to_features(
            self, example: InputExample,
            vocab: Vocabulary, max_seq_length: int = 32) -> InputFeatures:
        """Convert one example into features; subclasses must override."""
        raise NotImplementedError
    def encode_plus(self, text, *args, **kwargs) -> Dict:
        """Tokenize/encode raw text into model inputs; subclasses must override."""
        raise NotImplementedError
    @classmethod
    def _truncated_add_padded(cls, tokens, max_seq_length, padding_token=0):
        """Truncate ``tokens`` to ``max_seq_length`` or right-pad with ``padding_token``."""
        if len(tokens) > max_seq_length:
            tokens = tokens[:max_seq_length]
        else:
            tokens = tokens + [padding_token] * (max_seq_length - len(tokens))
        return tokens
| # -*- coding: utf-8 -*-
'''
@author: kebo
@contact: <EMAIL>
@version: 1.0
@file: dataset_reader.py
@time: 2020/12/16 22:25:09
这一行开始写关于本文件的说明与解释
'''
import json
import tensorflow as tf
from collections import Iterator
from pydantic import BaseModel
from typing import List, Dict
from cybo.common.logger import logger
from cybo.common.checks import ConfigurationError
from cybo.data.vocabulary import Vocabulary
from cybo.data.tokenizers import Tokenizer
class InputExample(BaseModel):
    """Base pydantic model for one raw dataset example.

    ``guid`` identifies the example and is excluded from vocabulary counting.
    """
    guid: int
    def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):
        """Tally every field value (or list element) into ``counter[field_name]``."""
        for k, v in self.dict(exclude={"guid": ...}).items():
            if isinstance(v, list):
                for i in v:
                    counter[k][i] += 1
            else:
                counter[k][v] += 1
class InputFeatures(BaseModel):
    """Model-ready features built from an InputExample by a DatasetReader."""
    # Token ids after vocabulary lookup.
    input_ids: List[int]
    @classmethod
    def output_types(cls):
        """Return the tf dtype of each feature field; subclasses must override."""
        # return {"input_ids": tf.int32, "label": tf.int32}
        raise NotImplementedError
class DatasetReader():
    """Base class: reads raw files into InputExamples and converts them to InputFeatures."""
    def __init__(self, tokenizer: Tokenizer = None) -> None:
        self._tokenizer = tokenizer
    def get_examples(self, filepath) -> List[InputExample]:
        """Parse the file at ``filepath`` into examples; subclasses must override."""
        raise NotImplementedError
    @classmethod
    def read_file(cls, filepath):
        """Return all lines of a UTF-8 text file."""
        lines = open(filepath, "r", encoding="utf-8").readlines()
        return lines
    def convert_examples_to_features(
            self, examples: List[InputExample],
            vocab: Vocabulary, max_seq_length: int = 32,
            verbose: bool = False):
        """Convert every example via _convert_example_to_features.

        When ``verbose``, the first few example/feature pairs are printed for
        debugging.
        """
        features = []
        for (ex_index, example) in enumerate(examples):
            features_item = self._convert_example_to_features(
                example, vocab, max_seq_length)
            if verbose:
                if ex_index <= 5:
                    print("*** Example ** *")
                    for k, v in example.dict().items():
                        print(f"{k}: {v}")
                    print("*** Features ** *")
                    for k, v in features_item.dict().items():
                        print(f"{k}: {v}")
            features.append(features_item)
        return features
    def _convert_example_to_features(
            self, example: InputExample,
            vocab: Vocabulary, max_seq_length: int = 32) -> InputFeatures:
        """Convert one example into features; subclasses must override."""
        raise NotImplementedError
    def encode_plus(self, text, *args, **kwargs) -> Dict:
        """Tokenize/encode raw text into model inputs; subclasses must override."""
        raise NotImplementedError
    @classmethod
    def _truncated_add_padded(cls, tokens, max_seq_length, padding_token=0):
        """Truncate ``tokens`` to ``max_seq_length`` or right-pad with ``padding_token``."""
        if len(tokens) > max_seq_length:
            tokens = tokens[:max_seq_length]
        else:
            tokens = tokens + [padding_token] * (max_seq_length - len(tokens))
        return tokens
ex102.py | Gustavo-Dev-Web/python | 0 | 6621415 | def fatorial(inicio,show):
from math import factorial
for c in range(inicio,0,-1):
if show:
print(f'{c}',end=' ')
if c > 1:
print('x ',end='')
else:
print('= ',end='')
print(factorial(inicio))
return factorial(inicio)
fatorial(inicio=6,show=False)
def fatorial(inicio, show=False):
    """Compute inicio! (factorial), optionally printing the expansion.

    Args:
        inicio: non-negative integer whose factorial is computed.
        show: when True, print the multiplication chain
            ("5 x 4 x ... x 1 = ") before the result. Now defaults to False
            so the argument is optional for plain computation.
    Returns:
        The factorial of ``inicio``.
    """
    from math import factorial
    # Compute once instead of calling factorial() twice (print + return).
    resultado = factorial(inicio)
    if show:
        for c in range(inicio, 0, -1):
            print(f'{c}', end=' ')
            # 'x' between factors, '=' before the final result.
            print('x ' if c > 1 else '= ', end='')
    print(resultado)
    return resultado
fatorial(inicio=6,show=False)
| none | 1 | 3.812068 | 4 | |
AdvBox/attacks/gradient_method.py | csdongxian/PaddleSleeve | 0 | 6621416 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides the implementation for FGSM attack method.
"""
from __future__ import division
import logging
from collections import Iterable
import numpy as np
import paddle
from .base import Attack
__all__ = [
'GradientMethodAttack', 'FastGradientSignMethodAttack', 'FGSM',
'FastGradientSignMethodTargetedAttack', 'FGSMT',
'BasicIterativeMethodAttack', 'BIM',
'IterativeLeastLikelyClassMethodAttack', 'ILCM',
'MomentumIteratorAttack', 'MIFGSM',
'ProjectedGradientDescentAttack', 'PGD'
]
class GradientMethodAttack(Attack):
    """
    This class implements gradient attack method, and is the base of FGSM, BIM, ILCM, etc.
    """
    def __init__(self, model, support_targeted=True, pgd_flag=False):
        """
        Args:
            model: An instance of a paddle model to be attacked.
            support_targeted: whether this attack method supports targeted attacks.
            pgd_flag: set True to enable the PGD-style clipping step in _apply.
        """
        super(GradientMethodAttack, self).__init__(model)
        self.support_targeted = support_targeted
        self.pgd_flag = pgd_flag
    def _apply(self,
               adversary,
               norm_ord=None,
               epsilons=0.01,
               epsilon_steps=10,
               steps=100,
               perturb=16.0 / 256,
               ):
        """
        Apply the gradient attack method.
        Args:
            adversary: The Adversary object.
            norm_ord: Order of the norm, such as np.inf, 1, 2, etc. It can't be 0.
            epsilons: Attack step size (input variation). Largest step size if epsilons is not iterable.
            epsilon_steps: The number of Epsilons' iteration for each attack iteration.
            steps: The number of attack iteration.
            perturb: relative clipping budget, used only when pgd_flag is True.
        Returns:
            adversary(Adversary): The Adversary object.
        """
        # Default to L-infinity; L0 is rejected because the per-step
        # normalization below is undefined for it.
        if norm_ord is None:
            norm_ord = np.inf
        if norm_ord == 0:
            raise ValueError("L0 norm is not supported!")
        if not self.support_targeted:
            if adversary.is_targeted_attack:
                raise ValueError(
                    "This attack method doesn't support targeted attack!")
        # A scalar epsilon is expanded into an increasing schedule of step
        # sizes; each is tried in turn until the attack succeeds.
        if not isinstance(epsilons, Iterable):
            if epsilon_steps == 1:
                epsilons = [epsilons]
            else:
                epsilons = np.linspace(0, epsilons, num=epsilon_steps)
        # Sanity-check the sample layout against the model's channel axis.
        assert self.model.channel_axis == adversary.original.ndim
        assert (self.model.channel_axis == 1 or
                self.model.channel_axis == adversary.original.shape[0] or
                self.model.channel_axis == adversary.original.shape[-1])
        original_label = adversary.original_label
        min_, max_ = self.model.bounds
        # Work on a batched float32 tensor copy of the original sample.
        adv_img = adversary.original
        if len(adv_img.shape) < 4:
            adv_img = np.expand_dims(adv_img, axis=0)
        adv_img = paddle.to_tensor(adv_img, dtype='float32', place=self._device)
        adv_img.stop_gradient = False
        if adversary.is_targeted_attack:
            target_label = adversary.target_label
            target_label = paddle.to_tensor(target_label, dtype='int64', place=self._device)
        for epsilon in epsilons[:]:
            if epsilon == 0.0:
                continue
            for step in range(steps):
                # Targeted: descend towards the target label (negated
                # gradient); untargeted: ascend away from the true label.
                if adversary.is_targeted_attack:
                    gradient = - self.model.gradient(adv_img, target_label)
                else:
                    gradient = self.model.gradient(adv_img, original_label)
                gradient = paddle.to_tensor(gradient, dtype='float32', place=self._device)
                # L-inf uses the gradient sign; other norms scale the raw
                # gradient by its norm.
                if norm_ord == np.inf:
                    gradient_norm = paddle.sign(gradient)
                else:
                    gradient_norm = gradient / self._norm(gradient.numpy(), ord=norm_ord)
                if len(adv_img.shape) < 4:
                    adv_img = np.expand_dims(adv_img.numpy(), axis=0)
                if self.pgd_flag:
                    # linf
                    # NOTE(review): the projection bounds are derived
                    # multiplicatively from the *current* adversarial image,
                    # not additively around the original sample as in standard
                    # PGD -- confirm this is intended.
                    adv_img = adv_img + gradient_norm * epsilon
                    clip_max = np.clip(adv_img.numpy() * (1.0 + perturb), min_, max_)
                    clip_min = np.clip(adv_img.numpy() * (1.0 - perturb), min_, max_)
                    adv_img = np.clip(adv_img.numpy(), clip_min, clip_max)
                    adv_label = np.argmax(self.model.predict(paddle.to_tensor(adv_img)))
                    adv_img = paddle.to_tensor(adv_img)
                else:
                    adv_img = adv_img + gradient_norm * epsilon
                    adv_label = np.argmax(self.model.predict(adv_img))
                # Stop as soon as the model's prediction satisfies the goal.
                if adversary.try_accept_the_example(np.squeeze(adv_img.numpy()), adv_label):
                    return adversary
        return adversary
    @staticmethod
    def _norm(a, ord):
        # 1-D/2-D arrays can be handed to numpy directly.
        if a.ndim == 1 or a.ndim == 2:
            return np.linalg.norm(a, ord=ord)
        # channel first
        # NOTE(review): "ndim == shape[0]" is a heuristic layout check (a
        # 3xHxW image has ndim == 3 == channels); the norm_shape arithmetic
        # below also looks suspect for non-square / non-3-channel inputs --
        # verify before relying on non-inf norms here.
        elif a.ndim == a.shape[0]:
            norm_shape = a.ndim * a.shape[1:][0] * a.shape[1:][0]
        # channel last
        else:
            norm_shape = a.ndim * a.shape[:-1][0] * a.shape[:-1][1]
        return np.linalg.norm(a.reshape(norm_shape), ord=ord)
class FastGradientSignMethodTargetedAttack(GradientMethodAttack):
    """
    Targeted extension of the "Fast Gradient Sign Method"
    (Goodfellow et al., 2015, infinity norm).
    Paper link: https://arxiv.org/abs/1412.6572
    """

    def _apply(self, adversary, **kwargs):
        """
        Launch an attack process.

        Args:
            adversary: Adversary instance with initial status.
            **kwargs: Forwarded unchanged to the shared gradient loop.
        Returns:
            The adversary with updated status.
        """
        # All of the work happens in GradientMethodAttack._apply.
        return super(FastGradientSignMethodTargetedAttack, self)._apply(
            adversary=adversary, **kwargs)
class FastGradientSignMethodAttack(FastGradientSignMethodTargetedAttack):
    """
    Untargeted "Fast Gradient Sign Method" (Goodfellow et al., 2015),
    using the infinity norm.
    Paper link: https://arxiv.org/abs/1412.6572
    """

    def __init__(self, model):
        """
        Args:
            model: PaddleWhiteBoxModel.
        """
        # Plain FGSM is untargeted, so targeted support is switched off.
        super(FastGradientSignMethodAttack, self).__init__(model, support_targeted=False)
class IterativeLeastLikelyClassMethodAttack(GradientMethodAttack):
    """
    "Iterative Least-likely Class Method (ILCM)": the targeted extension of
    BIM, which in turn iterates FGSM in many small re-adjusted steps.
    Paper link: https://arxiv.org/abs/1607.02533
    """

    def _apply(self, adversary, epsilons=0.01, steps=1000):
        """
        Launch an attack process.

        Args:
            adversary: Adversary instance with initial status.
            epsilons: float. Perturbation length of a single step.
            steps: int. Total number of iterations.
        Returns:
            The adversary with updated status.
        """
        # Delegate to the shared loop, pinning the norm to L-infinity.
        return super(IterativeLeastLikelyClassMethodAttack, self)._apply(
            adversary=adversary,
            norm_ord=np.inf,
            epsilons=epsilons,
            steps=steps)
class BasicIterativeMethodAttack(IterativeLeastLikelyClassMethodAttack):
    """
    "Basic Iterative Method (BIM)": iterated FGSM taking many small steps,
    re-evaluating the gradient direction after each one. Untargeted.
    Paper link: https://arxiv.org/abs/1607.02533
    """

    def __init__(self, model):
        """
        Args:
            model: PaddleWhiteBoxModel.
        """
        # BIM is the untargeted flavour of ILCM.
        super(BasicIterativeMethodAttack, self).__init__(model, support_targeted=False)
class MomentumIteratorAttack(GradientMethodAttack):
    """
    The Momentum Iterative Fast Gradient Sign Method (Dong et al. 2017).
    This method won the first places in NIPS 2017 Non-targeted Adversarial
    Attacks and Targeted Adversarial Attacks. The original paper used
    hard labels for this attack; no label smoothing. inf norm.
    Paper link: https://arxiv.org/pdf/1710.06081.pdf
    """
    def __init__(self, model, support_targeted=True):
        """
        MIFGSM attack init.
        Args:
            model: PaddleWhiteBoxModel.
            support_targeted: bool. Whether targeted attacks are allowed.
        """
        super(MomentumIteratorAttack, self).__init__(model)
        self.support_targeted = support_targeted
    def _apply(self,
               adversary,
               norm_ord=None,
               epsilons=0.1,
               steps=100,
               epsilon_steps=100,
               decay_factor=1):
        """
        Apply the momentum iterative gradient attack method.
        Args:
            adversary: Adversary. An adversary instance with initial status.
            norm_ord: int. Order of the norm (np.inf, 1, 2, ...). 0 is rejected.
            epsilons: (list|tuple|float). Attack step size(s); a scalar is
                expanded into a linspace of epsilon_steps values.
            steps: int. The number of attack iterations per epsilon.
            epsilon_steps: int. Number of epsilon values tried for a scalar input.
            decay_factor: float. The decay factor for the momentum term.
        Returns:
            An adversary with updated status.
        """
        if norm_ord is None:
            norm_ord = np.inf
        if norm_ord == 0:
            raise ValueError("L0 norm is not supported!")
        if not self.support_targeted:
            if adversary.is_targeted_attack:
                raise ValueError(
                    "This attack method doesn't support targeted attack!")
        if not isinstance(epsilons, Iterable):
            epsilons = np.linspace(0, epsilons, num=epsilon_steps)
        original_label = adversary.original_label
        original_label = paddle.to_tensor(original_label, dtype='int64', place=self._device)
        if adversary.is_targeted_attack:
            target_label = adversary.target_label
            target_label = paddle.to_tensor(target_label, dtype='int64', place=self._device)
        for epsilon in epsilons[:]:
            if epsilon == 0.0:
                continue
            # Restart from the clean sample for every epsilon.
            adv_img = adversary.original
            if len(adv_img.shape) < 4:
                adv_img = np.expand_dims(adv_img, axis=0)
            adv_img = paddle.to_tensor(adv_img, dtype='float32', place=self._device)
            adv_img.stop_gradient = False
            momentum = 0
            for step in range(steps):
                # Targeted: descend towards the target label; untargeted:
                # ascend away from the original label.
                if adversary.is_targeted_attack:
                    gradient = - self.model.gradient(adv_img, target_label)
                else:
                    gradient = self.model.gradient(adv_img, original_label)
                # Accumulate the L1-normalized gradient into the momentum term.
                gradient = np.squeeze(gradient)
                velocity = gradient / self._norm(gradient, ord=1)
                velocity = np.expand_dims(velocity, axis=0)
                momentum = decay_factor * momentum + velocity
                if norm_ord == np.inf:
                    normalized_grad = np.sign(momentum)
                else:
                    # Fixed: previously this assigned the *norm scalar* itself,
                    # so a uniform constant (not the momentum direction) was
                    # added to the image. Normalize the momentum instead.
                    normalized_grad = momentum / self._norm(momentum, ord=norm_ord)
                perturbation = epsilon * normalized_grad
                perturbation = paddle.to_tensor(perturbation)
                adv_img = adv_img + perturbation
                adv_label = np.argmax(self.model.predict(adv_img))
                logging.info('step={}, epsilon = {:.5f}, pre_label = {}, adv_label={}'.format(
                    step, epsilon, original_label, adv_label))
                # Stop as soon as the prediction satisfies the attack goal.
                if adversary.try_accept_the_example(np.squeeze(adv_img.numpy()), adv_label):
                    return adversary
        return adversary
class ProjectedGradientDescentAttack(GradientMethodAttack):
    """
    Projected Gradient Descent (PGD).
    "Towards Deep Learning Models Resistant to Adversarial Attacks",
    Madry et al., ICLR 2018.
    """

    def __init__(self, model, support_targeted=True, pgd_flag=True):
        """
        PGD attack init.

        Args:
            model: PaddleWhiteBoxModel.
            support_targeted: bool. Whether targeted attacks are allowed.
            pgd_flag: bool. Enables the projection/clipping step inside the
                shared gradient loop.
        """
        super(ProjectedGradientDescentAttack, self).__init__(model)
        self.pgd_flag = pgd_flag
        self.support_targeted = support_targeted
# Short, conventional aliases for the attack classes exported in __all__.
FGSM = FastGradientSignMethodAttack
FGSMT = FastGradientSignMethodTargetedAttack
BIM = BasicIterativeMethodAttack
ILCM = IterativeLeastLikelyClassMethodAttack
MIFGSM = MomentumIteratorAttack
PGD = ProjectedGradientDescentAttack
| # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides the implementation for FGSM attack method.
"""
from __future__ import division
import logging
from collections import Iterable
import numpy as np
import paddle
from .base import Attack
__all__ = [
'GradientMethodAttack', 'FastGradientSignMethodAttack', 'FGSM',
'FastGradientSignMethodTargetedAttack', 'FGSMT',
'BasicIterativeMethodAttack', 'BIM',
'IterativeLeastLikelyClassMethodAttack', 'ILCM',
'MomentumIteratorAttack', 'MIFGSM',
'ProjectedGradientDescentAttack', 'PGD'
]
class GradientMethodAttack(Attack):
"""
This class implements gradient attack method, and is the base of FGSM, BIM, ILCM, etc.
"""
def __init__(self, model, support_targeted=True, pgd_flag=False):
"""
Args:
model: An instance of a paddle model to be attacked.
support_targeted(Does): this attack method support targeted.
pgd_flag: place it true if use pgd
"""
super(GradientMethodAttack, self).__init__(model)
self.support_targeted = support_targeted
self.pgd_flag = pgd_flag
def _apply(self,
adversary,
norm_ord=None,
epsilons=0.01,
epsilon_steps=10,
steps=100,
perturb=16.0 / 256,
):
"""
Apply the gradient attack method.
Args:
adversary: The Adversary object.
norm_ord: Order of the norm, such as np.inf, 1, 2, etc. It can't be 0.
epsilons: Attack step size (input variation). Largest step size if epsilons is not iterable.
epsilon_steps: The number of Epsilons' iteration for each attack iteration.
steps: The number of attack iteration.
Returns:
adversary(Adversary): The Adversary object.
"""
if norm_ord is None:
norm_ord = np.inf
if norm_ord == 0:
raise ValueError("L0 norm is not supported!")
if not self.support_targeted:
if adversary.is_targeted_attack:
raise ValueError(
"This attack method doesn't support targeted attack!")
if not isinstance(epsilons, Iterable):
if epsilon_steps == 1:
epsilons = [epsilons]
else:
epsilons = np.linspace(0, epsilons, num=epsilon_steps)
assert self.model.channel_axis == adversary.original.ndim
assert (self.model.channel_axis == 1 or
self.model.channel_axis == adversary.original.shape[0] or
self.model.channel_axis == adversary.original.shape[-1])
original_label = adversary.original_label
min_, max_ = self.model.bounds
adv_img = adversary.original
if len(adv_img.shape) < 4:
adv_img = np.expand_dims(adv_img, axis=0)
adv_img = paddle.to_tensor(adv_img, dtype='float32', place=self._device)
adv_img.stop_gradient = False
if adversary.is_targeted_attack:
target_label = adversary.target_label
target_label = paddle.to_tensor(target_label, dtype='int64', place=self._device)
for epsilon in epsilons[:]:
if epsilon == 0.0:
continue
for step in range(steps):
if adversary.is_targeted_attack:
gradient = - self.model.gradient(adv_img, target_label)
else:
gradient = self.model.gradient(adv_img, original_label)
gradient = paddle.to_tensor(gradient, dtype='float32', place=self._device)
if norm_ord == np.inf:
gradient_norm = paddle.sign(gradient)
else:
gradient_norm = gradient / self._norm(gradient.numpy(), ord=norm_ord)
if len(adv_img.shape) < 4:
adv_img = np.expand_dims(adv_img.numpy(), axis=0)
if self.pgd_flag:
# linf
adv_img = adv_img + gradient_norm * epsilon
clip_max = np.clip(adv_img.numpy() * (1.0 + perturb), min_, max_)
clip_min = np.clip(adv_img.numpy() * (1.0 - perturb), min_, max_)
adv_img = np.clip(adv_img.numpy(), clip_min, clip_max)
adv_label = np.argmax(self.model.predict(paddle.to_tensor(adv_img)))
adv_img = paddle.to_tensor(adv_img)
else:
adv_img = adv_img + gradient_norm * epsilon
adv_label = np.argmax(self.model.predict(adv_img))
if adversary.try_accept_the_example(np.squeeze(adv_img.numpy()), adv_label):
return adversary
return adversary
    @staticmethod
    def _norm(a, ord):
        """Return a norm of array *a*; higher-rank inputs are flattened first.

        NOTE(review): the reshape sizes below look suspicious and should be
        verified against callers — see inline notes.
        """
        # 1-D / 2-D inputs: delegate straight to numpy.
        if a.ndim == 1 or a.ndim == 2:
            return np.linalg.norm(a, ord=ord)
        # channel first
        # NOTE(review): `a.ndim == a.shape[0]` is presumably a heuristic for a
        # channel-first layout (e.g. 3 dims and 3 channels); confirm intent.
        elif a.ndim == a.shape[0]:
            # NOTE(review): uses shape[1:][0] twice (ndim * H * H) rather than
            # ndim * H * W, and multiplies by ndim instead of the channel
            # count — likely only correct for square images; verify.
            norm_shape = a.ndim * a.shape[1:][0] * a.shape[1:][0]
        # channel last
        else:
            # NOTE(review): ndim * H * W — same concern as above about using
            # ndim in place of the channel dimension; verify.
            norm_shape = a.ndim * a.shape[:-1][0] * a.shape[:-1][1]
        # Flatten to 1-D so np.linalg.norm computes a vector norm.
        return np.linalg.norm(a.reshape(norm_shape), ord=ord)
class FastGradientSignMethodTargetedAttack(GradientMethodAttack):
    """
    Targeted variant of the Fast Gradient Sign Method (FGSM).

    FGSM was originally implemented by Goodfellow et al. (2015) with the
    infinity norm; this subclass keeps targeted-attack support enabled.
    Paper link: https://arxiv.org/abs/1412.6572
    """

    def _apply(self, adversary, **kwargs):
        """
        Run the attack by delegating to the generic gradient attack.

        Args:
            adversary: Adversary instance holding the initial attack state.
            **kwargs: Forwarded verbatim to GradientMethodAttack._apply.

        Returns:
            The adversary with its status updated by the attack.
        """
        return GradientMethodAttack._apply(self, adversary=adversary, **kwargs)
class FastGradientSignMethodAttack(FastGradientSignMethodTargetedAttack):
    """
    The non-targeted Fast Gradient Sign Method (FGSM).

    Originally implemented by Goodfellow et al. (2015) with the infinity
    norm. Paper link: https://arxiv.org/abs/1412.6572
    """

    def __init__(self, model):
        """
        Initialize the FGSM attack with targeted mode disabled.

        Args:
            model: PaddleWhiteBoxModel.
        """
        super(FastGradientSignMethodAttack, self).__init__(model, False)
class IterativeLeastLikelyClassMethodAttack(GradientMethodAttack):
    """
    The Iterative Least-likely Class Method (ILCM).

    ILCM extends the Basic Iterative Method (BIM) with targeted-attack
    support; BIM itself iterates small FGSM-style steps, re-evaluating the
    gradient direction after every step.
    Paper link: https://arxiv.org/abs/1607.02533
    """

    def _apply(self, adversary, epsilons=0.01, steps=1000):
        """
        Run the attack using the infinity norm.

        Args:
            adversary: Adversary instance with the initial attack status.
            epsilons: float. Perturbation length of a single step.
            steps: int. Total number of iterations.

        Returns:
            The adversary with its status updated by the attack.
        """
        return GradientMethodAttack._apply(self, adversary=adversary,
                                           norm_ord=np.inf,
                                           epsilons=epsilons,
                                           steps=steps)
class BasicIterativeMethodAttack(IterativeLeastLikelyClassMethodAttack):
    """
    The Basic Iterative Method (BIM).

    Where FGSM takes one gradient step, BIM repeats small FGSM-style steps,
    adjusting the direction after each one.
    Paper link: https://arxiv.org/abs/1607.02533
    """

    def __init__(self, model):
        """
        Initialize the BIM attack with targeted mode disabled.

        Args:
            model: PaddleWhiteBoxModel.
        """
        super(BasicIterativeMethodAttack, self).__init__(model, False)
class MomentumIteratorAttack(GradientMethodAttack):
    """
    The Momentum Iterative Fast Gradient Sign Method (Dong et al. 2017).
    This method won the first places in NIPS 2017 Non-targeted Adversarial
    Attacks and Targeted Adversarial Attacks. The original paper used
    hard labels for this attack; no label smoothing. inf norm.
    Paper link: https://arxiv.org/pdf/1710.06081.pdf
    """

    def __init__(self, model, support_targeted=True):
        """
        MIFGSM attack init.
        Args:
            model: PaddleWhiteBoxModel.
            support_targeted: bool. Whether targeted attacks are allowed.
        """
        super(MomentumIteratorAttack, self).__init__(model)
        self.support_targeted = support_targeted

    def _apply(self,
               adversary,
               norm_ord=None,
               epsilons=0.1,
               steps=100,
               epsilon_steps=100,
               decay_factor=1):
        """
        Apply the momentum iterative gradient attack method.
        Args:
            adversary: Adversary. An adversary instance with initial status.
            norm_ord: int. Order of the norm, such as np.inf, 1, 2, etc. It can't be 0.
            epsilons: (list|tuple|float). Attack step size (input variation). Largest step size if epsilons is not iterable.
            steps: int. The number of attack iteration.
            epsilon_steps: int. The number of Epsilons' iteration for each attack iteration.
            decay_factor: float. The decay factor for the momentum term.
        Returns:
            An adversary status with changed status.
        """
        if norm_ord is None:
            norm_ord = np.inf
        if norm_ord == 0:
            raise ValueError("L0 norm is not supported!")
        if not self.support_targeted:
            if adversary.is_targeted_attack:
                raise ValueError(
                    "This attack method doesn't support targeted attack!")
        if not isinstance(epsilons, Iterable):
            epsilons = np.linspace(0, epsilons, num=epsilon_steps)
        min_, max_ = self.model.bounds
        original_label = adversary.original_label
        original_label = paddle.to_tensor(original_label, dtype='int64', place=self._device)
        if adversary.is_targeted_attack:
            target_label = adversary.target_label
            target_label = paddle.to_tensor(target_label, dtype='int64', place=self._device)
        for epsilon in epsilons[:]:
            if epsilon == 0.0:
                continue
            # Restart from the clean sample for every epsilon.
            adv_img = adversary.original
            if len(adv_img.shape) < 4:
                adv_img = np.expand_dims(adv_img, axis=0)
            adv_img = paddle.to_tensor(adv_img, dtype='float32', place=self._device)
            adv_img.stop_gradient = False
            momentum = 0
            for step in range(steps):
                if adversary.is_targeted_attack:
                    gradient = - self.model.gradient(adv_img, target_label)
                else:
                    gradient = self.model.gradient(adv_img, original_label)
                gradient = np.squeeze(gradient)
                # Accumulate the L1-normalized gradient into the momentum term.
                velocity = gradient / self._norm(gradient, ord=1)
                velocity = np.expand_dims(velocity, axis=0)
                momentum = decay_factor * momentum + velocity
                if norm_ord == np.inf:
                    normalized_grad = np.sign(momentum)
                else:
                    # BUG FIX: the previous code assigned the *scalar* norm of
                    # the momentum to normalized_grad; normalize the momentum
                    # vector instead, mirroring GradientMethodAttack._apply.
                    normalized_grad = momentum / self._norm(momentum, ord=norm_ord)
                perturbation = epsilon * normalized_grad
                perturbation = paddle.to_tensor(perturbation)
                adv_img = adv_img + perturbation
                adv_label = np.argmax(self.model.predict(adv_img))
                logging.info('step={}, epsilon = {:.5f}, pre_label = {}, adv_label={}' .format(step,
                                                                                              epsilon,
                                                                                              original_label,
                                                                                              adv_label))
                if adversary.try_accept_the_example(np.squeeze(adv_img.numpy()), adv_label):
                    return adversary
        return adversary
class ProjectedGradientDescentAttack(GradientMethodAttack):
    """
    Projected Gradient Descent (PGD).

    "Towards deep learning models resistant to adversarial attacks",
    Madry et al., ICLR 2018.
    """

    def __init__(self, model, support_targeted=True, pgd_flag=True):
        """
        PGD attack init.

        Args:
            model: PaddleWhiteBoxModel.
            support_targeted: bool. Whether targeted attacks are allowed.
            pgd_flag: bool. Enables the PGD clipping branch in the base class.
        """
        super(ProjectedGradientDescentAttack, self).__init__(model)
        self.support_targeted = support_targeted
        self.pgd_flag = pgd_flag
# Conventional short aliases for the attack classes defined above.
FGSM = FastGradientSignMethodAttack
FGSMT = FastGradientSignMethodTargetedAttack
BIM = BasicIterativeMethodAttack
ILCM = IterativeLeastLikelyClassMethodAttack
MIFGSM = MomentumIteratorAttack
PGD = ProjectedGradientDescentAttack
| en | 0.84651 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. This module provides the implementation for FGSM attack method. This class implements gradient attack method, and is the base of FGSM, BIM, ILCM, etc. Args: model: An instance of a paddle model to be attacked. support_targeted(Does): this attack method support targeted. pgd_flag: place it true if use pgd Apply the gradient attack method. Args: adversary: The Adversary object. norm_ord: Order of the norm, such as np.inf, 1, 2, etc. It can't be 0. epsilons: Attack step size (input variation). Largest step size if epsilons is not iterable. epsilon_steps: The number of Epsilons' iteration for each attack iteration. steps: The number of attack iteration. Returns: adversary(Adversary): The Adversary object. # linf # channel first # channel last "Fast Gradient Sign Method" is extended to support targeted attack. "Fast Gradient Sign Method" was originally implemented by Goodfellow et al. (2015) with the infinity norm. Paper link: https://arxiv.org/abs/1412.6572 Launch an attack process. Args: adversary: Adversary. An adversary instance with initial status. **kwargs: Other named arguments. Returns: An adversary status with changed status. This attack was originally implemented by Goodfellow et al. (2015) with the infinity norm, and is known as the "Fast Gradient Sign Method". Paper link: https://arxiv.org/abs/1412.6572 FGSM attack init. 
Args: model: PaddleWhiteBoxModel. "Iterative Least-likely Class Method (ILCM)" extends "BIM" to support targeted attack. "The Basic Iterative Method (BIM)" is to extend "FSGM". "BIM" iteratively take multiple small steps while adjusting the direction after each step. Paper link: https://arxiv.org/abs/1607.02533 Launch an attack process. Args: adversary: Adversary. An adversary instance with initial status. epsilons: float. A single step perturbation length. steps: int. Total steps number. Returns: An adversary status with changed status. FGSM is a one-step method. "The Basic Iterative Method (BIM)" iteratively take multiple small steps while adjusting the direction after each step. Paper link: https://arxiv.org/abs/1607.02533 Args: model: PaddleWhiteBoxModel. The Momentum Iterative Fast Gradient Sign Method (Dong et al. 2017). This method won the first places in NIPS 2017 Non-targeted Adversarial Attacks and Targeted Adversarial Attacks. The original paper used hard labels for this attack; no label smoothing. inf norm. Paper link: https://arxiv.org/pdf/1710.06081.pdf MIFGSM attack init. Args: model: PaddleWhiteBoxModel. support_targeted: bool. Apply the momentum iterative gradient attack method. Args: adversary: Adversary. An adversary instance with initial status. norm_ord: int. Order of the norm, such as np.inf, 1, 2, etc. It can't be 0. epsilons: (list|tuple|float). Attack step size (input variation). Largest step size if epsilons is not iterable. steps: int. The number of attack iteration. epsilon_steps: int. The number of Epsilons' iteration for each attack iteration. decay_factor: float. The decay factor for the momentum term. Returns: An adversary status with changed status. Projected Gradient Descent Towards deep learning models resistant to adversarial attacks, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, ICLR 2018 PGD attack init. Args: model: PaddleWhiteBoxModel. | 2.306589 | 2 |
native.py | hclivess/qtrader | 14 | 6621417 | <gh_stars>10-100
from qtrade_client.api import QtradeAPI
def load_credentials():
    """Read the API credential string ("key_id:key") from the local `secret` file."""
    with open("secret") as secret_file:
        contents = secret_file.read()
    return contents
# String is of the format "[key_id]:[key]"
client_native = QtradeAPI("https://api.qtrade.io", key=load_credentials())
# result = client.post("/v1/user/sell_limit", amount="1", price="0.0001", market_id=12)
# print(result)
# Only closed orders
print(client_native.orders(open=False))
# Print all orders before ID 25
print(client_native.orders(older_than=25))
# Print all orders after ID 25
print(client_native.orders(newer_than=25))
client_native.balances() | from qtrade_client.api import QtradeAPI
def load_credentials():
with open("secret") as authfile:
return authfile.read()
# String is of the format "[key_id]:[key]"
client_native = QtradeAPI("https://api.qtrade.io", key=load_credentials())
# result = client.post("/v1/user/sell_limit", amount="1", price="0.0001", market_id=12)
# print(result)
# Only closed orders
print(client_native.orders(open=False))
# Print all orders before ID 25
print(client_native.orders(older_than=25))
# Print all orders after ID 25
print(client_native.orders(newer_than=25))
client_native.balances() | en | 0.646758 | # String is of the format "[key_id]:[key]" # result = client.post("/v1/user/sell_limit", amount="1", price="0.0001", market_id=12) # print(result) # Only closed orders # Print all orders before ID 25 # Print all orders after ID 25 | 2.551647 | 3 |
schema_org_web_scanner/src/d1_schema_scan/asgi.py | DataONEorg/d1_ncei_adapter | 1 | 6621418 | """
ASGI entrypoint. Configures Django and then runs the application
defined in the ASGI_APPLICATION setting.
"""
import os
import channels.routing
import django
# DJANGO_SETTINGS_MODULE must be set before django.setup() initializes the
# app registry; setdefault lets the environment override the deploy settings.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "d1_schema_scan.settings_deploy")
django.setup()
# Channels builds the ASGI application from the configured routing.
application = channels.routing.get_default_application()
| """
ASGI entrypoint. Configures Django and then runs the application
defined in the ASGI_APPLICATION setting.
"""
import os
import channels.routing
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "d1_schema_scan.settings_deploy")
django.setup()
application = channels.routing.get_default_application()
| en | 0.7872 | ASGI entrypoint. Configures Django and then runs the application defined in the ASGI_APPLICATION setting. | 1.786749 | 2 |
import_osm_metadata.py | ckurze/mongodb-knowledge-graph | 0 | 6621419 | import requests
import pymongo
import re
from pprint import pprint
import json
from pyld import jsonld
# Semantic Description of OpenStreetMap: https://wiki.openstreetmap.org/wiki/OSM_Semantic_Network
# API doc for tags in Openstreetmap: https://taginfo.openstreetmap.org/taginfo/apidoc#api_4_keys_all
# Need the attributes of the classes (tags to use in combination): https://wiki.openstreetmap.org/wiki/Tag%3Asubstation%3Dconverter
# /api/4/tag/combinations?key=highway&value=residential&page=1&rp=10&sortname=together_count&sortorder=desc
# HTTP requests: https://requests.readthedocs.io/en/master/
# PyLD: JSON-LD in Python (pip3 install PyLD), https://github.com/digitalbazaar/pyld
# Example Ontology in JSON-LD https://gist.github.com/stain/7690362
# sudo yum -y install git python3
# PyOSM: https://pypi.org/project/esy-osm-pbf/
# pip3 install pymongo dnspython requests esy-osm-pbf pyld
# curl -OL https://download.geofabrik.de/europe/germany/bayern-latest.osm.pbf
# curl -OL https://download.geofabrik.de/europe/germany/germany-latest.osm.pbf
OPENSTREETMAP_NAMESPACE = 'osmpower'
OPENSTREETMAP = 'openstreetmap'
# Root tag key whose value hierarchy is crawled.
KEY_TO_ANALYZE = 'power'
# Some classes appear twice, so we have multiple parent classes
classes = { }
properties = { }
all_class_names = set(())
# Raw taginfo API responses are cached in local MongoDB collections.
# NOTE(review): "releated" is a typo kept because it matches the existing
# collection name raw_releated_tags.
mongo_client = pymongo.MongoClient('localhost')
raw_class_tags_coll = mongo_client.osm.raw_class_tags
raw_releated_tags_coll = mongo_client.osm.raw_releated_tags
raw_key_wiki_information_coll = mongo_client.osm.raw_key_wiki_information
def main():
    """Crawl the taginfo API: first the class hierarchy, then related tags."""
    # Phase 1: recursively fetch every tag value reachable from the root key.
    getKey(KEY_TO_ANALYZE, 0)
    # Phase 2: for each stored class, fetch the tags commonly used with it.
    for class_doc in raw_class_tags_coll.find():
        get_related_tags(class_doc)
def getKey(key, depth=0, parent_class_name=''):
    """
    Recursively fetch all tag values for *key* from the taginfo API, cache the
    raw response in MongoDB, then crawl each tag *value* as a new key one
    level deeper.

    NOTE(review): parent_class_name is accepted but never used — confirm
    whether parent tracking was intended here.
    """
    r = requests.get('https://taginfo.openstreetmap.org/api/4/tags/list?key=' + key)
    if r.status_code != 200:
        print('Error while calling service')
        return
    r_json = r.json()
    r_json['key'] = key
    # Only record and recurse on keys that have tag data and have not been
    # visited yet; the module-level `classes` dict acts as the visited set.
    if not key in classes and len(r_json['data']) > 0:
        all_class_names.add(key)
        raw_class_tags_coll.replace_one(
            { 'key': key },
            r_json,
            upsert=True)
        classes[key] = { 'dummy': 1 }
    for tag in r_json['data']:
        # Indent console output by recursion depth for readability.
        padding = ' ' * depth
        print('Get tag information for: ' + padding + tag['key'] + ' ' + tag['value'])
        if tag['value'] not in classes:
            getKey(tag['value'], depth + 1, tag['value'])
def get_related_tags(class_key):
    """
    For every tag in *class_key['data']*, fetch the tags commonly combined
    with it (taginfo "combinations" endpoint), upsert the raw response into
    MongoDB, and crawl wiki metadata for each co-occurring key.

    NOTE(review): a non-200 response aborts the whole function, skipping the
    remaining tags of this class — confirm that `return` (vs `continue`)
    is intended.
    """
    for tag in class_key['data']:
        print('Get related tags for ' + tag['key'] + ': ' + tag['value'])
        r = requests.get('https://taginfo.openstreetmap.org/api/4/tag/combinations?key=' + tag['key'] + '&value=' + tag['value'])
        if r.status_code != 200:
            print('Error while calling service')
            return
        r_json = r.json()
        r_json['key'] = tag['key']
        r_json['value'] = tag['value']
        raw_releated_tags_coll.replace_one(
            { 'key': r_json['key'], 'value': r_json['value'] },
            r_json,
            upsert=True)
        # Deduplicate the co-occurring keys before fetching wiki pages.
        other_key = set(())
        [other_key.add(data['other_key']) for data in r_json['data']]
        for key in other_key:
            get_key_wiki_information(key)
def get_key_wiki_information(key):
    """Fetch wiki-page metadata for *key* from taginfo and upsert it into MongoDB."""
    print('Get wiki information for ' + key)
    response = requests.get('https://taginfo.openstreetmap.org/api/4/key/wiki_pages?key=' + key)
    if response.status_code != 200:
        print('Error while calling service')
        return
    document = response.json()
    document['key'] = key
    raw_key_wiki_information_coll.replace_one({ 'key': document['key'] },
                                              document,
                                              upsert=True)
# Run the crawl only when executed as a script.
if __name__ == '__main__':
    main()
| import requests
import pymongo
import re
from pprint import pprint
import json
from pyld import jsonld
# Semantic Description of OpenStreetMap: https://wiki.openstreetmap.org/wiki/OSM_Semantic_Network
# API doc for tags in Openstreetmap: https://taginfo.openstreetmap.org/taginfo/apidoc#api_4_keys_all
# Need the attributes of the classes (tags to use in combination): https://wiki.openstreetmap.org/wiki/Tag%3Asubstation%3Dconverter
# /api/4/tag/combinations?key=highway&value=residential&page=1&rp=10&sortname=together_count&sortorder=desc
# HTTP requests: https://requests.readthedocs.io/en/master/
# PyLD: JSON-LD in Python (pip3 install PyLD), https://github.com/digitalbazaar/pyld
# Example Ontology in JSON-LD https://gist.github.com/stain/7690362
# sudo yum -y install git python3
# PyOSM: https://pypi.org/project/esy-osm-pbf/
# pip3 install pymongo dnspython requests esy-osm-pbf pyld
# curl -OL https://download.geofabrik.de/europe/germany/bayern-latest.osm.pbf
# curl -OL https://download.geofabrik.de/europe/germany/germany-latest.osm.pbf
OPENSTREETMAP_NAMESPACE = 'osmpower'
OPENSTREETMAP = 'openstreetmap'
KEY_TO_ANALYZE = 'power'
# Some classes appear twice, so we have multiple parent classes
classes = { }
properties = { }
all_class_names = set(())
mongo_client = pymongo.MongoClient('localhost')
raw_class_tags_coll = mongo_client.osm.raw_class_tags
raw_releated_tags_coll = mongo_client.osm.raw_releated_tags
raw_key_wiki_information_coll = mongo_client.osm.raw_key_wiki_information
def main():
# Get the Classes
getKey(KEY_TO_ANALYZE, 0)
# Get the related tags, i.e. Properties of Classes
for row in raw_class_tags_coll.find():
get_related_tags(row)
def getKey(key, depth=0, parent_class_name=''):
r = requests.get('https://taginfo.openstreetmap.org/api/4/tags/list?key=' + key)
if r.status_code != 200:
print('Error while calling service')
return
r_json = r.json()
r_json['key'] = key
if not key in classes and len(r_json['data']) > 0:
all_class_names.add(key)
raw_class_tags_coll.replace_one(
{ 'key': key },
r_json,
upsert=True)
classes[key] = { 'dummy': 1 }
for tag in r_json['data']:
padding = ' ' * depth
print('Get tag information for: ' + padding + tag['key'] + ' ' + tag['value'])
if tag['value'] not in classes:
getKey(tag['value'], depth + 1, tag['value'])
def get_related_tags(class_key):
for tag in class_key['data']:
print('Get related tags for ' + tag['key'] + ': ' + tag['value'])
r = requests.get('https://taginfo.openstreetmap.org/api/4/tag/combinations?key=' + tag['key'] + '&value=' + tag['value'])
if r.status_code != 200:
print('Error while calling service')
return
r_json = r.json()
r_json['key'] = tag['key']
r_json['value'] = tag['value']
raw_releated_tags_coll.replace_one(
{ 'key': r_json['key'], 'value': r_json['value'] },
r_json,
upsert=True)
other_key = set(())
[other_key.add(data['other_key']) for data in r_json['data']]
for key in other_key:
get_key_wiki_information(key)
def get_key_wiki_information(key):
print('Get wiki information for ' + key)
r = requests.get('https://taginfo.openstreetmap.org/api/4/key/wiki_pages?key=' + key)
if r.status_code != 200:
print('Error while calling service')
return
r_json = r.json()
r_json['key'] = key
raw_key_wiki_information_coll.replace_one(
{ 'key': r_json['key'] },
r_json,
upsert=True)
if __name__ == '__main__':
main()
| en | 0.437455 | # Semantic Description of OpenStreetMap: https://wiki.openstreetmap.org/wiki/OSM_Semantic_Network # API doc for tags in Openstreetmap: https://taginfo.openstreetmap.org/taginfo/apidoc#api_4_keys_all # Need the attributes of the classes (tags to use in combination): https://wiki.openstreetmap.org/wiki/Tag%3Asubstation%3Dconverter # /api/4/tag/combinations?key=highway&value=residential&page=1&rp=10&sortname=together_count&sortorder=desc # HTTP requests: https://requests.readthedocs.io/en/master/ # PyLD: JSON-LD in Python (pip3 install PyLD), https://github.com/digitalbazaar/pyld # Example Ontology in JSON-LD https://gist.github.com/stain/7690362 # sudo yum -y install git python3 # PyOSM: https://pypi.org/project/esy-osm-pbf/ # pip3 install pymongo dnspython requests esy-osm-pbf pyld # curl -OL https://download.geofabrik.de/europe/germany/bayern-latest.osm.pbf # curl -OL https://download.geofabrik.de/europe/germany/germany-latest.osm.pbf # Some classes appear twice, so we have multiple parent classes # Get the Classes # Get the related tags, i.e. Properties of Classes | 2.557505 | 3 |
testing/test_bytecode.py | shendel/hippyvm | 1 | 6621420 |
from hippy.objspace import getspace
from hippy.phpcompiler import compile_php
from hippy.bytecode import unserialize
from testing.test_interpreter import MockInterpreter
class TestBytecode(object):
    """Round-trip tests: compiled PHP bytecode must survive (de)serialization."""

    def test_basic_serialize(self):
        php_source = "<? $a = 3; var_dump($a);?>"
        space = getspace()
        original = compile_php('<input>', php_source, space)
        payload = original.serialize()
        interp = MockInterpreter(space)
        restored = unserialize(payload, interp)
        # The deserialized bytecode must be structurally identical...
        assert original.dump() == restored.dump()
        assert space.int_w(restored.consts[0]) == 3
        assert restored.name == original.name
        assert restored.filename == original.filename
        assert restored.startlineno == original.startlineno
        assert restored.sourcelines == original.sourcelines
        assert original.names == restored.names
        assert original.varnames == restored.varnames
        # ...and must still execute correctly.
        interp.run_main(space, restored)
        assert interp.output[0] == 'int(3)\n'

    def test_serialize_with_calls(self):
        php_source = """<?
        function f($a) {
            return $a + 4;
        }
        echo f(3);
        ?>"""
        space = getspace()
        payload = compile_php('<input>', php_source, space).serialize()
        interp = MockInterpreter(space)
        restored = unserialize(payload, interp)
        interp.run_main(space, restored)
        assert space.int_w(interp.output[0]) == 3 + 4

    def test_serialize_with_classes(self):
        php_source = """<?
        class X {
            function __construct() {
                $this->x = 3;
            }
        }
        $x = new X();
        echo $x->x;
        ?>"""
        space = getspace()
        payload = compile_php('<input>', php_source, space).serialize()
        interp = MockInterpreter(space)
        restored = unserialize(payload, interp)
        interp.run_main(space, restored)
        assert space.int_w(interp.output[0]) == 3
|
from hippy.objspace import getspace
from hippy.phpcompiler import compile_php
from hippy.bytecode import unserialize
from testing.test_interpreter import MockInterpreter
class TestBytecode(object):
def test_basic_serialize(self):
source = "<? $a = 3; var_dump($a);?>"
space = getspace()
bc = compile_php('<input>', source, space)
dump = bc.serialize()
interp = MockInterpreter(space)
bc2 = unserialize(dump, interp)
assert bc.dump() == bc2.dump()
assert space.int_w(bc2.consts[0]) == 3
assert bc2.name == bc.name
assert bc2.filename == bc.filename
assert bc2.startlineno == bc.startlineno
assert bc2.sourcelines == bc.sourcelines
assert bc.names == bc2.names
assert bc.varnames == bc2.varnames
interp.run_main(space, bc2)
assert interp.output[0] == 'int(3)\n'
def test_serialize_with_calls(self):
source = """<?
function f($a) {
return $a + 4;
}
echo f(3);
?>"""
space = getspace()
bc = compile_php('<input>', source, space)
dump = bc.serialize()
interp = MockInterpreter(space)
bc2 = unserialize(dump, interp)
interp.run_main(space, bc2)
assert space.int_w(interp.output[0]) == 3 + 4
def test_serialize_with_classes(self):
source = """<?
class X {
function __construct() {
$this->x = 3;
}
}
$x = new X();
echo $x->x;
?>"""
space = getspace()
bc = compile_php('<input>', source, space)
dump = bc.serialize()
interp = MockInterpreter(space)
bc2 = unserialize(dump, interp)
interp.run_main(space, bc2)
assert space.int_w(interp.output[0]) == 3
| en | 0.149446 | <? function f($a) { return $a + 4; } echo f(3); ?> <? class X { function __construct() { $this->x = 3; } } $x = new X(); echo $x->x; ?> | 2.322146 | 2 |
junk.py | cclauss/asyncio_hacks | 1 | 6621421 | class Channel(object):
def __init__(self, server, name, channel_id, members=None):
self.server = server
self.name = name
self.id = channel_id
self.members = members or []
def __eq__(self, compare_str):
if compare_str in (self.id, self.name) or "#" + compare_str == self.name:
return True
else:
return False
def __str__(self):
fmt = "{0} : {1:.40}"
return "\n".join(fmt.format(key, value) for key, value in self.__dict__.items())
def __repr__(self):
return self.__str__()
def send_message(self, message):
message_json = {"type": "message", "channel": self.id, "text": message}
self.server.send_to_websocket(message_json)
| class Channel(object):
def __init__(self, server, name, channel_id, members=None):
self.server = server
self.name = name
self.id = channel_id
self.members = members or []
def __eq__(self, compare_str):
if compare_str in (self.id, self.name) or "#" + compare_str == self.name:
return True
else:
return False
def __str__(self):
fmt = "{0} : {1:.40}"
return "\n".join(fmt.format(key, value) for key, value in self.__dict__.items())
def __repr__(self):
return self.__str__()
def send_message(self, message):
message_json = {"type": "message", "channel": self.id, "text": message}
self.server.send_to_websocket(message_json)
| none | 1 | 2.992568 | 3 | |
clean-registry-images.py | lowang-bh/registry | 6 | 6621422 | #!/usr/bin/env python
"""
this is a registry manipulator, can do following:
- list all images (including layers)
- delete images
- all except last N images
- all images and/or tags
#
run
registry.py -h
to get more help
#
important: after removing the tags, run the garbage collector
on your registry host:
docker-compose -f [path_to_your_docker_compose_file] run \
registry bin/registry garbage-collect \
/etc/docker/registry/config.yml
#
or if you are not using docker-compose:
docker run registry:2 bin/registry garbage-collect \
/etc/docker/registry/config.yml
#
for more detail on garbage collection read here:
https://docs.docker.com/registry/garbage-collection/
"""
import requests
import urllib3
import pprint
import base64
import re
import sys
import json
import os
import argparse
import www_authenticate
import logging
from logging import handlers
from getpass import getpass
from datetime import timedelta, datetime as dt
from urllib3.exceptions import InsecureRequestWarning
# Console gets INFO+, the rotating file is intended to capture DEBUG traces.
# NOTE(review): logging.Logger() is instantiated directly instead of
# logging.getLogger(); the logger is therefore not registered with the
# logging manager — confirm this is deliberate.
log = logging.Logger(__file__)
# NOTE(review): the logger level is INFO, so DEBUG records are filtered out
# before they ever reach the DEBUG-level file handler below — the file will
# not actually contain DEBUG output; verify intent.
log.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
handler.setFormatter(
    logging.Formatter("[%(levelname)s] %(asctime)s,%(lineno)4d, %(funcName)s : %(message)s", '%Y-%m-%d %H:%M:%S'))
# Rotating file: 10 MB per file, 2 backups.
# NOTE(review): the path is hard-coded; confirm /lain/app exists on all hosts.
fileHandler = handlers.RotatingFileHandler('/lain/app/clean.log', 'a', 10 * 1024 * 1024, 2)
fileHandler.setLevel(logging.DEBUG)
fileHandler.setFormatter(
    logging.Formatter("[%(levelname)s] %(asctime)s,%(lineno)4d, %(funcName)s : %(message)s", '%Y-%m-%d %H:%M:%S'))
log.addHandler(handler)
log.addHandler(fileHandler)
# number of image versions to keep
CONST_KEEP_LAST_VERSIONS = 100
# this class is created for testing
class Requests:
    """
    Thin wrapper around the `requests` library so HTTP traffic can be stubbed
    in tests, plus bearer-token handling for registries behind an OAuth-style
    token service.
    """

    def request(self, method, url, **kwargs):
        """Plain pass-through to requests.request()."""
        return requests.request(method, url, **kwargs)

    def bearer_request(self, method, url, auth, **kwargs):
        """
        Perform *method* on *url* using bearer-token auth.

        On a 401 answer the Www-Authenticate challenge is parsed, a fresh
        token is fetched from the auth service and the request is retried
        once. Returns (response, authorization_header_value).
        """
        log.debug("bearer_request()")
        log.debug('[registry][request]: {0} {1}'.format(method, url))
        if 'Authorization' in kwargs['headers']:
            # Dump the (JWT) token header and payload for debugging.
            log.debug('[registry][request]: Authorization header:')
            token_parsed = kwargs['headers']['Authorization'].split('.')
            log.debug(pprint.pformat(json.loads(decode_base64(token_parsed[0]))))
            log.debug(pprint.pformat(json.loads(decode_base64(token_parsed[1]))))
        res = requests.request(method, url, **kwargs)
        if str(res.status_code)[0] == '2':
            log.debug("[registry] accepted")
            return res, kwargs['headers']['Authorization']
        if res.status_code == 401:
            log.debug("[registry] Access denied. Refreshing token...")
            oauth = www_authenticate.parse(res.headers['Www-Authenticate'])
            log.debug('[auth][answer] Auth header:')
            log.debug(pprint.pformat(oauth['bearer']))
            log.info('retreiving bearer token for {0}'.format(oauth['bearer']['scope']))
            request_url = '{0}?service={1}&scope={2}'.format(oauth['bearer']['realm'],
                                                             oauth['bearer']['service'],
                                                             oauth['bearer']['scope'])
            log.debug('[debug][auth][request] Refreshing auth token: POST {0}'.format(request_url))
            try_oauth = requests.post(request_url, auth=auth, **kwargs)
            try:
                token = json.loads(try_oauth._content)['token']
                log.info(">>> token: {}".format(token))
            # BUG FIX: json.loads raises ValueError (JSONDecodeError), never
            # SyntaxError, and a token-less response raises KeyError; the old
            # `except SyntaxError:` could not catch either failure.
            except (ValueError, KeyError):
                log.error("\n\ncouldn't accure token: {0}".format(try_oauth._content))
                sys.exit(1)
            token_parsed = token.split('.')
            log.debug('[auth] token issued: ')
            log.debug(pprint.pformat(json.loads(decode_base64(token_parsed[0]))))
            log.debug(pprint.pformat(json.loads(decode_base64(token_parsed[1]))))
            kwargs['headers']['Authorization'] = 'Bearer {0}'.format(token)
        else:
            return res, kwargs['headers']['Authorization']
        res = requests.request(method, url, **kwargs)
        return res, kwargs['headers']['Authorization']
def natural_keys(text):
    """
    Key function for natural ("human") sort order, e.g. tag "v2" sorts before
    "v10". Splits *text* into alternating non-digit / digit chunks and
    converts the digit chunks to int.
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    """
    def __atoi(chunk):
        return int(chunk) if chunk.isdigit() else chunk

    # Raw string: '(\d+)' in a normal string is an invalid escape sequence
    # (DeprecationWarning, an error in future Python versions).
    return [__atoi(c) for c in re.split(r'(\d+)', text)]
def decode_base64(data):
    """
    Decode base64 data whose padding may be missing.

    :param data: Base64 data as an ASCII string, optionally prefixed with
        "Bearer ".
    :returns: The decoded byte string.
    """
    data = data.replace('Bearer ', '')
    log.debug('base64 string to decode:\n{0}'.format(data))
    # Re-append the '=' padding that token strings usually drop.
    remainder = len(data) % 4
    if remainder:
        data += '=' * (4 - remainder)
    if sys.version_info[0] <= 2:
        return base64.decodestring(data)
    return base64.decodebytes(bytes(data, 'utf-8'))
def get_error_explanation(context, error_code):
    """
    Return a human-readable hint for a known (context, HTTP status) pair,
    or an empty string when there is no specific advice.
    """
    hints = {
        "delete_tag_405": 'You might want to set REGISTRY_STORAGE_DELETE_ENABLED: "true" in your registry',
        "get_tag_digest_404": "Try adding flag --digest-method=GET",
    }
    # dict.get replaces the redundant `key in dict.keys()` membership test.
    return hints.get("%s_%s" % (context, error_code), '')
def get_auth_schemes(r, path):
    """
    Return the authentication schemes (lowercased) advertised by the registry
    in its Www-Authenticate header, or an empty list when the header is
    absent. Typical values: "basic", "bearer".
    """
    probe = requests.head('{0}{1}'.format(r.hostname, path), verify=not r.no_validate_ssl)
    if 'Www-Authenticate' not in probe.headers:
        log.debug('[docker] No Auth schemes found')
        return []
    schemes = www_authenticate.parse(probe.headers['Www-Authenticate'])
    log.debug('[docker] Auth schemes found:{0}'.format([m for m in schemes]))
    return [m.lower() for m in schemes]
# class to manipulate registry
class Registry:
    """Minimal client for the Docker Registry HTTP API v2.

    Lists repositories and tags, resolves tag digests, fetches manifests and
    config blobs, and deletes manifests, using either basic or bearer-token
    authentication. Non-2xx responses store the status code in
    ``self.last_error`` and make the calling method return None/[].
    """

    # this Accept header is required for proper digest processing: it makes
    # the registry answer with a v2 manifest whose digest DELETE expects
    HEADERS = {"Accept": "application/vnd.docker.distribution.manifest.v2+json"}

    def __init__(self):
        self.username = None
        # fixed: the previous revision held an invalid placeholder token here;
        # credentials start unset until parse_login()/create() fills them in
        self.password = None
        self.auth_schemes = []
        self.hostname = None
        self.no_validate_ssl = False
        self.http = None
        self.last_error = None
        self.digest_method = "HEAD"

    def parse_login(self, login):
        """Split a "USER:PASSWORD" string into (user, password).

        Surrounding single/double quotes around each part are stripped.
        Returns (None, None) — and sets self.last_error — when the input is
        None or lacks the ':' separator.
        """
        if login is not None:
            if ':' not in login:
                self.last_error = "Please provide -l in the form USER:PASSWORD"
                return None, None
            self.last_error = None
            (username, password) = login.split(':', 1)
            username = username.strip('"').strip("'")
            password = password.strip('"').strip("'")
            return username, password
        return None, None

    @staticmethod
    def create(host, login, no_validate_ssl, digest_method="HEAD"):
        """Factory: build a configured Registry, exiting the process on a
        malformed login string."""
        r = Registry()
        (r.username, r.password) = r.parse_login(login)
        if r.last_error is not None:
            log.error(r.last_error)
            exit(1)
        r.hostname = host
        r.no_validate_ssl = no_validate_ssl
        r.http = Requests()
        r.digest_method = digest_method
        return r

    def send(self, path, method="GET"):
        """Issue one API request against self.hostname + path.

        Returns the response object on a 2xx status; otherwise stores the
        status code in self.last_error and returns None.
        """
        if 'bearer' in self.auth_schemes:
            (result, self.HEADERS['Authorization']) = self.http.bearer_request(
                method, "{0}{1}".format(self.hostname, path),
                auth=(('', '') if self.username in ["", None] else (self.username, self.password)),
                headers=self.HEADERS,
                verify=not self.no_validate_ssl)
        else:
            result = self.http.request(
                method, "{0}{1}".format(self.hostname, path),
                headers=self.HEADERS,
                auth=(None if self.username == "" else (self.username, self.password)),
                verify=not self.no_validate_ssl)
        if str(result.status_code)[0] == '2':
            self.last_error = None
            return result
        self.last_error = result.status_code
        return None

    def list_images(self):
        """Return all repository names (up to 10000), or [] on error."""
        result = self.send('/v2/_catalog?n=10000')
        if result is None:
            return []
        return json.loads(result.text)['repositories']

    def list_tags(self, image_name):
        """Return the image's tags sorted in natural order, [] on error."""
        result = self.send("/v2/{0}/tags/list".format(image_name))
        if result is None:
            return []
        try:
            tags_list = json.loads(result.text)['tags']
        except ValueError:
            self.last_error = "list_tags: invalid json response"
            return []
        if tags_list is not None:
            tags_list.sort(key=natural_keys)
        return tags_list

    def get_tag_digest(self, image_name, tag):
        """Resolve a tag to its manifest digest via the
        Docker-Content-Digest response header, or None when unavailable."""
        image_headers = self.send("/v2/{0}/manifests/{1}".format(
            image_name, tag), method=self.digest_method)
        if image_headers is None:
            log.error(" tag digest not found: {0}.".format(self.last_error))
            log.info(get_error_explanation("get_tag_digest", self.last_error))
            return None
        tag_digest = image_headers.headers['Docker-Content-Digest']
        return tag_digest

    def delete_tag(self, image_name, tag, dry_run, tag_digests_to_ignore):
        """Delete the manifest behind ``tag``.

        Digests listed in tag_digests_to_ignore are skipped (they are shared
        by kept tags or already deleted); a successfully deleted digest is
        appended to that list so later calls skip it too.
        Returns True when deleted or skipped as shared, False otherwise.
        """
        if dry_run:
            log.info('Would delete tag in dry run mode: %s', tag)
            return False
        tag_digest = self.get_tag_digest(image_name, tag)
        if tag_digest in tag_digests_to_ignore:
            log.info("Digest {0} for tag {1} is referenced by another tag or has already been deleted and"
                     " will be ignored".format(tag_digest, tag))
            return True
        if tag_digest is None:
            return False
        delete_result = self.send("/v2/{0}/manifests/{1}".format(
            image_name, tag_digest), method="DELETE")
        if delete_result is None:
            log.error("failed, error: {0}".format(self.last_error))
            log.info(get_error_explanation("delete_tag", self.last_error))
            return False
        tag_digests_to_ignore.append(tag_digest)
        log.info("Delete image tag done: image=%s, tag=%s", image_name, tag)
        return True

    def list_tag_layers(self, image_name, tag):
        """Return the manifest's layer list ([] on error); handles both
        schemaVersion 1 ('fsLayers') and 2 ('layers')."""
        layers_result = self.send("/v2/{0}/manifests/{1}".format(image_name, tag))
        if layers_result is None:
            log.error("error {0}".format(self.last_error))
            return []
        json_result = json.loads(layers_result.text)
        if json_result['schemaVersion'] == 1:
            layers = json_result['fsLayers']
        else:
            layers = json_result['layers']
        return layers

    def get_tag_config(self, image_name, tag):
        """Return the manifest's 'config' descriptor (schemaVersion 2 only),
        or None when the manifest is missing or is schemaVersion 1."""
        config_result = self.send("/v2/{0}/manifests/{1}".format(image_name, tag))
        if config_result is None:
            log.error(" tag digest not found: {0}".format(self.last_error))
            return None
        json_result = json.loads(config_result.text)
        log.debug("schemaVersion=%s, image=%s, tag=%s", json_result['schemaVersion'], image_name, tag)
        if json_result['schemaVersion'] == 1:
            # Logger.warn is a deprecated alias of Logger.warning
            log.warning("Docker schemaVersion 1 isn't supported for deleting by age now")
            return None
        else:
            tag_config = json_result['config']
            return tag_config

    def get_image_age(self, image_name, image_config):
        """Fetch the image's config blob and return its 'created' timestamp
        string; returns [] (falsy) when the blob request fails."""
        container_header = {"Accept": "{0}".format(image_config['mediaType'])}
        if 'bearer' in self.auth_schemes:
            container_header['Authorization'] = self.HEADERS['Authorization']
            (response, self.HEADERS['Authorization']) = self.http.bearer_request(
                "GET",
                "{0}{1}".format(self.hostname, "/v2/{0}/blobs/{1}".format(image_name, image_config['digest'])),
                auth=(('', '') if self.username in ["", None] else (self.username, self.password)),
                headers=container_header,
                verify=not self.no_validate_ssl)
        else:
            response = self.http.request("GET", "{0}{1}".format(self.hostname, "/v2/{0}/blobs/{1}".format(
                image_name, image_config['digest'])),
                headers=container_header,
                auth=(None if self.username == "" else (self.username, self.password)),
                verify=not self.no_validate_ssl)
        if str(response.status_code)[0] == '2':
            self.last_error = None
            image_age = json.loads(response.text)
            return image_age['created']
        else:
            log.error(" blob not found: {0}".format(self.last_error))
            self.last_error = response.status_code
            return []
def parse_args(args=None):
    """Define and parse the command-line interface.

    :param args: explicit argument list (for testing); None means sys.argv.
    :returns: the parsed argparse.Namespace.
    """
    p = argparse.ArgumentParser(
        description="List or delete images from Docker registry",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=("""
IMPORTANT: after removing the tags, run the garbage collector
on your registry host:
docker-compose -f [path_to_your_docker_compose_file] run \\
registry bin/registry garbage-collect \\
/etc/docker/registry/config.yml
or if you are not using docker-compose:
docker run registry:2 bin/registry garbage-collect \\
/etc/docker/registry/config.yml
for more detail on garbage collection read here:
https://docs.docker.com/registry/garbage-collection/
"""))
    # connection / credentials
    p.add_argument('-l', '--login', required=False, metavar="USER:PASSWORD",
                   help="Login and password for access to docker registry")
    p.add_argument('-w', '--read-password', action='store_const', const=True, default=False,
                   help="Read password from stdin (and prompt if stdin is a TTY); "
                        "the final line-ending character(s) will be removed; "
                        "the :PASSWORD portion of the -l option is not required and "
                        "will be ignored")
    p.add_argument('-r', '--host', required=False, metavar="URL",
                   help="Hostname for registry server, e.g. https://example.com:5000")
    # deletion policy
    p.add_argument('-d', '--delete', action='store_const', const=True, default=False,
                   help='If specified, delete all but last {0} tags of all images'.format(
                       CONST_KEEP_LAST_VERSIONS))
    p.add_argument('-n', '--num', nargs='?', metavar='N', default=CONST_KEEP_LAST_VERSIONS,
                   help='Set the number of tags to keep({0} if not set)'.format(
                       CONST_KEEP_LAST_VERSIONS))
    p.add_argument('--debug', action='store_const', const=True, default=False,
                   help='Turn debug output')
    p.add_argument('--dry-run', action='store_const', const=True, default=False,
                   help='If used in combination with --delete,then images will not be deleted')
    p.add_argument('-i', '--image', nargs='+', metavar="IMAGE:[TAG]",
                   help='Specify images and tags to list/delete')
    p.add_argument('--keep-tags', nargs='+', required=False, default=[],
                   help="List of tags that will be omitted from deletion if used in combination with --delete or --delete-all")
    p.add_argument('--tags-like', nargs='+', required=False,
                   default=["release-", "prepare-", "meta-", "build-"],
                   help="List of tags (regexp check) that will be handled")
    p.add_argument('--keep-tags-like', nargs='+', required=False, default=[],
                   help="List of tags (regexp check) that will be omitted from deletion if used in combination with --delete or --delete-all")
    p.add_argument('--no-validate-ssl', action='store_const', const=True, default=False,
                   help="Disable ssl validation")
    p.add_argument('--delete-all', action="store_const", const=True, default=False,
                   help="Will delete all tags. Be careful with this!")
    p.add_argument('--layers', action='store_const', const=True, default=False,
                   help='Show layers digests for all images and all tags')
    p.add_argument('--delete-by-days', nargs='?', metavar='Days', default=False,
                   help='Will delete all tags that are older than specified days. Be careful!')
    p.add_argument('--keep-by-days', nargs='?', metavar='Days', default=30,
                   help='Will keep all tags that are newer than specified days. Default keep 30 days')
    p.add_argument('--digest-method', default='HEAD', metavar="HEAD|GET",
                   help='Use HEAD for standard docker registry or GET for NEXUS')
    return p.parse_args(args)
def delete_tags(registry, image_name, dry_run, tags_to_delete, tags_to_keep):
    """Delete tags_to_delete from image_name, never touching a manifest
    digest that is also referenced by a tag in tags_to_keep."""
    if not tags_to_delete:
        log.info("No tags to delete, return...")
        return
    protected_digests = []
    if tags_to_keep:
        # resolve each keeper to its digest first, so shared manifests survive
        log.info("Getting digests for tags to keep:")
        for keeper in tags_to_keep:
            log.info("Getting digest for tag {0}".format(keeper))
            digest = registry.get_tag_digest(image_name, keeper)
            if digest is None:
                log.info("Tag {0} does not exist for image {1}. Ignore here.".format(keeper, image_name))
            else:
                log.info("Keep digest {0} for tag {1}".format(digest, keeper))
                protected_digests.append(digest)
    for candidate in tags_to_delete:
        if candidate in tags_to_keep:
            continue
        log.info(" deleting tag {0}".format(candidate))
        # layer deletion is intentionally not attempted: layers are shared
        registry.delete_tag(image_name, candidate, dry_run, protected_digests)
def get_tags_like(args_tags_like, tags_list):
    """Return the set of tags matching any of the given regular expressions."""
    matched = set()
    for pattern in args_tags_like:
        log.info("tag like: {0}".format(pattern))
        matched.update(t for t in tags_list if re.search(pattern, t))
    return matched
def get_tags(all_tags_list, image_name, tags_like):
    """Group tags into buckets keyed by the regexp pattern they match.

    An explicit ":tag" suffix on image_name restricts processing to that
    single tag. Without patterns, every tag lands in the "others" bucket;
    with patterns, "others" stays empty and each pattern maps to the set of
    matching tags.
    """
    grouped = {"others": set()}
    if ":" in image_name:
        # a tag embedded in the image name narrows the candidate list to it
        image_name, wanted = image_name.split(":")
        all_tags_list = [wanted]
    if not tags_like:
        grouped["others"].update(all_tags_list)
        return grouped
    for pattern in tags_like:
        bucket = grouped.setdefault(pattern, set())
        for tag in all_tags_list:
            if re.search(pattern, tag):
                bucket.add(tag)
    return grouped
def delete_tags_by_age(registry, image_name, dry_run, days, tags_to_keep):
    """Collect tags of image_name created more than `days` days ago and hand
    them to delete_tags() (which still honours tags_to_keep)."""
    expired = []
    log.info('---------------------------------')
    for tag in registry.list_tags(image_name):
        tag_config = registry.get_tag_config(image_name, tag)
        if not tag_config:
            log.info("tag not found")
            continue
        created = registry.get_image_age(image_name, tag_config)
        if not created:
            log.info("timestamp not found")
            continue
        # NOTE(review): [:-4] assumes timestamps like
        # 2020-01-01T00:00:00.123456789Z (nanoseconds + Z) -- confirm
        if dt.strptime(created[:-4], "%Y-%m-%dT%H:%M:%S.%f") < dt.now() - timedelta(days=int(days)):
            log.info("will be deleted tag: {0} timestamp: {1}".format(tag, created))
            expired.append(tag)
    log.info('------------deleting: delete_tags_by_age-------------')
    delete_tags(registry, image_name, dry_run, expired, tags_to_keep)
def get_newer_tags(registry, image_name, days, tags_list):
    """Return the subset of tags_list whose image was created within the
    last `days` days (these are the tags to keep).

    :param registry: Registry instance used to fetch manifests/blobs.
    :param days: age threshold in days (string or int).
    """
    newer_tags = []
    log.info('---------------------------------')
    for tag in tags_list:
        image_config = registry.get_tag_config(image_name, tag)
        if not image_config:
            # fixed: Logger.warn is a deprecated alias of Logger.warning
            log.warning("image tag config not found")
            continue
        image_age = registry.get_image_age(image_name, image_config)
        if not image_age:
            log.info("timestamp not found")
            continue
        # NOTE(review): [:-4] assumes nanosecond+Z timestamps -- confirm
        if dt.strptime(image_age[:-4], "%Y-%m-%dT%H:%M:%S.%f") >= dt.now() - timedelta(days=int(days)):
            log.info("Keeping tag: {0} timestamp: {1}".format(tag, image_age))
            newer_tags.append(tag)
    return newer_tags
def main_loop(args):
    """Drive the tool: resolve credentials, connect to the registry, then for
    every selected image list tags/layers and apply the configured deletion
    policies (--delete / --delete-all / --delete-by-days).

    :param args: parsed argparse.Namespace from parse_args().
    """
    if args.debug:
        log.setLevel(logging.DEBUG)
    keep_last_versions = int(args.num)
    if args.no_validate_ssl:
        urllib3.disable_warnings(InsecureRequestWarning)
    if args.read_password:
        if args.login is None:
            log.info("Please provide -l when using -w")
            exit(1)
        if ':' in args.login:
            (username, password) = args.login.split(':', 1)
        else:
            username = args.login
        if sys.stdin.isatty():
            # likely interactive usage: prompt without echo
            # fixed: an invalid placeholder token stood here; getpass is
            # imported at module level
            password = getpass()
        else:
            # allow password to be piped or redirected in
            password = sys.stdin.read()
        if len(password) == 0:
            log.info("Password was not provided")
            exit(1)
        if password[-(len(os.linesep)):] == os.linesep:
            # drop a single trailing platform line ending from piped input
            password = password[0:-(len(os.linesep))]
        args.login = username + ':' + password
    registry = Registry.create(args.host, args.login, args.no_validate_ssl, args.digest_method)
    registry.auth_schemes = get_auth_schemes(registry, '/v2/_catalog')
    if args.delete:
        log.info("Will delete all but {0} last tags".format(keep_last_versions))
    if args.image is not None:
        image_list = args.image
    else:
        image_list = registry.list_images()
    # loop through registry's images
    # or through the ones given in command line
    for image_name in image_list:
        log.info("---------------------------------")
        log.info("Image: {0}".format(image_name))
        all_tags_list = registry.list_tags(image_name)
        if not all_tags_list:
            log.info(" no tags!")
            continue
        tags_like_dict = get_tags(all_tags_list, image_name, args.tags_like)
        tags_list = [v for k in tags_like_dict for v in tags_like_dict[k]]
        log.debug("-------all tags for image: %s-----", image_name)
        log.debug(pprint.pformat(sorted(tags_list, key=natural_keys)))
        # print tags and optionally layers
        for tag in tags_list:
            if args.layers:
                log.info(" tag: {0}".format(tag))
                for layer in registry.list_tag_layers(image_name, tag):
                    if 'size' in layer:
                        log.info(" layer: {0}, size: {1}".format(layer['digest'], layer['size']))
                    else:
                        log.info(" layer: {0}".format(layer['blobSum']))
        # collect tags to keep: the newest keep_last_versions per pattern
        # bucket, plus --keep-tags-like matches, plus tags newer than
        # --keep-by-days
        keep_tags = []
        # fixed: dict.iteritems() exists only on Python 2; items() works on
        # both versions (the file otherwise supports Python 3)
        for k, v in tags_like_dict.items():
            to_keep = sorted(v, key=natural_keys, reverse=True)[:keep_last_versions]
            keep_tags.extend(to_keep)
        if args.keep_tags_like:
            keep_tags.extend(get_tags_like(args.keep_tags_like, tags_list))
        if args.keep_by_days:
            keep_tags.extend(get_newer_tags(registry, image_name, args.keep_by_days, tags_list))
        # delete tags if told so
        if args.delete or args.delete_all:
            if args.delete_all:
                tags_list_to_delete = list(tags_list)
            else:
                tags_list_to_delete = [tag for tag in tags_list if tag not in keep_tags]
            keep_tags = sorted(set(keep_tags), key=natural_keys)  # Eliminate duplicates
            log.debug("-------keep tags---------")
            log.debug(pprint.pformat(keep_tags))
            log.info("-------to deleted tags---------")
            log.info(pprint.pformat(tags_list_to_delete))
            delete_tags(registry, image_name, args.dry_run, tags_list_to_delete, keep_tags)
        # delete tags by age in days
        if args.delete_by_days:
            keep_tags.extend(args.keep_tags)
            keep_tags = list(set(keep_tags))  # Eliminate duplicates
            delete_tags_by_age(registry, image_name, args.dry_run, args.delete_by_days, keep_tags)
if __name__ == "__main__":
    args = parse_args()
    if args.debug:
        log.setLevel(logging.DEBUG)
        log.debug(pprint.pformat(args))
    # credentials and registry host come from secret.json next to this
    # script; they override any -l/-r values given on the command line
    with open(os.path.join(os.path.dirname(__file__), "secret.json"), "r") as f:
        data = json.load(f)
    try:
        args.login = "%s:%s" % (data['username'], data['password'])
        args.host = data['host']
    except KeyError as e:
        # secret.json lacks one of the required keys: username/password/host
        log.error("raised keyError: %s", e)
        exit(1)
    try:
        main_loop(args)
    except KeyboardInterrupt:
        log.info("Ctrl-C pressed, quitting")
        exit(1)
| #!/usr/bin/env python
"""
this is a registry manipulator, can do following:
- list all images (including layers)
- delete images
- all except last N images
- all images and/or tags
#
run
registry.py -h
to get more help
#
important: after removing the tags, run the garbage collector
on your registry host:
docker-compose -f [path_to_your_docker_compose_file] run \
registry bin/registry garbage-collect \
/etc/docker/registry/config.yml
#
or if you are not using docker-compose:
docker run registry:2 bin/registry garbage-collect \
/etc/docker/registry/config.yml
#
for more detail on garbage collection read here:
https://docs.docker.com/registry/garbage-collection/
"""
import requests
import urllib3
import pprint
import base64
import re
import sys
import json
import os
import argparse
import www_authenticate
import logging
from logging import handlers
from getpass import getpass
from datetime import timedelta, datetime as dt
from urllib3.exceptions import InsecureRequestWarning
# module logger: INFO and above to stdout, DEBUG and above to a rotating
# log file (two rotations of 10 MiB each)
log = logging.Logger(__file__)
log.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
handler.setFormatter(
    logging.Formatter("[%(levelname)s] %(asctime)s,%(lineno)4d, %(funcName)s : %(message)s", '%Y-%m-%d %H:%M:%S'))
# NOTE(review): the log path is hard coded; presumably the script runs in an
# environment where /lain/app exists and is writable -- confirm
fileHandler = handlers.RotatingFileHandler('/lain/app/clean.log', 'a', 10 * 1024 * 1024, 2)
fileHandler.setLevel(logging.DEBUG)
fileHandler.setFormatter(
    logging.Formatter("[%(levelname)s] %(asctime)s,%(lineno)4d, %(funcName)s : %(message)s", '%Y-%m-%d %H:%M:%S'))
log.addHandler(handler)
log.addHandler(fileHandler)
# number of image versions to keep
CONST_KEEP_LAST_VERSIONS = 100
# this class is created for testing
class Requests:
    """Thin wrapper over the `requests` library (created so HTTP traffic can
    be stubbed in tests) that also centralizes bearer-token refresh."""

    def request(self, method, url, **kwargs):
        """Plain pass-through to requests.request."""
        return requests.request(method, url, **kwargs)

    def bearer_request(self, method, url, auth, **kwargs):
        """Perform a request with bearer-token auth, transparently fetching a
        fresh token once when the registry answers 401.

        :param auth: (user, password) pair used against the token endpoint.
        :returns: (response, Authorization header value) tuple.
        """
        log.debug("bearer_request()")
        log.debug('[registry][request]: {0} {1}'.format(method, url))
        if 'Authorization' in kwargs['headers']:
            log.debug('[registry][request]: Authorization header:')
            token_parsed = kwargs['headers']['Authorization'].split('.')
            log.debug(pprint.pformat(json.loads(decode_base64(token_parsed[0]))))
            log.debug(pprint.pformat(json.loads(decode_base64(token_parsed[1]))))
        res = requests.request(method, url, **kwargs)
        if str(res.status_code)[0] == '2':
            log.debug("[registry] accepted")
            return res, kwargs['headers']['Authorization']
        if res.status_code == 401:
            # token expired or absent: ask the auth endpoint announced in
            # the Www-Authenticate header for a fresh one, then retry once
            log.debug("[registry] Access denied. Refreshing token...")
            oauth = www_authenticate.parse(res.headers['Www-Authenticate'])
            log.debug('[auth][answer] Auth header:')
            log.debug(pprint.pformat(oauth['bearer']))
            log.info('retreiving bearer token for {0}'.format(oauth['bearer']['scope']))
            request_url = '{0}?service={1}&scope={2}'.format(oauth['bearer']['realm'],
                                                             oauth['bearer']['service'],
                                                             oauth['bearer']['scope'])
            log.debug('[debug][auth][request] Refreshing auth token: POST {0}'.format(request_url))
            try_oauth = requests.post(request_url, auth=auth, **kwargs)
            try:
                # fixed: json.loads raises ValueError (not SyntaxError) on a
                # malformed body and a token-less body raises KeyError, so the
                # former `except SyntaxError` could never fire; also use the
                # public .content instead of the private ._content attribute
                token = json.loads(try_oauth.content)['token']
                log.info(">>> token: {}".format(token))
            except (ValueError, KeyError):
                log.error("\n\ncouldn't acquire token: {0}".format(try_oauth.content))
                sys.exit(1)
            token_parsed = token.split('.')
            log.debug('[auth] token issued: ')
            log.debug(pprint.pformat(json.loads(decode_base64(token_parsed[0]))))
            log.debug(pprint.pformat(json.loads(decode_base64(token_parsed[1]))))
            kwargs['headers']['Authorization'] = 'Bearer {0}'.format(token)
        else:
            # neither success nor 401: hand the failure back unchanged
            return res, kwargs['headers']['Authorization']
        res = requests.request(method, url, **kwargs)
        return res, kwargs['headers']['Authorization']
def natural_keys(text):
    """
    Sort key for human ("natural") ordering, e.g. img2 < img10.
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    """
    return [int(chunk) if chunk.isdigit() else chunk
            for chunk in re.split(r'(\d+)', text)]
def decode_base64(data):
    """
    Decode base64, padding being optional.

    :param data: Base64 data as an ASCII byte string (a leading
        'Bearer ' prefix is stripped first).
    :returns: The decoded byte string.
    """
    data = data.replace('Bearer ', '')
    log.debug('base64 string to decode:\n{0}'.format(data))
    missing_padding = len(data) % 4
    if missing_padding != 0:
        data += '=' * (4 - missing_padding)
    # base64.b64decode accepts an ASCII str on both Python 2 and 3, which
    # replaces the former version branch between decodestring (removed in
    # Python 3.9) and decodebytes.
    # NOTE(review): JWT segments are usually base64url ('-'/'_' alphabet);
    # the standard alphabet used here matches the original behavior but may
    # fail on such tokens -- confirm against real registry tokens.
    return base64.b64decode(data)
def get_error_explanation(context, error_code):
    """Map a (context, HTTP status) pair to a human-readable hint, or ''."""
    hints = {
        "delete_tag_405": 'You might want to set REGISTRY_STORAGE_DELETE_ENABLED: "true" in your registry',
        "get_tag_digest_404": "Try adding flag --digest-method=GET",
    }
    return hints.get("%s_%s" % (context, error_code), '')
def get_auth_schemes(r, path):
    """
    Probe the registry with a HEAD request and report its auth schemes.

    Returns a list of auth schemes (lowercased) when a Www-Authenticate
    header exists, e.g.:
     - www-authenticate: basic
     - www-authenticate: bearer
    Returns an empty list (not None) when no such header is found.
    """
    try_oauth = requests.head('{0}{1}'.format(r.hostname, path), verify=not r.no_validate_ssl)
    if 'Www-Authenticate' in try_oauth.headers:
        oauth = www_authenticate.parse(try_oauth.headers['Www-Authenticate'])
        log.debug('[docker] Auth schemes found:{0}'.format([m for m in oauth]))
        return [m.lower() for m in oauth]
    else:
        log.debug('[docker] No Auth schemes found')
        return []
# class to manipulate registry
class Registry:
    """Minimal client for the Docker Registry HTTP API v2.

    Lists repositories and tags, resolves tag digests, fetches manifests and
    config blobs, and deletes manifests, using either basic or bearer-token
    authentication. Non-2xx responses store the status code in
    ``self.last_error`` and make the calling method return None/[].
    """

    # this Accept header is required for proper digest processing: it makes
    # the registry answer with a v2 manifest whose digest DELETE expects
    HEADERS = {"Accept": "application/vnd.docker.distribution.manifest.v2+json"}

    def __init__(self):
        self.username = None
        # fixed: the previous revision held an invalid placeholder token here;
        # credentials start unset until parse_login()/create() fills them in
        self.password = None
        self.auth_schemes = []
        self.hostname = None
        self.no_validate_ssl = False
        self.http = None
        self.last_error = None
        self.digest_method = "HEAD"

    def parse_login(self, login):
        """Split a "USER:PASSWORD" string into (user, password).

        Surrounding single/double quotes around each part are stripped.
        Returns (None, None) — and sets self.last_error — when the input is
        None or lacks the ':' separator.
        """
        if login is not None:
            if ':' not in login:
                self.last_error = "Please provide -l in the form USER:PASSWORD"
                return None, None
            self.last_error = None
            (username, password) = login.split(':', 1)
            username = username.strip('"').strip("'")
            password = password.strip('"').strip("'")
            return username, password
        return None, None

    @staticmethod
    def create(host, login, no_validate_ssl, digest_method="HEAD"):
        """Factory: build a configured Registry, exiting the process on a
        malformed login string."""
        r = Registry()
        (r.username, r.password) = r.parse_login(login)
        if r.last_error is not None:
            log.error(r.last_error)
            exit(1)
        r.hostname = host
        r.no_validate_ssl = no_validate_ssl
        r.http = Requests()
        r.digest_method = digest_method
        return r

    def send(self, path, method="GET"):
        """Issue one API request against self.hostname + path.

        Returns the response object on a 2xx status; otherwise stores the
        status code in self.last_error and returns None.
        """
        if 'bearer' in self.auth_schemes:
            (result, self.HEADERS['Authorization']) = self.http.bearer_request(
                method, "{0}{1}".format(self.hostname, path),
                auth=(('', '') if self.username in ["", None] else (self.username, self.password)),
                headers=self.HEADERS,
                verify=not self.no_validate_ssl)
        else:
            result = self.http.request(
                method, "{0}{1}".format(self.hostname, path),
                headers=self.HEADERS,
                auth=(None if self.username == "" else (self.username, self.password)),
                verify=not self.no_validate_ssl)
        if str(result.status_code)[0] == '2':
            self.last_error = None
            return result
        self.last_error = result.status_code
        return None

    def list_images(self):
        """Return all repository names (up to 10000), or [] on error."""
        result = self.send('/v2/_catalog?n=10000')
        if result is None:
            return []
        return json.loads(result.text)['repositories']

    def list_tags(self, image_name):
        """Return the image's tags sorted in natural order, [] on error."""
        result = self.send("/v2/{0}/tags/list".format(image_name))
        if result is None:
            return []
        try:
            tags_list = json.loads(result.text)['tags']
        except ValueError:
            self.last_error = "list_tags: invalid json response"
            return []
        if tags_list is not None:
            tags_list.sort(key=natural_keys)
        return tags_list

    def get_tag_digest(self, image_name, tag):
        """Resolve a tag to its manifest digest via the
        Docker-Content-Digest response header, or None when unavailable."""
        image_headers = self.send("/v2/{0}/manifests/{1}".format(
            image_name, tag), method=self.digest_method)
        if image_headers is None:
            log.error(" tag digest not found: {0}.".format(self.last_error))
            log.info(get_error_explanation("get_tag_digest", self.last_error))
            return None
        tag_digest = image_headers.headers['Docker-Content-Digest']
        return tag_digest

    def delete_tag(self, image_name, tag, dry_run, tag_digests_to_ignore):
        """Delete the manifest behind ``tag``.

        Digests listed in tag_digests_to_ignore are skipped (they are shared
        by kept tags or already deleted); a successfully deleted digest is
        appended to that list so later calls skip it too.
        Returns True when deleted or skipped as shared, False otherwise.
        """
        if dry_run:
            log.info('Would delete tag in dry run mode: %s', tag)
            return False
        tag_digest = self.get_tag_digest(image_name, tag)
        if tag_digest in tag_digests_to_ignore:
            log.info("Digest {0} for tag {1} is referenced by another tag or has already been deleted and"
                     " will be ignored".format(tag_digest, tag))
            return True
        if tag_digest is None:
            return False
        delete_result = self.send("/v2/{0}/manifests/{1}".format(
            image_name, tag_digest), method="DELETE")
        if delete_result is None:
            log.error("failed, error: {0}".format(self.last_error))
            log.info(get_error_explanation("delete_tag", self.last_error))
            return False
        tag_digests_to_ignore.append(tag_digest)
        log.info("Delete image tag done: image=%s, tag=%s", image_name, tag)
        return True

    def list_tag_layers(self, image_name, tag):
        """Return the manifest's layer list ([] on error); handles both
        schemaVersion 1 ('fsLayers') and 2 ('layers')."""
        layers_result = self.send("/v2/{0}/manifests/{1}".format(image_name, tag))
        if layers_result is None:
            log.error("error {0}".format(self.last_error))
            return []
        json_result = json.loads(layers_result.text)
        if json_result['schemaVersion'] == 1:
            layers = json_result['fsLayers']
        else:
            layers = json_result['layers']
        return layers

    def get_tag_config(self, image_name, tag):
        """Return the manifest's 'config' descriptor (schemaVersion 2 only),
        or None when the manifest is missing or is schemaVersion 1."""
        config_result = self.send("/v2/{0}/manifests/{1}".format(image_name, tag))
        if config_result is None:
            log.error(" tag digest not found: {0}".format(self.last_error))
            return None
        json_result = json.loads(config_result.text)
        log.debug("schemaVersion=%s, image=%s, tag=%s", json_result['schemaVersion'], image_name, tag)
        if json_result['schemaVersion'] == 1:
            # Logger.warn is a deprecated alias of Logger.warning
            log.warning("Docker schemaVersion 1 isn't supported for deleting by age now")
            return None
        else:
            tag_config = json_result['config']
            return tag_config

    def get_image_age(self, image_name, image_config):
        """Fetch the image's config blob and return its 'created' timestamp
        string; returns [] (falsy) when the blob request fails."""
        container_header = {"Accept": "{0}".format(image_config['mediaType'])}
        if 'bearer' in self.auth_schemes:
            container_header['Authorization'] = self.HEADERS['Authorization']
            (response, self.HEADERS['Authorization']) = self.http.bearer_request(
                "GET",
                "{0}{1}".format(self.hostname, "/v2/{0}/blobs/{1}".format(image_name, image_config['digest'])),
                auth=(('', '') if self.username in ["", None] else (self.username, self.password)),
                headers=container_header,
                verify=not self.no_validate_ssl)
        else:
            response = self.http.request("GET", "{0}{1}".format(self.hostname, "/v2/{0}/blobs/{1}".format(
                image_name, image_config['digest'])),
                headers=container_header,
                auth=(None if self.username == "" else (self.username, self.password)),
                verify=not self.no_validate_ssl)
        if str(response.status_code)[0] == '2':
            self.last_error = None
            image_age = json.loads(response.text)
            return image_age['created']
        else:
            log.error(" blob not found: {0}".format(self.last_error))
            self.last_error = response.status_code
            return []
def parse_args(args=None):
    """Define and parse the command-line interface.

    :param args: explicit argument list (for testing); None means sys.argv.
    :returns: the parsed argparse.Namespace.
    """
    p = argparse.ArgumentParser(
        description="List or delete images from Docker registry",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=("""
IMPORTANT: after removing the tags, run the garbage collector
on your registry host:
docker-compose -f [path_to_your_docker_compose_file] run \\
registry bin/registry garbage-collect \\
/etc/docker/registry/config.yml
or if you are not using docker-compose:
docker run registry:2 bin/registry garbage-collect \\
/etc/docker/registry/config.yml
for more detail on garbage collection read here:
https://docs.docker.com/registry/garbage-collection/
"""))
    # connection / credentials
    p.add_argument('-l', '--login', required=False, metavar="USER:PASSWORD",
                   help="Login and password for access to docker registry")
    p.add_argument('-w', '--read-password', action='store_const', const=True, default=False,
                   help="Read password from stdin (and prompt if stdin is a TTY); "
                        "the final line-ending character(s) will be removed; "
                        "the :PASSWORD portion of the -l option is not required and "
                        "will be ignored")
    p.add_argument('-r', '--host', required=False, metavar="URL",
                   help="Hostname for registry server, e.g. https://example.com:5000")
    # deletion policy
    p.add_argument('-d', '--delete', action='store_const', const=True, default=False,
                   help='If specified, delete all but last {0} tags of all images'.format(
                       CONST_KEEP_LAST_VERSIONS))
    p.add_argument('-n', '--num', nargs='?', metavar='N', default=CONST_KEEP_LAST_VERSIONS,
                   help='Set the number of tags to keep({0} if not set)'.format(
                       CONST_KEEP_LAST_VERSIONS))
    p.add_argument('--debug', action='store_const', const=True, default=False,
                   help='Turn debug output')
    p.add_argument('--dry-run', action='store_const', const=True, default=False,
                   help='If used in combination with --delete,then images will not be deleted')
    p.add_argument('-i', '--image', nargs='+', metavar="IMAGE:[TAG]",
                   help='Specify images and tags to list/delete')
    p.add_argument('--keep-tags', nargs='+', required=False, default=[],
                   help="List of tags that will be omitted from deletion if used in combination with --delete or --delete-all")
    p.add_argument('--tags-like', nargs='+', required=False,
                   default=["release-", "prepare-", "meta-", "build-"],
                   help="List of tags (regexp check) that will be handled")
    p.add_argument('--keep-tags-like', nargs='+', required=False, default=[],
                   help="List of tags (regexp check) that will be omitted from deletion if used in combination with --delete or --delete-all")
    p.add_argument('--no-validate-ssl', action='store_const', const=True, default=False,
                   help="Disable ssl validation")
    p.add_argument('--delete-all', action="store_const", const=True, default=False,
                   help="Will delete all tags. Be careful with this!")
    p.add_argument('--layers', action='store_const', const=True, default=False,
                   help='Show layers digests for all images and all tags')
    p.add_argument('--delete-by-days', nargs='?', metavar='Days', default=False,
                   help='Will delete all tags that are older than specified days. Be careful!')
    p.add_argument('--keep-by-days', nargs='?', metavar='Days', default=30,
                   help='Will keep all tags that are newer than specified days. Default keep 30 days')
    p.add_argument('--digest-method', default='HEAD', metavar="HEAD|GET",
                   help='Use HEAD for standard docker registry or GET for NEXUS')
    return p.parse_args(args)
def delete_tags(registry, image_name, dry_run, tags_to_delete, tags_to_keep):
    """Delete tags_to_delete from image_name, never touching a manifest
    digest that is also referenced by a tag in tags_to_keep."""
    if not tags_to_delete:
        log.info("No tags to delete, return...")
        return
    protected_digests = []
    if tags_to_keep:
        # resolve each keeper to its digest first, so shared manifests survive
        log.info("Getting digests for tags to keep:")
        for keeper in tags_to_keep:
            log.info("Getting digest for tag {0}".format(keeper))
            digest = registry.get_tag_digest(image_name, keeper)
            if digest is None:
                log.info("Tag {0} does not exist for image {1}. Ignore here.".format(keeper, image_name))
            else:
                log.info("Keep digest {0} for tag {1}".format(digest, keeper))
                protected_digests.append(digest)
    for candidate in tags_to_delete:
        if candidate in tags_to_keep:
            continue
        log.info(" deleting tag {0}".format(candidate))
        # layer deletion is intentionally not attempted: layers are shared
        registry.delete_tag(image_name, candidate, dry_run, protected_digests)
def get_tags_like(args_tags_like, tags_list):
    """Return the set of tags matching any of the given regular expressions."""
    matched = set()
    for pattern in args_tags_like:
        log.info("tag like: {0}".format(pattern))
        matched.update(t for t in tags_list if re.search(pattern, t))
    return matched
def get_tags(all_tags_list, image_name, tags_like):
    """Group tags into buckets keyed by the regexp pattern they match.

    An explicit ":tag" suffix on image_name restricts processing to that
    single tag. Without patterns, every tag lands in the "others" bucket;
    with patterns, "others" stays empty and each pattern maps to the set of
    matching tags.
    """
    grouped = {"others": set()}
    if ":" in image_name:
        # a tag embedded in the image name narrows the candidate list to it
        image_name, wanted = image_name.split(":")
        all_tags_list = [wanted]
    if not tags_like:
        grouped["others"].update(all_tags_list)
        return grouped
    for pattern in tags_like:
        bucket = grouped.setdefault(pattern, set())
        for tag in all_tags_list:
            if re.search(pattern, tag):
                bucket.add(tag)
    return grouped
def delete_tags_by_age(registry, image_name, dry_run, days, tags_to_keep):
    """Collect tags of image_name created more than `days` days ago and hand
    them to delete_tags() (which still honours tags_to_keep)."""
    expired = []
    log.info('---------------------------------')
    for tag in registry.list_tags(image_name):
        tag_config = registry.get_tag_config(image_name, tag)
        if not tag_config:
            log.info("tag not found")
            continue
        created = registry.get_image_age(image_name, tag_config)
        if not created:
            log.info("timestamp not found")
            continue
        # NOTE(review): [:-4] assumes timestamps like
        # 2020-01-01T00:00:00.123456789Z (nanoseconds + Z) -- confirm
        if dt.strptime(created[:-4], "%Y-%m-%dT%H:%M:%S.%f") < dt.now() - timedelta(days=int(days)):
            log.info("will be deleted tag: {0} timestamp: {1}".format(tag, created))
            expired.append(tag)
    log.info('------------deleting: delete_tags_by_age-------------')
    delete_tags(registry, image_name, dry_run, expired, tags_to_keep)
def get_newer_tags(registry, image_name, days, tags_list):
    """Return the subset of tags_list whose image was created within the
    last `days` days (these are the tags to keep).

    :param registry: Registry instance used to fetch manifests/blobs.
    :param days: age threshold in days (string or int).
    """
    newer_tags = []
    log.info('---------------------------------')
    for tag in tags_list:
        image_config = registry.get_tag_config(image_name, tag)
        if not image_config:
            # fixed: Logger.warn is a deprecated alias of Logger.warning
            log.warning("image tag config not found")
            continue
        image_age = registry.get_image_age(image_name, image_config)
        if not image_age:
            log.info("timestamp not found")
            continue
        # NOTE(review): [:-4] assumes nanosecond+Z timestamps -- confirm
        if dt.strptime(image_age[:-4], "%Y-%m-%dT%H:%M:%S.%f") >= dt.now() - timedelta(days=int(days)):
            log.info("Keeping tag: {0} timestamp: {1}".format(tag, image_age))
            newer_tags.append(tag)
    return newer_tags
def main_loop(args):
    """Drive one full cleanup pass over the registry.

    Resolves credentials, connects, then for every selected image lists and
    filters its tags, decides which ones to keep, and optionally deletes the
    rest (``delete``/``delete_all``) or only the old ones (``delete_by_days``).
    """
    if args.debug:
        log.setLevel(logging.DEBUG)
    keep_last_versions = int(args.num)
    if args.no_validate_ssl:
        # The user opted out of certificate validation; silence urllib3's
        # InsecureRequestWarning spam.
        urllib3.disable_warnings(InsecureRequestWarning)
    if args.read_password:
        if args.login is None:
            log.info("Please provide -l when using -w")
            exit(1)
        if ':' in args.login:
            # Credentials already embedded as "user:pass"; split only on the
            # first colon so passwords may themselves contain colons.
            (username, password) = args.login.split(':', 1)
        else:
            username = args.login
            if sys.stdin.isatty():
                # likely interactive usage
                # NOTE(review): "<PASSWORD>" is a redaction artifact in this
                # snapshot; the original presumably called getpass.getpass()
                # -- restore before running.
                password = <PASSWORD>()
            else:
                # allow password to be piped or redirected in
                password = sys.stdin.read()
            if len(password) == 0:
                log.info("Password was not provided")
                exit(1)
            if password[-(len(os.linesep)):] == os.linesep:
                # Strip a single trailing newline left by echo/pipes.
                password = password[0:-(len(os.linesep))]
        args.login = username + ':' + password
    registry = Registry.create(args.host, args.login, args.no_validate_ssl, args.digest_method)
    registry.auth_schemes = get_auth_schemes(registry, '/v2/_catalog')
    if args.delete:
        log.info("Will delete all but {0} last tags".format(keep_last_versions))
    if args.image is not None:
        image_list = args.image
    else:
        image_list = registry.list_images()
    # loop through registry's images
    # or through the ones given in command line
    for image_name in image_list:
        log.info("---------------------------------")
        log.info("Image: {0}".format(image_name))
        all_tags_list = registry.list_tags(image_name)
        if not all_tags_list:
            log.info(" no tags!")
            continue
        # Bucket the tags by the --tags-like regexes (or all under "others").
        tags_like_dict = get_tags(all_tags_list, image_name, args.tags_like)
        tags_list = [v for k in tags_like_dict for v in tags_like_dict[k]]
        log.debug("-------all tags for image: %s-----", image_name)
        log.debug(pprint.pformat(sorted(tags_list, key=natural_keys)))
        # print tags and, when requested, each tag's layer digests/sizes
        for tag in tags_list:
            if args.layers:
                log.info(" tag: {0}".format(tag))
                for layer in registry.list_tag_layers(image_name, tag):
                    if 'size' in layer:
                        log.info(" layer: {0}, size: {1}".format(layer['digest'], layer['size']))
                    else:
                        log.info(" layer: {0}".format(layer['blobSum']))
        # add tags to "tags_to_keep" list, if we have regexp "tags_to_keep"
        # entries or a number of hours for "keep_by_hours":
        keep_tags = []
        # NOTE(review): dict.iteritems() exists only on Python 2; this line
        # breaks on Python 3 (use .items()).
        for k, v in tags_like_dict.iteritems():
            # Keep the N newest tags (natural sort, descending) per bucket.
            to_keep = sorted(v, key=natural_keys, reverse=True)[:keep_last_versions]
            keep_tags.extend(to_keep)
        if args.keep_tags_like:
            keep_tags.extend(get_tags_like(args.keep_tags_like, tags_list))
        if args.keep_by_days:
            keep_tags.extend(get_newer_tags(registry, image_name, args.keep_by_days, tags_list))
        # delete tags if told so
        if args.delete or args.delete_all:
            if args.delete_all:
                tags_list_to_delete = list(tags_list)
            else:
                tags_list_to_delete = [tag for tag in tags_list if tag not in keep_tags]
            keep_tags = sorted(set(keep_tags), key=natural_keys) # Eliminate duplicates
            log.debug("-------keep tags---------")
            log.debug(pprint.pformat(keep_tags))
            log.info("-------to deleted tags---------")
            log.info(pprint.pformat(tags_list_to_delete))
            delete_tags(registry, image_name, args.dry_run, tags_list_to_delete, keep_tags)
        # delete tags by age in days
        if args.delete_by_days:
            keep_tags.extend(args.keep_tags)
            keep_tags = list(set(keep_tags)) # Eliminate duplicates
            delete_tags_by_age(registry, image_name, args.dry_run, args.delete_by_days, keep_tags)
if __name__ == "__main__":
args = parse_args()
if args.debug:
log.setLevel(logging.DEBUG)
log.debug(pprint.pformat(args))
with open(os.path.join(os.path.dirname(__file__), "secret.json"), "r") as f:
data = json.load(f)
try:
args.login = "%s:%s" % (data['username'], data['password'])
args.host = data['host']
except KeyError as e:
log.error("raised keyError: %s", e)
exit(1)
try:
main_loop(args)
except KeyboardInterrupt:
log.info("Ctrl-C pressed, quitting")
exit(1)
| en | 0.647358 | #!/usr/bin/env python this is a registry manipulator, can do following: - list all images (including layers) - delete images - all except last N images - all images and/or tags # run registry.py -h to get more help # important: after removing the tags, run the garbage collector on your registry host: docker-compose -f [path_to_your_docker_compose_file] run \ registry bin/registry garbage-collect \ /etc/docker/registry/config.yml # or if you are not using docker-compose: docker run registry:2 bin/registry garbage-collect \ /etc/docker/registry/config.yml # for more detail on garbage collection read here: https://docs.docker.com/registry/garbage-collection/ # number of image versions to keep # this class is created for testing # request_url = '{0}?service={1}&scope={2}'.format(oauth['bearer']['realm'], # oauth['bearer']['service'], # oauth['bearer']['scope']) alist.sort(key=natural_keys) sorts in human order http://nedbatchelder.com/blog/200712/human_sorting.html (See Toothy's implementation in the comments) Decode base64, padding being optional. :param data: Base64 data as an ASCII byte string :returns: The decoded byte string. 
Returns list of auth schemes(lowcased) if www-authenticate: header exists returns None if no header found - www-authenticate: basic - www-authenticate: bearer # class to manipulate registry # this is required for proper digest processing # except Exception as error: # print("cannot connect to {0}\nerror {1}".format( # self.hostname, # error)) # exit(1) # def list_tags_like(self, tag_like, args_tags_like): # for tag_like in args_tags_like: # print("tag like: {0}".format(tag_like)) # for tag in all_tags_list: # if re.search(tag_like, tag): # print("Adding {0} to tags list".format(tag)) IMPORTANT: after removing the tags, run the garbage collector on your registry host: docker-compose -f [path_to_your_docker_compose_file] run \\ registry bin/registry garbage-collect \\ /etc/docker/registry/config.yml or if you are not using docker-compose: docker run registry:2 bin/registry garbage-collect \\ /etc/docker/registry/config.yml for more detail on garbage collection read here: https://docs.docker.com/registry/garbage-collection/ # deleting layers is disabled because # it also deletes shared layers ## # for layer in registry.list_tag_layers(image_name, tag): # layer_digest = layer['digest'] # registry.delete_tag_layer(image_name, layer_digest, dry_run) return a dict # no ":" in image_name actually, if ":" specify in image name, will only process this tag # get tags from image name if any # likely interactive usage # allow password to be piped or redirected in # loop through registry's images # or through the ones given in command line # print(tags and optionally layers # add tags to "tags_to_keep" list, if we have regexp "tags_to_keep" # entries or a number of hours for "keep_by_hours": # delete tags if told so # Eliminate duplicates # delete tags by age in days # Eliminate duplicates | 2.453487 | 2 |
src/touri_core/scripts/touri_core/touri.py | sudo-prakhar/touri_ros-1 | 0 | 6621423 | #!/usr/bin/env python
"""
TouRI Robot Base Code
"""
__author__ = "<NAME>"
__mail__ = "<EMAIL>"
__copyright__ = "NONE"
# -----------------------------------------------------------------------------
import rospy
from stretch_body.robot import Robot
from touri_mani.msg import TeleopMani
from touri_nav.msg import TeleopNav
from pynput import keyboard
import time
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# TouRI_bot
class TouRI_bot(Robot):
    """Hardware wrapper around the Stretch ``Robot`` that brings up the
    base, lift and arm subsystems and holds simple teleop motion defaults."""
    def __init__(self):
        Robot.__init__(self)
        # Bring up the main robot controller first, then each actuator group.
        self.startup()
        self.base.startup()
        self.lift.startup()
        self.arm.startup()
        self.setup_motion_vars()
    # -------------------------------------------------------------------------
    def setup_motion_vars(self):
        """Initialise default teleop velocities and incremental step sizes."""
        self.base_trans_vel = 0  # mobile-base translational velocity
        self.base_rot_vel = 0    # mobile-base rotational velocity
        self.lift_step = 0.1     # per-command lift increment
        self.arm_step = 0.1      # per-command arm-extension increment
    # -------------------------------------------------------------------------
    def _stop(self):
        """Stop every actuator group, then the robot itself (note: distinct
        from the inherited ``stop``, which this calls last)."""
        self.arm.stop()
        self.lift.stop()
        self.base.stop()
        self.stop()
    # -------------------------------------------------------------------------
    def __str__(self):
        return "TouRI - An RI Touring Robot"
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# TouRI
class TouRI():
    """Process-wide singleton access point for the robot.

    Direct construction is forbidden; use ``TouRI.instance()`` instead.
    """
    _instance = None

    def __init__(self):
        raise RuntimeError('Call TouRI.instance() instead')

    @classmethod
    def instance(cls):
        """Return the shared ``TouRI_bot``, creating it on first use."""
        if cls._instance:
            return cls._instance
        cls._instance = TouRI_bot()
        return cls._instance
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Module-level singleton robot handle shared by the ROS subscriber callbacks.
touri_bot = TouRI.instance()
# -----------------------------------------------------------------------------
def callback_nav(data):
    """Teleop-nav handler: map the incoming axes to base velocities.

    Axis values inside the +/-0.2 dead-zone are zeroed; otherwise they are
    scaled (0.1 translational from y, 0.3 rotational from x) and pushed to
    the base controller.
    """
    rospy.loginfo("X: {} Y: {}".format(data.x, data.y))
    forward = 0 if -0.2 < data.y < 0.2 else 0.1 * data.y
    spin = 0 if -0.2 < data.x < 0.2 else 0.3 * data.x
    touri_bot.base.set_velocity(forward, spin)
    touri_bot.push_command()
# -----------------------------------------------------------------------------
def callback_mani(data):
    """Teleop-manipulation handler: map axes to incremental lift/arm moves.

    Axis values inside the +/-0.2 dead-zone are dropped; the lift moves by
    0.1 * height and the arm by -0.1 * extend.
    """
    rospy.loginfo("H: {} E: {}".format(data.height, data.extend))
    lift_step = 0 if -0.2 < data.height < 0.2 else 0.1 * data.height
    # The extend axis is negated -- presumably to match the joystick's
    # direction convention; confirm against the teleop hardware mapping.
    arm_step = 0 if -0.2 < data.extend < 0.2 else -0.1 * data.extend
    print("lift_step", lift_step)
    print("arm_step", arm_step)
    touri_bot.lift.move_by(lift_step)
    touri_bot.arm.move_by(arm_step)
    touri_bot.push_command()
# -----------------------------------------------------------------------------
def touri_actuate():
    """Start the ROS node, subscribe to both teleop topics, and block in
    ``rospy.spin()`` until shutdown."""
    rospy.init_node('touri_core',anonymous=True)
    rospy.Subscriber("teleop_nav", TeleopNav, callback_nav)
    rospy.Subscriber("teleop_mani", TeleopMani, callback_mani)
    rospy.spin()
# -----------------------------------------------------------------------------
if __name__=="__main__":
print(touri_bot)
touri_actuate()
| #!/usr/bin/env python
"""
TouRI Robot Base Code
"""
__author__ = "<NAME>"
__mail__ = "<EMAIL>"
__copyright__ = "NONE"
# -----------------------------------------------------------------------------
import rospy
from stretch_body.robot import Robot
from touri_mani.msg import TeleopMani
from touri_nav.msg import TeleopNav
from pynput import keyboard
import time
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# TouRI_bot
class TouRI_bot(Robot):
def __init__(self):
Robot.__init__(self)
self.startup()
self.base.startup()
self.lift.startup()
self.arm.startup()
self.setup_motion_vars()
# -------------------------------------------------------------------------
def setup_motion_vars(self):
self.base_trans_vel = 0
self.base_rot_vel = 0
self.lift_step = 0.1
self.arm_step = 0.1
# -------------------------------------------------------------------------
def _stop(self):
self.arm.stop()
self.lift.stop()
self.base.stop()
self.stop()
# -------------------------------------------------------------------------
def __str__(self):
return "TouRI - An RI Touring Robot"
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# TouRI
class TouRI():
_instance = None
def __init__(self):
raise RuntimeError('Call TouRI.instance() instead')
@classmethod
def instance(cls):
if not cls._instance:
cls._instance = TouRI_bot()
return cls._instance
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
touri_bot = TouRI.instance()
# -----------------------------------------------------------------------------
def callback_nav(data):
rospy.loginfo("X: {} Y: {}".format(data.x, data.y))
trans_vel = 0 if (data.y<0.2 and data.y>-0.2) else 0.1*data.y
rot_vel = 0 if (data.x<0.2 and data.x>-0.2) else 0.3*data.x
touri_bot.base.set_velocity(trans_vel,rot_vel)
touri_bot.push_command()
# -----------------------------------------------------------------------------
def callback_mani(data):
rospy.loginfo("H: {} E: {}".format(data.height, data.extend))
lift_step = 0 if (data.height<0.2 and data.height>-0.2) else \
0.1*data.height
arm_step = 0 if (data.extend<0.2 and data.extend>-0.2) else \
(-0.1)*data.extend
print("lift_step",lift_step)
print("arm_step",arm_step)
touri_bot.lift.move_by(lift_step)
touri_bot.arm.move_by(arm_step)
touri_bot.push_command()
# -----------------------------------------------------------------------------
def touri_actuate():
rospy.init_node('touri_core',anonymous=True)
rospy.Subscriber("teleop_nav", TeleopNav, callback_nav)
rospy.Subscriber("teleop_mani", TeleopMani, callback_mani)
rospy.spin()
# -----------------------------------------------------------------------------
if __name__=="__main__":
print(touri_bot)
touri_actuate()
| en | 0.126218 | #!/usr/bin/env python TouRI Robot Base Code # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # TouRI_bot # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # ------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # TouRI # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- | 2.34576 | 2 |
hydrocarbon_problem/env/env_test.py | lollcat/Aspen-RL | 1 | 6621424 | <gh_stars>1-10
import numpy as np
import time
from hydrocarbon_problem.api.api_base import BaseAspenDistillationAPI
from hydrocarbon_problem.env.env import AspenDistillation
def make_fake_agent(env: AspenDistillation):
    """Build a stateless policy that samples uniformly random actions from
    ``env``'s action spec, ignoring the observation entirely."""
    def fake_agent(obs):
        del obs  # the random policy is observation-independent
        disc_spec, cont_spec = env.action_spec()
        disc = np.random.randint(0, disc_spec.num_values, size=())
        cont = np.random.uniform(low=cont_spec.minimum,
                                 high=cont_spec.maximum,
                                 size=cont_spec.shape)
        return disc, cont
    return fake_agent
def test(n_episodes: int = 2500, use_fake_api: bool = True):
    """Roll out ``n_episodes`` episodes with a random agent, asserting simple
    sanity invariants after every environment step.

    Args:
        n_episodes: number of episodes to run.
        use_fake_api: if True use the in-process FakeDistillationAPI,
            otherwise connect to Aspen through AspenAPI.

    Returns:
        Tuple of (per-step simulation durations, per-step convergence
        statuses, per-episode returns, per-episode wall-clock times).
    """
    # api = FakeDistillationAPI() # this can be changed to AspenAPI to test with Aspen
    if use_fake_api:
        from hydrocarbon_problem.api.fake_api import FakeDistillationAPI
        api = FakeDistillationAPI() # this can be changed to AspenAPI to test with Aspen
    else:
        from hydrocarbon_problem.api.aspen_api import AspenAPI
        api = AspenAPI(max_solve_iterations=100)
    env = AspenDistillation(flowsheet_api=api)
    agent = make_fake_agent(env)
    simulation_time = []  # duration of every env.step call
    episodic_time = []    # wall-clock seconds per episode
    converged = []        # per-step convergence status from the flowsheet
    _return = []          # per-episode cumulative reward
    episode = 1
    for i in range(n_episodes):
        start = time.time()
        print(f"Episode: {episode}")
        timestep = env.reset()
        episode_return = 0
        n_streams = 1  # the episode starts with a single stream in the table
        while not timestep.last():
            observation = timestep.observation.upcoming_state
            action = agent(observation)
            timestep, duration, run_converged = env.step(action)
            simulation_time.append(duration)
            converged.append(run_converged)
            print(timestep)
            episode_return += timestep.reward
            discrete_action = action[0]
            if discrete_action == 0: # choose not to separate
                # if we don't separate then the created states are blank, 0 reward is given, and
                # the discount for the created states is zero
                assert timestep.reward == 0.0
                assert timestep.discount.created_states == (0, 0)
                assert (timestep.observation.created_states[1] == env._blank_state).all()
                assert (timestep.observation.created_states[0] == env._blank_state).all()
            else:
                n_streams += 2 # 2 new streams created
                # separating must yield a non-zero reward, non-blank created
                # states, and a created-state discount of 1 unless the new
                # stream is a product (then its discount is 0)
                assert not timestep.reward == 0.0
                if env._stream_table[-2].is_product:
                    # if tops is product, check discount is 0 else, check discount is 1
                    assert timestep.discount.created_states[0] == 0
                else:
                    assert timestep.discount.created_states[0] == 1
                if env._stream_table[-1].is_product:
                    # if bots is product, check discount is 0 else, check discount is 1
                    assert timestep.discount.created_states[1] == 0
                else:
                    assert timestep.discount.created_states[1] == 1
                assert not (timestep.observation.created_states[1] == env._blank_state).all()
                assert not (timestep.observation.created_states[0] == env._blank_state).all()
            if not timestep.last():
                # if the episode is not done, then check that the upcoming observation has
                # non-zero values
                assert not (timestep.observation.upcoming_state == env._blank_state).all()
            # check the stream table has the correct number of streams
            assert len(env._stream_table) == n_streams
        episode_timer = time.time() - start
        print(f"episode complete with return of {episode_return}")
        _return.append(episode_return)
        episodic_time.append(episode_timer)
        episode += 1
    if use_fake_api is False:
        api: AspenAPI  # bare annotation: a type hint only, no runtime effect
        simulation = api._flowsheet
        # now if I want to I can access some variable saved in simulation
        print(simulation)
    return simulation_time, converged, _return, episodic_time
if __name__ == '__main__':
    use_fake_api = True
    if use_fake_api:
        # Quick smoke test against the fake API; no statistics collected.
        test(100)
    else:
        simulation_time, converged, _return, episodic_time = test()
        # Separate the convergence data. Entries are True/False for runs that
        # performed a separation and the string "no separation" otherwise,
        # hence the literal == comparisons below.
        unconverged_separations = [index for (index, item) in enumerate(converged) if item == False]
        iterations_without_separation = [index for (index, item) in enumerate(converged) if item == "no separation"]
        converged_separation = [index for (index, item) in enumerate(converged) if item == True]
        # Number of non-Aspen runs
        number_of_iterations_without_separation = len(iterations_without_separation)
        # Number of Aspen runs
        number_of_unconverged_separations = len(unconverged_separations)
        number_of_converged_separations = len(converged_separation)
        number_of_non_separations = len(iterations_without_separation)
        total_separations = number_of_unconverged_separations + number_of_converged_separations
        # NOTE(review): these divisions run before the "no separations" guard
        # further down, so a run with zero separations raises
        # ZeroDivisionError here -- the guard should come first.
        percent_unconverged_separations = 100 * number_of_unconverged_separations/total_separations
        percent_converged_separations = 100 * number_of_converged_separations/total_separations
        # Filter returns: drop zero-return episodes before averaging.
        rl_returns = []
        filtered_return = [index for (index, item) in enumerate(_return) if item != 0]
        for i in filtered_return:
            j = _return[i]
            rl_returns.append(j)
        # NOTE(review): np.average of an empty list yields nan with a warning.
        average_rl_returns = np.average(rl_returns)
        # Filter simulation times and calculate the average
        aspen_time = []
        sim_time = [index for (index, item) in enumerate(simulation_time) if item != "no separation"]
        for i in sim_time:
            j = simulation_time[i]
            aspen_time.append(j)
        aspen_time = np.average(aspen_time)
        if number_of_converged_separations == 0 and number_of_unconverged_separations == 0:
            print("no separations were performed")
            print(f"Number of iterations = {len(converged)}")
        else:
            print(f"Number of iterations: {len(converged)}")
            print(f"Number of unconverged separations: {number_of_unconverged_separations}, "
                  f"{percent_unconverged_separations} %")
            print(f"Number of converged separations: {number_of_converged_separations}, "
                  f"{percent_converged_separations} %")
            print(f"Number of non separations: {number_of_non_separations}")
            # print(f"Episodic returns: {_return}")
            print(f"Average return: {average_rl_returns}")
            print(f"Average Aspen time: {aspen_time}")
            # print(f"Total sim array {simulation_time}")
            # print(f"Episodic time: {episodic_time}")
            print(f"Average episodic time: {np.average(episodic_time)}")
| import numpy as np
import time
from hydrocarbon_problem.api.api_base import BaseAspenDistillationAPI
from hydrocarbon_problem.env.env import AspenDistillation
def make_fake_agent(env: AspenDistillation):
def fake_agent(obs):
del(obs)
discrete_spec, continuous_spec = env.action_spec()
discrete_action = np.random.randint(0, discrete_spec.num_values, size=())
continuous_action = np.random.uniform(low=continuous_spec.minimum,
high=continuous_spec.maximum,
size=continuous_spec.shape)
return discrete_action, continuous_action
return fake_agent
def test(n_episodes: int = 2500, use_fake_api: bool = True):
"""This test runs multiple environment episodes, running some simple sanity
checks along the way.
"""
# api = FakeDistillationAPI() # this can be changed to AspenAPI to test with Aspen
if use_fake_api:
from hydrocarbon_problem.api.fake_api import FakeDistillationAPI
api = FakeDistillationAPI() # this can be changed to AspenAPI to test with Aspen
else:
from hydrocarbon_problem.api.aspen_api import AspenAPI
api = AspenAPI(max_solve_iterations=100)
env = AspenDistillation(flowsheet_api=api)
agent = make_fake_agent(env)
simulation_time = []
episodic_time = []
converged = []
_return = []
episode = 1
for i in range(n_episodes):
start = time.time()
print(f"Episode: {episode}")
timestep = env.reset()
episode_return = 0
n_streams = 1
while not timestep.last():
observation = timestep.observation.upcoming_state
action = agent(observation)
timestep, duration, run_converged = env.step(action)
simulation_time.append(duration)
converged.append(run_converged)
print(timestep)
episode_return += timestep.reward
discrete_action = action[0]
if discrete_action == 0: # choose not to seperate
# if we don't seperate then the created states are black, 0 reward is given, and
# the discount for the created states is zero
assert timestep.reward == 0.0
assert timestep.discount.created_states == (0, 0)
assert (timestep.observation.created_states[1] == env._blank_state).all()
assert (timestep.observation.created_states[0] == env._blank_state).all()
else:
n_streams += 2 # 2 new streams created
# if we choose to seperate a stream, then the reward should be non-zero, the created state
# discount's should both be 1, the created_states should have non-zero values.
assert not timestep.reward == 0.0
if env._stream_table[-2].is_product:
# if tops is product, check discount is 0 else, check discount is 1
assert timestep.discount.created_states[0] == 0
else:
assert timestep.discount.created_states[0] == 1
if env._stream_table[-1].is_product:
# if bots is product, check discount is 0 else, check discount is 1
assert timestep.discount.created_states[1] == 0
else:
assert timestep.discount.created_states[1] == 1
assert not (timestep.observation.created_states[1] == env._blank_state).all()
assert not (timestep.observation.created_states[0] == env._blank_state).all()
if not timestep.last():
# if the episode is not done, then check that the upcoming observation has
# non-zero values
assert not (timestep.observation.upcoming_state == env._blank_state).all()
# check the stream table has the correct number of streams
assert len(env._stream_table) == n_streams
episode_timer = time.time() - start
print(f"episode complete with return of {episode_return}")
_return.append(episode_return)
episodic_time.append(episode_timer)
episode += 1
if use_fake_api is False:
api: AspenAPI
simulation = api._flowsheet
# now if I want to I can acesss some variable saved in simulation
print(simulation)
return simulation_time, converged, _return, episodic_time
if __name__ == '__main__':
use_fake_api = True
if use_fake_api:
test(100)
else:
simulation_time, converged, _return, episodic_time = test()
# Separate the convergence data
unconverged_separations = [index for (index, item) in enumerate(converged) if item == False]
iterations_without_separation = [index for (index, item) in enumerate(converged) if item == "no separation"]
converged_separation = [index for (index, item) in enumerate(converged) if item == True]
# Number of non-Aspen runs
number_of_iterations_without_separation = len(iterations_without_separation)
# Number of Aspen runs
number_of_unconverged_separations = len(unconverged_separations)
number_of_converged_separations = len(converged_separation)
number_of_non_separations = len(iterations_without_separation)
total_separations = number_of_unconverged_separations + number_of_converged_separations
percent_unconverged_separations = 100 * number_of_unconverged_separations/total_separations
percent_converged_separations = 100 * number_of_converged_separations/total_separations
# Filter returns
rl_returns = []
filtered_return = [index for (index, item) in enumerate(_return) if item != 0]
for i in filtered_return:
j = _return[i]
rl_returns.append(j)
average_rl_returns = np.average(rl_returns)
# Filter simulation times and calculate the average
aspen_time = []
sim_time = [index for (index, item) in enumerate(simulation_time) if item != "no separation"]
for i in sim_time:
j = simulation_time[i]
aspen_time.append(j)
aspen_time = np.average(aspen_time)
if number_of_converged_separations == 0 and number_of_unconverged_separations == 0:
print("no separations were performed")
print(f"Number of iterations = {len(converged)}")
else:
print(f"Number of iterations: {len(converged)}")
print(f"Number of unconverged separations: {number_of_unconverged_separations}, "
f"{percent_unconverged_separations} %")
print(f"Number of converged separations: {number_of_converged_separations}, "
f"{percent_converged_separations} %")
print(f"Number of non separations: {number_of_non_separations}")
# print(f"Episodic returns: {_return}")
print(f"Average return: {average_rl_returns}")
print(f"Average Aspen time: {aspen_time}")
# print(f"Total sim array {simulation_time}")
# print(f"Episodic time: {episodic_time}")
print(f"Average episodic time: {np.average(episodic_time)}") | en | 0.857245 | This test runs multiple environment episodes, running some simple sanity checks along the way. # api = FakeDistillationAPI() # this can be changed to AspenAPI to test with Aspen # this can be changed to AspenAPI to test with Aspen # choose not to seperate # if we don't seperate then the created states are black, 0 reward is given, and # the discount for the created states is zero # 2 new streams created # if we choose to seperate a stream, then the reward should be non-zero, the created state # discount's should both be 1, the created_states should have non-zero values. # if tops is product, check discount is 0 else, check discount is 1 # if bots is product, check discount is 0 else, check discount is 1 # if the episode is not done, then check that the upcoming observation has # non-zero values # check the stream table has the correct number of streams # now if I want to I can acesss some variable saved in simulation # Separate the convergence data # Number of non-Aspen runs # Number of Aspen runs # Filter returns # Filter simulation times and calculate the average # print(f"Episodic returns: {_return}") # print(f"Total sim array {simulation_time}") # print(f"Episodic time: {episodic_time}") | 2.529789 | 3 |
app/dashboard/layout.py | salukadev/FDM-Mini-Project | 0 | 6621425 | from dash import dcc
from dash import html
import dash_bootstrap_components as dbc
from ..Plots import alzheimer_clusterringPlot as ac
import plotly.graph_objs as go
import plotly.express as px
import pandas as pd
import os
# navbar =
# Clustering figure for the Alzheimer dataset, restyled for the dark theme
# (transparent background, white text/legend).
fig2 = ac.alzheimer_clusterPlot()
fig2.update_layout({
    "plot_bgcolor": "rgba(0, 0, 0, 0)",
    "paper_bgcolor": "rgba(0, 0, 0, 0)",
    "font_color":"white",
    "legend_font_color" : "white"
})
# data retrieval: alzheimer
# NOTE(review): these CSVs are read at import time with paths relative to the
# current working directory -- the app must be launched from the project root.
alzheimerlength = pd.read_csv("app/datasets/alzheimer.csv")
alzheimerdatalength = len(alzheimerlength)
# NOTE(review): each name below is first bound to a filtered frame and then
# rebound to its row count -- only the integer count survives.
dementedPatients = alzheimerlength[alzheimerlength['Group'].str.contains('Demented')]
dementedPatients = len(dementedPatients)
noncritical = alzheimerlength[alzheimerlength['Group'].str.contains('Converted')]
noncritical = len(noncritical)
# data retrieval: covid 19
covidlength = pd.read_csv("app/datasets/covid-19 symptoms dataset.csv")
covidDatalength = len(covidlength)
# covidPatients = covidlength[covidlength['infectionProb'].str.contains(1)]
# pie chart plots (Alzheimer), all restyled transparent/white for the dark theme
pieChartFig = px.pie(alzheimerlength, values='MMSE', names='Group')
pieChartFig.update_layout({
    "plot_bgcolor": "rgba(0, 0, 0, 0)",
    "paper_bgcolor": "rgba(0, 0, 0, 0)",
    "font_color":"white",
    "legend_font_color" : "white"
})
pieChartFig2 = px.pie(alzheimerlength, names='M/F', color='M/F', color_discrete_sequence=px.colors.sequential.RdBu)
pieChartFig2.update_layout({
    "plot_bgcolor": "rgba(0, 0, 0, 0)",
    "paper_bgcolor": "rgba(0, 0, 0, 0)",
    "font_color":"white",
    "legend_font_color" : "white"
})
pieChartFig3 = px.pie(alzheimerlength, names='CDR', color_discrete_sequence=px.colors.sequential.RdBu)
pieChartFig3.update_layout({
    "plot_bgcolor": "rgba(0, 0, 0, 0)",
    "paper_bgcolor": "rgba(0, 0, 0, 0)",
    "font_color":"white",
    "legend_font_color" : "white"
})
# covid plots
# NOTE(review): the bar values below are hard-coded rather than derived from
# the covid dataframe loaded above -- confirm they are intentional.
a = ['Total cases', 'Confirmed cases', 'Non-critical cases']
figc1 = go.Figure([go.Bar(x=a, y=[2575, 1005, 1570])])
figc1.update_layout({
    "plot_bgcolor": "rgba(0, 0, 0, 0)",
    "paper_bgcolor": "rgba(0, 0, 0, 0)",
    "font_color":"white",
    "legend_font_color" : "white",
    # "coloraxis" : "red"
})
figc2 = px.pie(covidlength, names='bodyPain', color='bodyPain', color_discrete_sequence=px.colors.sequential.RdBu)
figc3 = px.pie(covidlength, names='fever', color='fever', color_discrete_sequence=px.colors.sequential.RdBu)
# demo 3-D scatter of the plotly iris sample dataset
df = px.data.iris()
fig = px.scatter_3d(df, x='sepal_length', y='sepal_width', z='petal_width',
                    color='species')
layout = html.Div(id='main', children=[
dbc.NavbarSimple(
children=[
dbc.NavItem(dbc.NavLink("Page 1", href="#")),
dbc.DropdownMenu(
children=[
dbc.DropdownMenuItem("More pages", header=True),
dbc.DropdownMenuItem("Page 2", href="#"),
dbc.DropdownMenuItem("Page 3", href="#"),
],
nav=True,
in_navbar=True,
label="More",
),
],
brand="AI Doc Dasboard",
brand_href="#",
color="black",
dark=True,
),
html.Div(
[
html.H4("Alzheimer", className="card-title",style={"color": "white", "margin": "10px"}),
dbc.Row(
[
dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Total visits", className="card-title"),
html.H1(alzheimerdatalength, className="card-subtitle"),
# html.P(
# "Some quick example text to build on the card title and make "
# "up the bulk of the card's content.",
# className="card-text",
# )
]
),
style={"width": "500", "margin": "10px","color": "white" ,"background-color":"#2a2a72", "border-radius":"20px"},
)
)),
dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Confirmed cases", className="card-title"),
html.H1(dementedPatients, className="card-subtitle"),
# html.P(
# "Some quick example text to build on the card title and make "
# "up the bulk of the card's content.",
# className="card-text",
# ),
# dbc.CardLink("Card link", href="#"),
# dbc.CardLink("External link", href="https://google.com"),
]
),
style={"width": "500", "margin": "10px", "color": "white" ,"background-color":"red", "border-radius":"20px"},
)
)),
dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Non- Critical", className="card-title"),
html.H1(noncritical, className="card-subtitle"),
# html.P(
# "Some quick example text to build on the card title and make "
# "up the bulk of the card's content.",
# className="card-text",
# ),
# dbc.CardLink("Card link", href="#"),
# dbc.CardLink("External link", href="https://google.com"),
]
),
style={"width": "500", "margin": "10px", "color": "white" ,"background-color":"green", "border-radius":"20px"},
)
)),
]
),
dbc.Row(dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("", className="card-title"),
html.P(
"Alzheimer's is the most common cause of dementia, a general term for memory loss and other cognitive abilities serious enough to interfere with daily life. Alzheimer's disease accounts for 60-80% of dementia cases.Alzheimer's is not a normal part of aging. The greatest known risk factor is increasing age, and the majority of people with Alzheimer's are 65 and older. Alzheimer’s disease is considered to be younger-onset Alzheimer’s if it affects a person under 65. Younger-onset can also be referred to as early-onset Alzheimer’s. People with younger-onset Alzheimer’s can be in the early, middle or late stage of the disease.",
className="card-text",
),
html.Center([
html.H2("Alzheimer types Plot", className="card-subtitle"),
dcc.Graph(id='plot2', figure = fig2 )
])
# dbc.CardLink("Card link", href="#"),
# dbc.CardLink("External link", href="https://google.com"),
# dcc.Dropdown(
# id='my-dropdown',
# options=[
# {'label': 'Coke', 'value': 'COKE'},
# {'label': 'Tesla', 'value': 'TSLA'},
# {'label': 'Apple', 'value': 'AAPL'}
# ],
# value='COKE'
# ),
# dcc.Graph(id='my-graph'),
# dcc.Store(id='user-store'),
]
),
style={"width": "500", "margin": "10px", "color" : "white" ,"background-color":"black"},
)
))),
dbc.Row(
[
dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Patient Types", className="card-title"),
html.H6("(Alzheimers)", className="card-subtitle"),
html.P(
"The following denotes the distribution of patients in the dataset",
className="card-text",
),
dbc.CardLink("Read more...", href="#"),
dcc.Graph(id='plot3', figure = pieChartFig ),
]
),
style={"width": "500", "margin": "10px", "color" : "white" ,"background-color":"#323232"},
)
)),
dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Gender Proportion", className="card-title"),
html.H6("(Alzheimers)", className="card-subtitle"),
html.P(
"The following shows the gender proportion of Alzerimers patients",
className="card-text",
),
dbc.CardLink("Read more...", href="#"),
dcc.Graph(id='plot4', figure = pieChartFig2 ),
]
),
style={"width": "500", "margin": "10px", "color" : "white" ,"background-color":"#323232"},
)
)),
dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Clinical Dimentia Rating", className="card-title"),
html.H6("(Alzheimers)", className="card-subtitle"),
html.P(
"CDR score affect mainly to predict the alzheimers patients",
className="card-text",
),
dbc.CardLink("Read more...", href="#"),
dcc.Graph(id='plot5', figure = pieChartFig3 , style={"plot_bgcolor":"rgba(0,0,0,0)"}),
]
),
style={"width": "500", "margin": "10px", "color" : "white" ,"background-color":"#323232"},
)
)),
]
)
]),
html.Div(
[
html.H4("COVID-19", className="card-title",style={"color": "white", "margin": "10px"}),
dbc.Row(
[
dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Total visits", className="card-title"),
html.H1(covidDatalength, className="card-subtitle"),
# html.P(
# "Some quick example text to build on the card title and make "
# "up the bulk of the card's content.",
# className="card-text",
# )
]
),
style={"width": "500", "margin": "10px","color": "white" ,"background-color":"#2a2a72", "border-radius":"20px"},
)
)),
dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Confirmed cases", className="card-title"),
html.H1("1005", className="card-subtitle"),
# html.P(
# "Some quick example text to build on the card title and make "
# "up the bulk of the card's content.",
# className="card-text",
# ),
# dbc.CardLink("Card link", href="#"),
# dbc.CardLink("External link", href="https://google.com"),
]
),
style={"width": "500", "margin": "10px", "color": "white" ,"background-color":"red", "border-radius":"20px"},
)
)),
dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Non- Critical", className="card-title"),
html.H1("1570", className="card-subtitle"),
# html.P(
# "Some quick example text to build on the card title and make "
# "up the bulk of the card's content.",
# className="card-text",
# ),
# dbc.CardLink("Card link", href="#"),
# dbc.CardLink("External link", href="https://google.com"),
]
),
style={"width": "500", "margin": "10px", "color": "white" ,"background-color":"green", "border-radius":"20px"},
)
)),
]
),
dbc.Row(dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Covid 19 data analysis", className="card-title"),
html.H6("", className="card-subtitle"),
html.P(
"Coronavirus disease 2019 (COVID-19), also known as COVID and the coronavirus, is a contagious disease caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2). The first known case was identified in Wuhan, China, in December 2019.The disease has since spread worldwide, leading to an ongoing pandemic. Symptoms of COVID-19 are variable, but often include fever, cough, headache, fatigue, breathing difficulties, and loss of smell and taste. Symptoms may begin one to fourteen days after exposure to the virus. At least a third of people who are infected do not develop noticeable symptoms. Of those people who develop symptoms noticeable enough to be classed as patients, most (81%) develop mild to moderate symptoms (up to mild pneumonia), while 14% develop severe symptoms",
className="card-text",
),
html.Center([
dcc.Graph(id='plot6', figure = figc1 , style={"plot_bgcolor":"rgba(0,0,0,0)"}),
])
# dbc.CardLink("Card link", href="#"),
# dbc.CardLink("External link", href="https://google.com"),
# dcc.Dropdown(
# id='my-dropdown',
# options=[
# {'label': 'Coke', 'value': 'COKE'},
# {'label': 'Tesla', 'value': 'TSLA'},
# {'label': 'Apple', 'value': 'AAPL'}
# ],
# value='COKE'
# ),
# dcc.Graph(id='my-graph'),
# dcc.Store(id='user-store'),
]
),
style={"width": "500", "margin": "10px", "color" : "white" ,"background-color":"black"},
)
))),
dbc.Row(
[
dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Gender Variations", className="card-title"),
html.H6("(COVID-19)", className="card-subtitle"),
html.P(
"The following shows the gender diversity among covid patients.",
className="card-text",
),
dbc.CardLink("Read more...", href="#"),
dcc.Graph(id='plot7', figure = figc2 , style={"plot_bgcolor":"rgba(0,0,0,0)"}),
]
),
style={"width": "200", "margin": "10px", "color" : "white" ,"background-color":"#323232"},
)
)),
dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Fever level variation", className="card-title"),
html.H6("(COVID-19)", className="card-subtitle"),
html.P(
"Fever level of among covid patients is displayed below with respective tho the tempurature in Farenheit",
className="card-text",
),
dbc.CardLink("Read more...", href="#"),
dcc.Graph(id='plot8', figure = figc3 , style={"plot_bgcolor":"rgba(0,0,0,0)"}),
]
),
style={"width": "200", "margin": "10px", "color" : "white" ,"background-color":"#323232"},
)
)),
]
),
]
)
# html.H1(id='username'),
# html.H1('Dashboard'),
# dcc.Dropdown(
# id='my-dropdown',
# options=[
# {'label': 'Coke', 'value': 'COKE'},
# {'label': 'Tesla', 'value': 'TSLA'},
# {'label': 'Apple', 'value': 'AAPL'}
# ],
# value='COKE'
# ),
# dcc.Graph(id='my-graph'),
# dcc.Store(id='user-store'),
], style={'width': '500', "background-color": "black", "padding" : "20px"})
| from dash import dcc
from dash import html
import dash_bootstrap_components as dbc
from ..Plots import alzheimer_clusterringPlot as ac
import plotly.graph_objs as go
import plotly.express as px
import pandas as pd
import os
# navbar =
fig2 = ac.alzheimer_clusterPlot()
fig2.update_layout({
"plot_bgcolor": "rgba(0, 0, 0, 0)",
"paper_bgcolor": "rgba(0, 0, 0, 0)",
"font_color":"white",
"legend_font_color" : "white"
})
#data retrieval alzheimer
alzheimerlength = pd.read_csv("app/datasets/alzheimer.csv")
alzheimerdatalength = len(alzheimerlength)
dementedPatients = alzheimerlength[alzheimerlength['Group'].str.contains('Demented')]
dementedPatients = len(dementedPatients)
noncritical = alzheimerlength[alzheimerlength['Group'].str.contains('Converted')]
noncritical = len(noncritical)
#data retrieval covid 19
covidlength = pd.read_csv("app/datasets/covid-19 symptoms dataset.csv")
covidDatalength = len(covidlength)
# covidPatients = covidlength[covidlength['infectionProb'].str.contains(1)]
#pie chart plot
pieChartFig = px.pie(alzheimerlength, values='MMSE', names='Group')
pieChartFig.update_layout({
"plot_bgcolor": "rgba(0, 0, 0, 0)",
"paper_bgcolor": "rgba(0, 0, 0, 0)",
"font_color":"white",
"legend_font_color" : "white"
})
pieChartFig2 = px.pie(alzheimerlength, names='M/F', color='M/F', color_discrete_sequence=px.colors.sequential.RdBu)
pieChartFig2.update_layout({
"plot_bgcolor": "rgba(0, 0, 0, 0)",
"paper_bgcolor": "rgba(0, 0, 0, 0)",
"font_color":"white",
"legend_font_color" : "white"
})
pieChartFig3 = px.pie(alzheimerlength, names='CDR', color_discrete_sequence=px.colors.sequential.RdBu)
pieChartFig3.update_layout({
"plot_bgcolor": "rgba(0, 0, 0, 0)",
"paper_bgcolor": "rgba(0, 0, 0, 0)",
"font_color":"white",
"legend_font_color" : "white"
})
#covid plots
a = ['Total cases', 'Confirmed cases', 'Non-critical cases']
figc1 = go.Figure([go.Bar(x=a, y=[2575, 1005, 1570])])
figc1.update_layout({
"plot_bgcolor": "rgba(0, 0, 0, 0)",
"paper_bgcolor": "rgba(0, 0, 0, 0)",
"font_color":"white",
"legend_font_color" : "white",
# "coloraxis" : "red"
})
figc2 = px.pie(covidlength, names='bodyPain', color='bodyPain', color_discrete_sequence=px.colors.sequential.RdBu)
figc3 = px.pie(covidlength, names='fever', color='fever', color_discrete_sequence=px.colors.sequential.RdBu)
df = px.data.iris()
fig = px.scatter_3d(df, x='sepal_length', y='sepal_width', z='petal_width',
color='species')
layout = html.Div(id='main', children=[
dbc.NavbarSimple(
children=[
dbc.NavItem(dbc.NavLink("Page 1", href="#")),
dbc.DropdownMenu(
children=[
dbc.DropdownMenuItem("More pages", header=True),
dbc.DropdownMenuItem("Page 2", href="#"),
dbc.DropdownMenuItem("Page 3", href="#"),
],
nav=True,
in_navbar=True,
label="More",
),
],
brand="AI Doc Dasboard",
brand_href="#",
color="black",
dark=True,
),
html.Div(
[
html.H4("Alzheimer", className="card-title",style={"color": "white", "margin": "10px"}),
dbc.Row(
[
dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Total visits", className="card-title"),
html.H1(alzheimerdatalength, className="card-subtitle"),
# html.P(
# "Some quick example text to build on the card title and make "
# "up the bulk of the card's content.",
# className="card-text",
# )
]
),
style={"width": "500", "margin": "10px","color": "white" ,"background-color":"#2a2a72", "border-radius":"20px"},
)
)),
dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Confirmed cases", className="card-title"),
html.H1(dementedPatients, className="card-subtitle"),
# html.P(
# "Some quick example text to build on the card title and make "
# "up the bulk of the card's content.",
# className="card-text",
# ),
# dbc.CardLink("Card link", href="#"),
# dbc.CardLink("External link", href="https://google.com"),
]
),
style={"width": "500", "margin": "10px", "color": "white" ,"background-color":"red", "border-radius":"20px"},
)
)),
dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Non- Critical", className="card-title"),
html.H1(noncritical, className="card-subtitle"),
# html.P(
# "Some quick example text to build on the card title and make "
# "up the bulk of the card's content.",
# className="card-text",
# ),
# dbc.CardLink("Card link", href="#"),
# dbc.CardLink("External link", href="https://google.com"),
]
),
style={"width": "500", "margin": "10px", "color": "white" ,"background-color":"green", "border-radius":"20px"},
)
)),
]
),
dbc.Row(dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("", className="card-title"),
html.P(
"Alzheimer's is the most common cause of dementia, a general term for memory loss and other cognitive abilities serious enough to interfere with daily life. Alzheimer's disease accounts for 60-80% of dementia cases.Alzheimer's is not a normal part of aging. The greatest known risk factor is increasing age, and the majority of people with Alzheimer's are 65 and older. Alzheimer’s disease is considered to be younger-onset Alzheimer’s if it affects a person under 65. Younger-onset can also be referred to as early-onset Alzheimer’s. People with younger-onset Alzheimer’s can be in the early, middle or late stage of the disease.",
className="card-text",
),
html.Center([
html.H2("Alzheimer types Plot", className="card-subtitle"),
dcc.Graph(id='plot2', figure = fig2 )
])
# dbc.CardLink("Card link", href="#"),
# dbc.CardLink("External link", href="https://google.com"),
# dcc.Dropdown(
# id='my-dropdown',
# options=[
# {'label': 'Coke', 'value': 'COKE'},
# {'label': 'Tesla', 'value': 'TSLA'},
# {'label': 'Apple', 'value': 'AAPL'}
# ],
# value='COKE'
# ),
# dcc.Graph(id='my-graph'),
# dcc.Store(id='user-store'),
]
),
style={"width": "500", "margin": "10px", "color" : "white" ,"background-color":"black"},
)
))),
dbc.Row(
[
dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Patient Types", className="card-title"),
html.H6("(Alzheimers)", className="card-subtitle"),
html.P(
"The following denotes the distribution of patients in the dataset",
className="card-text",
),
dbc.CardLink("Read more...", href="#"),
dcc.Graph(id='plot3', figure = pieChartFig ),
]
),
style={"width": "500", "margin": "10px", "color" : "white" ,"background-color":"#323232"},
)
)),
dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Gender Proportion", className="card-title"),
html.H6("(Alzheimers)", className="card-subtitle"),
html.P(
"The following shows the gender proportion of Alzerimers patients",
className="card-text",
),
dbc.CardLink("Read more...", href="#"),
dcc.Graph(id='plot4', figure = pieChartFig2 ),
]
),
style={"width": "500", "margin": "10px", "color" : "white" ,"background-color":"#323232"},
)
)),
dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Clinical Dimentia Rating", className="card-title"),
html.H6("(Alzheimers)", className="card-subtitle"),
html.P(
"CDR score affect mainly to predict the alzheimers patients",
className="card-text",
),
dbc.CardLink("Read more...", href="#"),
dcc.Graph(id='plot5', figure = pieChartFig3 , style={"plot_bgcolor":"rgba(0,0,0,0)"}),
]
),
style={"width": "500", "margin": "10px", "color" : "white" ,"background-color":"#323232"},
)
)),
]
)
]),
html.Div(
[
html.H4("COVID-19", className="card-title",style={"color": "white", "margin": "10px"}),
dbc.Row(
[
dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Total visits", className="card-title"),
html.H1(covidDatalength, className="card-subtitle"),
# html.P(
# "Some quick example text to build on the card title and make "
# "up the bulk of the card's content.",
# className="card-text",
# )
]
),
style={"width": "500", "margin": "10px","color": "white" ,"background-color":"#2a2a72", "border-radius":"20px"},
)
)),
dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Confirmed cases", className="card-title"),
html.H1("1005", className="card-subtitle"),
# html.P(
# "Some quick example text to build on the card title and make "
# "up the bulk of the card's content.",
# className="card-text",
# ),
# dbc.CardLink("Card link", href="#"),
# dbc.CardLink("External link", href="https://google.com"),
]
),
style={"width": "500", "margin": "10px", "color": "white" ,"background-color":"red", "border-radius":"20px"},
)
)),
dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Non- Critical", className="card-title"),
html.H1("1570", className="card-subtitle"),
# html.P(
# "Some quick example text to build on the card title and make "
# "up the bulk of the card's content.",
# className="card-text",
# ),
# dbc.CardLink("Card link", href="#"),
# dbc.CardLink("External link", href="https://google.com"),
]
),
style={"width": "500", "margin": "10px", "color": "white" ,"background-color":"green", "border-radius":"20px"},
)
)),
]
),
dbc.Row(dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Covid 19 data analysis", className="card-title"),
html.H6("", className="card-subtitle"),
html.P(
"Coronavirus disease 2019 (COVID-19), also known as COVID and the coronavirus, is a contagious disease caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2). The first known case was identified in Wuhan, China, in December 2019.The disease has since spread worldwide, leading to an ongoing pandemic. Symptoms of COVID-19 are variable, but often include fever, cough, headache, fatigue, breathing difficulties, and loss of smell and taste. Symptoms may begin one to fourteen days after exposure to the virus. At least a third of people who are infected do not develop noticeable symptoms. Of those people who develop symptoms noticeable enough to be classed as patients, most (81%) develop mild to moderate symptoms (up to mild pneumonia), while 14% develop severe symptoms",
className="card-text",
),
html.Center([
dcc.Graph(id='plot6', figure = figc1 , style={"plot_bgcolor":"rgba(0,0,0,0)"}),
])
# dbc.CardLink("Card link", href="#"),
# dbc.CardLink("External link", href="https://google.com"),
# dcc.Dropdown(
# id='my-dropdown',
# options=[
# {'label': 'Coke', 'value': 'COKE'},
# {'label': 'Tesla', 'value': 'TSLA'},
# {'label': 'Apple', 'value': 'AAPL'}
# ],
# value='COKE'
# ),
# dcc.Graph(id='my-graph'),
# dcc.Store(id='user-store'),
]
),
style={"width": "500", "margin": "10px", "color" : "white" ,"background-color":"black"},
)
))),
dbc.Row(
[
dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Gender Variations", className="card-title"),
html.H6("(COVID-19)", className="card-subtitle"),
html.P(
"The following shows the gender diversity among covid patients.",
className="card-text",
),
dbc.CardLink("Read more...", href="#"),
dcc.Graph(id='plot7', figure = figc2 , style={"plot_bgcolor":"rgba(0,0,0,0)"}),
]
),
style={"width": "200", "margin": "10px", "color" : "white" ,"background-color":"#323232"},
)
)),
dbc.Col(html.Div(
dbc.Card(
dbc.CardBody(
[
html.H4("Fever level variation", className="card-title"),
html.H6("(COVID-19)", className="card-subtitle"),
html.P(
"Fever level of among covid patients is displayed below with respective tho the tempurature in Farenheit",
className="card-text",
),
dbc.CardLink("Read more...", href="#"),
dcc.Graph(id='plot8', figure = figc3 , style={"plot_bgcolor":"rgba(0,0,0,0)"}),
]
),
style={"width": "200", "margin": "10px", "color" : "white" ,"background-color":"#323232"},
)
)),
]
),
]
)
# html.H1(id='username'),
# html.H1('Dashboard'),
# dcc.Dropdown(
# id='my-dropdown',
# options=[
# {'label': 'Coke', 'value': 'COKE'},
# {'label': 'Tesla', 'value': 'TSLA'},
# {'label': 'Apple', 'value': 'AAPL'}
# ],
# value='COKE'
# ),
# dcc.Graph(id='my-graph'),
# dcc.Store(id='user-store'),
], style={'width': '500', "background-color": "black", "padding" : "20px"})
| en | 0.280307 | # navbar = #data retrieval alzheimer #data retrieval covid 19 # covidPatients = covidlength[covidlength['infectionProb'].str.contains(1)] #pie chart plot #covid plots # "coloraxis" : "red" # html.P( # "Some quick example text to build on the card title and make " # "up the bulk of the card's content.", # className="card-text", # ) # html.P( # "Some quick example text to build on the card title and make " # "up the bulk of the card's content.", # className="card-text", # ), # dbc.CardLink("Card link", href="#"), # dbc.CardLink("External link", href="https://google.com"), # html.P( # "Some quick example text to build on the card title and make " # "up the bulk of the card's content.", # className="card-text", # ), # dbc.CardLink("Card link", href="#"), # dbc.CardLink("External link", href="https://google.com"), # dbc.CardLink("Card link", href="#"), # dbc.CardLink("External link", href="https://google.com"), # dcc.Dropdown( # id='my-dropdown', # options=[ # {'label': 'Coke', 'value': 'COKE'}, # {'label': 'Tesla', 'value': 'TSLA'}, # {'label': 'Apple', 'value': 'AAPL'} # ], # value='COKE' # ), # dcc.Graph(id='my-graph'), # dcc.Store(id='user-store'), # html.P( # "Some quick example text to build on the card title and make " # "up the bulk of the card's content.", # className="card-text", # ) # html.P( # "Some quick example text to build on the card title and make " # "up the bulk of the card's content.", # className="card-text", # ), # dbc.CardLink("Card link", href="#"), # dbc.CardLink("External link", href="https://google.com"), # html.P( # "Some quick example text to build on the card title and make " # "up the bulk of the card's content.", # className="card-text", # ), # dbc.CardLink("Card link", href="#"), # dbc.CardLink("External link", href="https://google.com"), # dbc.CardLink("Card link", href="#"), # dbc.CardLink("External link", href="https://google.com"), # dcc.Dropdown( # id='my-dropdown', # options=[ # {'label': 'Coke', 'value': 
'COKE'}, # {'label': 'Tesla', 'value': 'TSLA'}, # {'label': 'Apple', 'value': 'AAPL'} # ], # value='COKE' # ), # dcc.Graph(id='my-graph'), # dcc.Store(id='user-store'), # html.H1(id='username'), # html.H1('Dashboard'), # dcc.Dropdown( # id='my-dropdown', # options=[ # {'label': 'Coke', 'value': 'COKE'}, # {'label': 'Tesla', 'value': 'TSLA'}, # {'label': 'Apple', 'value': 'AAPL'} # ], # value='COKE' # ), # dcc.Graph(id='my-graph'), # dcc.Store(id='user-store'), | 2.514503 | 3 |
ormar/queryset/actions/__init__.py | dudil/ormar | 0 | 6621426 | from ormar.queryset.actions.filter_action import FilterAction
from ormar.queryset.actions.order_action import OrderAction
__all__ = ["FilterAction", "OrderAction"]
| from ormar.queryset.actions.filter_action import FilterAction
from ormar.queryset.actions.order_action import OrderAction
__all__ = ["FilterAction", "OrderAction"]
| none | 1 | 1.169389 | 1 | |
api/common/decorators.py | khashimakhun/projectNew | 5 | 6621427 | <gh_stars>1-10
import json
import logging
from django.http import HttpResponse
from api.common.exceptions import BotException
def handle_exception(method):
def method_wrapper(*args, **kwargs):
try:
return method(*args, **kwargs)
except Exception as ex:
exception_message = str(ex)
if isinstance(ex, BotException):
print(exception_message)
status = 200
else:
logging.exception(exception_message)
status = 500
return HttpResponse(json.dumps(exception_message), status=status, content_type='application/json')
return method_wrapper
| import json
import logging
from django.http import HttpResponse
from api.common.exceptions import BotException
def handle_exception(method):
def method_wrapper(*args, **kwargs):
try:
return method(*args, **kwargs)
except Exception as ex:
exception_message = str(ex)
if isinstance(ex, BotException):
print(exception_message)
status = 200
else:
logging.exception(exception_message)
status = 500
return HttpResponse(json.dumps(exception_message), status=status, content_type='application/json')
return method_wrapper | none | 1 | 2.332335 | 2 | |
tests.py | ltblueberry/directory-sorter | 0 | 6621428 | import unittest
from os import path, listdir
from directory_sorter import main as script
from directory_sorter import messages
class ScriptTest(unittest.TestCase):
def test_dir_empty(self):
exit_message = script(None)
self.assertEqual(exit_message, messages.NONE_DIR)
def test_dir_not_found(self):
directory = "no_directory"
exit_message = script(directory)
self.assertEqual(exit_message, messages.DIR_NOT_FOUND.format(
path.abspath(directory)))
def test_is_not_dir(self):
not_directory = "testfile"
exit_message = script(not_directory)
self.assertEqual(exit_message, messages.IS_NOT_DIR.format(
path.abspath(not_directory)))
def test_sorting(self):
directory = "test"
exit_message = script(directory)
if exit_message != messages.DONE:
print "Script finished with unexpected message"
self.assertTrue(False)
return
files = [f for f in listdir(
directory) if path.isfile(path.join(directory, f))]
# check there are no files in directory
if files:
print "There are unmoved files after script finished"
self.assertTrue(False)
return
sub_directories = {"no_extension": 1, "png": 1, "txt": 2}
for key in sub_directories:
moved_files = listdir(path.join(directory, key))
if len(moved_files) != sub_directories[key]:
print "{} directory has unexpected files count ({} != {})".format(
key, len(moved_files), sub_directories[key])
self.assertTrue(False)
if __name__ == '__main__':
unittest.main()
| import unittest
from os import path, listdir
from directory_sorter import main as script
from directory_sorter import messages
class ScriptTest(unittest.TestCase):
def test_dir_empty(self):
exit_message = script(None)
self.assertEqual(exit_message, messages.NONE_DIR)
def test_dir_not_found(self):
directory = "no_directory"
exit_message = script(directory)
self.assertEqual(exit_message, messages.DIR_NOT_FOUND.format(
path.abspath(directory)))
def test_is_not_dir(self):
not_directory = "testfile"
exit_message = script(not_directory)
self.assertEqual(exit_message, messages.IS_NOT_DIR.format(
path.abspath(not_directory)))
def test_sorting(self):
directory = "test"
exit_message = script(directory)
if exit_message != messages.DONE:
print "Script finished with unexpected message"
self.assertTrue(False)
return
files = [f for f in listdir(
directory) if path.isfile(path.join(directory, f))]
# check there are no files in directory
if files:
print "There are unmoved files after script finished"
self.assertTrue(False)
return
sub_directories = {"no_extension": 1, "png": 1, "txt": 2}
for key in sub_directories:
moved_files = listdir(path.join(directory, key))
if len(moved_files) != sub_directories[key]:
print "{} directory has unexpected files count ({} != {})".format(
key, len(moved_files), sub_directories[key])
self.assertTrue(False)
if __name__ == '__main__':
unittest.main()
| en | 0.864268 | # check there are no files in directory | 3.176763 | 3 |
python_boilerplate_test01/tests/test_os_utils.py | czbiohub/python_boilerplate_test01 | 0 | 6621429 | import os
import pytest
def test_sanitize_path():
from python_boilerplate_test01.os_utils import sanitize_path
test = sanitize_path('.')
true = os.path.abspath('.')
assert test == true
def test_maybe_add_slash():
from python_boilerplate_test01.os_utils import maybe_add_slash
test = maybe_add_slash("test-folder")
assert test == 'test-folder/'
def test_get_stdout_from_command():
from python_boilerplate_test01.os_utils import get_stdout_from_command
command = ['echo', 'asdf']
stdout = get_stdout_from_command(command)
assert stdout == ['asdf']
def test_get_stdout_stderr_from_command():
from python_boilerplate_test01.os_utils import get_stdout_stderr_from_command
command = ['sed', 'asdf']
stdout, stderr = get_stdout_stderr_from_command(command)
assert stdout == []
assert stderr == ['sed: 1: "asdf": command a expects \ followed by text']
| import os
import pytest
def test_sanitize_path():
from python_boilerplate_test01.os_utils import sanitize_path
test = sanitize_path('.')
true = os.path.abspath('.')
assert test == true
def test_maybe_add_slash():
from python_boilerplate_test01.os_utils import maybe_add_slash
test = maybe_add_slash("test-folder")
assert test == 'test-folder/'
def test_get_stdout_from_command():
from python_boilerplate_test01.os_utils import get_stdout_from_command
command = ['echo', 'asdf']
stdout = get_stdout_from_command(command)
assert stdout == ['asdf']
def test_get_stdout_stderr_from_command():
from python_boilerplate_test01.os_utils import get_stdout_stderr_from_command
command = ['sed', 'asdf']
stdout, stderr = get_stdout_stderr_from_command(command)
assert stdout == []
assert stderr == ['sed: 1: "asdf": command a expects \ followed by text']
| none | 1 | 2.383559 | 2 | |
src/python/transducer_data.py | dptam/neural_wfst | 0 | 6621430 | '''
| Filename : transducer_data.py
| Description : Functions that return the data fe to the transducer.
| Author : <NAME>
| Created : Tue Dec 8 17:50:51 2015 (-0500)
| Last-Updated: Thu Dec 31 01:08:44 2015 (-0500)
| By: <NAME>
| Update #: 22
'''
import codecs
import numpy
import string
import rasengan
import util_lstm_seqlabel
import warnings
import functools
BOS_CHAR = '^'
def read_data(file_name):
"""
Helper function
"""
lst = []
with codecs.open(file_name, 'r', encoding='utf8') as f:
for line in f:
line = line.strip()
if line == '':
continue
(one, two, *rest) = line.split("\t")
lst.append((one, two))
return lst
def numerize(lst, Sigma, win):
" Takes the string-valued training data and interns it "
lst_prime = []
bos_idx = len(Sigma)
for one, two in lst:
one_prime = numpy.asarray(
util_lstm_seqlabel.conv_x(
[Sigma[x] for x in one], win, bos_idx),
dtype=numpy.int32)
two_prime = numpy.asarray(
[Sigma[x] for x in two],
dtype=numpy.int32)
lst_prime.append((one_prime, two_prime))
return lst_prime
def int2str(lst, Sigma_inv):
" Converts a list of integers to a string "
_string = ""
for x in lst:
_string += Sigma_inv[x]
return _string
def get_lst_char(data_tuple_list):
lst_char = list(set(functools.reduce(
lambda x, y: x + y[0] + y[1], data_tuple_list, '')))
for e in list(set(string.ascii_letters.lower())):
e = str(e)
if e not in lst_char:
lst_char.append(e)
if(BOS_CHAR in lst_char):
lst_char.remove(BOS_CHAR)
assert BOS_CHAR not in lst_char
lst_char.insert(0, BOS_CHAR)
return lst_char
def add_bos(data_tuple_list):
'''
The BOS_CHAR is added to the left portion of the data, that is transduced
so that my LSTM can produce (1 + length) dimensional tensor, which is then
used by the cython transducer.
'''
return [(BOS_CHAR + a, b) for a,b in data_tuple_list]
def main(args):
with rasengan.debug_support():
with rasengan.tictoc("Loading Data"):
data_list = rasengan.namespacer(
read_data(args.train_fn))
val_data_list = rasengan.namespacer(
read_data(args.dev_fn))
if args.partition_dev_into_train > 0:
lim = args.partition_dev_into_test
data_list.extend(val_data_list[lim:])
val_data_list = val_data_list[:lim]
if args.partition_dev_into_test > 0:
lim = args.partition_dev_into_test
test_data_list = val_data_list[lim:]
val_data_list = val_data_list[:lim]
else:
test_data_list = rasengan.namespacer(
read_data(args.test_fn))
# data_list = val_data_list = [(u'jason', u'eisner')]
lst_char = get_lst_char(data_list
+ val_data_list
+ test_data_list)
data_list = add_bos(data_list)
val_data_list = add_bos(val_data_list)
test_data_list = add_bos(test_data_list)
# warnings.warn('''
# NOTE: While preparing sigma, we add 1 to the index
# returned by enumerate because the transducer unit that
# Ryan wrote uses index 0 as the index for the epsilon
# symbol. So essentially the epsilon symbol and the
# integer 0 are reserved symbols that cannot appear in the
# vocabulary.
# ALSO, we need to add 1 to the vocsize because of that.
# ''')
# sigma :: char -> int
sigma = dict((b, a+1) for (a,b) in enumerate(lst_char))
# sigma_inv :: int -> char
sigma_inv = dict((a+1, b) for (a,b) in enumerate(lst_char))
if args.limit_corpus > 0:
data_list = data_list[:args.limit_corpus]
train_data = numerize(data_list, sigma, args.win)
val_data = numerize(val_data_list, sigma, args.win)
test_data = numerize(test_data_list, sigma, args.win)
data = rasengan.Namespace()
#-------------------------------------------------------------#
# Add sets that would be used by the tensorflow seq2seq #
# model. See~$PY/tensorflow/models/rnn/translate/translate.py #
#-------------------------------------------------------------#
data.train_data = data_list
data.val_data = val_data_list
data.test_data = test_data_list
data.train_set = train_data
data.dev_set = val_data
data.test_set = test_data
data.vocsize = len(sigma) + 1
data.idx2label = sigma_inv
data.label2idx = sigma
print("J")
data.train_lex = [e[0] for e in train_data]
data.train_y = [e[1] for e in train_data]
print("K")
data.valid_lex = [e[0] for e in val_data]
data.valid_y = util_lstm_seqlabel.convert_id_to_word(
[e[1] for e in val_data], data.idx2label)
print("L")
data.test_lex = [e[0] for e in test_data]
data.test_y = util_lstm_seqlabel.convert_id_to_word(
[e[1] for e in test_data], data.idx2label)
data.words_train = []
data.words_valid = []
data.words_test = []
return data
| '''
| Filename : transducer_data.py
| Description : Functions that return the data fe to the transducer.
| Author : <NAME>
| Created : Tue Dec 8 17:50:51 2015 (-0500)
| Last-Updated: Thu Dec 31 01:08:44 2015 (-0500)
| By: <NAME>
| Update #: 22
'''
import codecs
import numpy
import string
import rasengan
import util_lstm_seqlabel
import warnings
import functools
BOS_CHAR = '^'
def read_data(file_name):
"""
Helper function
"""
lst = []
with codecs.open(file_name, 'r', encoding='utf8') as f:
for line in f:
line = line.strip()
if line == '':
continue
(one, two, *rest) = line.split("\t")
lst.append((one, two))
return lst
def numerize(lst, Sigma, win):
" Takes the string-valued training data and interns it "
lst_prime = []
bos_idx = len(Sigma)
for one, two in lst:
one_prime = numpy.asarray(
util_lstm_seqlabel.conv_x(
[Sigma[x] for x in one], win, bos_idx),
dtype=numpy.int32)
two_prime = numpy.asarray(
[Sigma[x] for x in two],
dtype=numpy.int32)
lst_prime.append((one_prime, two_prime))
return lst_prime
def int2str(lst, Sigma_inv):
" Converts a list of integers to a string "
_string = ""
for x in lst:
_string += Sigma_inv[x]
return _string
def get_lst_char(data_tuple_list):
lst_char = list(set(functools.reduce(
lambda x, y: x + y[0] + y[1], data_tuple_list, '')))
for e in list(set(string.ascii_letters.lower())):
e = str(e)
if e not in lst_char:
lst_char.append(e)
if(BOS_CHAR in lst_char):
lst_char.remove(BOS_CHAR)
assert BOS_CHAR not in lst_char
lst_char.insert(0, BOS_CHAR)
return lst_char
def add_bos(data_tuple_list):
'''
The BOS_CHAR is added to the left portion of the data, that is transduced
so that my LSTM can produce (1 + length) dimensional tensor, which is then
used by the cython transducer.
'''
return [(BOS_CHAR + a, b) for a,b in data_tuple_list]
def main(args):
with rasengan.debug_support():
with rasengan.tictoc("Loading Data"):
data_list = rasengan.namespacer(
read_data(args.train_fn))
val_data_list = rasengan.namespacer(
read_data(args.dev_fn))
if args.partition_dev_into_train > 0:
lim = args.partition_dev_into_test
data_list.extend(val_data_list[lim:])
val_data_list = val_data_list[:lim]
if args.partition_dev_into_test > 0:
lim = args.partition_dev_into_test
test_data_list = val_data_list[lim:]
val_data_list = val_data_list[:lim]
else:
test_data_list = rasengan.namespacer(
read_data(args.test_fn))
# data_list = val_data_list = [(u'jason', u'eisner')]
lst_char = get_lst_char(data_list
+ val_data_list
+ test_data_list)
data_list = add_bos(data_list)
val_data_list = add_bos(val_data_list)
test_data_list = add_bos(test_data_list)
# warnings.warn('''
# NOTE: While preparing sigma, we add 1 to the index
# returned by enumerate because the transducer unit that
# Ryan wrote uses index 0 as the index for the epsilon
# symbol. So essentially the epsilon symbol and the
# integer 0 are reserved symbols that cannot appear in the
# vocabulary.
# ALSO, we need to add 1 to the vocsize because of that.
# ''')
# sigma :: char -> int
sigma = dict((b, a+1) for (a,b) in enumerate(lst_char))
# sigma_inv :: int -> char
sigma_inv = dict((a+1, b) for (a,b) in enumerate(lst_char))
if args.limit_corpus > 0:
data_list = data_list[:args.limit_corpus]
train_data = numerize(data_list, sigma, args.win)
val_data = numerize(val_data_list, sigma, args.win)
test_data = numerize(test_data_list, sigma, args.win)
data = rasengan.Namespace()
#-------------------------------------------------------------#
# Add sets that would be used by the tensorflow seq2seq #
# model. See~$PY/tensorflow/models/rnn/translate/translate.py #
#-------------------------------------------------------------#
data.train_data = data_list
data.val_data = val_data_list
data.test_data = test_data_list
data.train_set = train_data
data.dev_set = val_data
data.test_set = test_data
data.vocsize = len(sigma) + 1
data.idx2label = sigma_inv
data.label2idx = sigma
print("J")
data.train_lex = [e[0] for e in train_data]
data.train_y = [e[1] for e in train_data]
print("K")
data.valid_lex = [e[0] for e in val_data]
data.valid_y = util_lstm_seqlabel.convert_id_to_word(
[e[1] for e in val_data], data.idx2label)
print("L")
data.test_lex = [e[0] for e in test_data]
data.test_y = util_lstm_seqlabel.convert_id_to_word(
[e[1] for e in test_data], data.idx2label)
data.words_train = []
data.words_valid = []
data.words_test = []
return data
| en | 0.785239 | | Filename : transducer_data.py | Description : Functions that return the data fe to the transducer. | Author : <NAME> | Created : Tue Dec 8 17:50:51 2015 (-0500) | Last-Updated: Thu Dec 31 01:08:44 2015 (-0500) | By: <NAME> | Update #: 22 Helper function The BOS_CHAR is added to the left portion of the data, that is transduced so that my LSTM can produce (1 + length) dimensional tensor, which is then used by the cython transducer. # data_list = val_data_list = [(u'jason', u'eisner')] # warnings.warn(''' # NOTE: While preparing sigma, we add 1 to the index # returned by enumerate because the transducer unit that # Ryan wrote uses index 0 as the index for the epsilon # symbol. So essentially the epsilon symbol and the # integer 0 are reserved symbols that cannot appear in the # vocabulary. # ALSO, we need to add 1 to the vocsize because of that. # ''') # sigma :: char -> int # sigma_inv :: int -> char #-------------------------------------------------------------# # Add sets that would be used by the tensorflow seq2seq # # model. See~$PY/tensorflow/models/rnn/translate/translate.py # #-------------------------------------------------------------# | 3.130865 | 3 |
projects/nncrystal/utils/msgpack_file_loader.py | nncrystals/detectron2 | 0 | 6621431 | from typing import List
import msgpack
def load_msgpack_data(file_path):
"""
load msgpack from path and return generator returning stored frame data
"""
with open(file_path, "rb") as f:
unpacker = msgpack.Unpacker(f)
for unpacked in unpacker:
yield unpacked
def load_images_entries(file_path, required_index: List, index_list=None):
"""
:param file_path:
:param offsets:
:param index_list: optional prebuilt index
:return:
"""
if index_list is None:
index_list = msgpack_data_index(file_path)
with open(file_path, "rb") as f:
for index in required_index:
offset = index_list[index]
f.seek(offset)
unpacker = msgpack.Unpacker(f)
yield unpacker.unpack()
def msgpack_data_index(file_path):
"""
:param file_path:
:return: offset index
"""
offsets = []
offset = 0
with open(file_path, "rb") as f:
unpacker = msgpack.Unpacker(f)
for _ in unpacker:
offsets.append(offset)
offset = unpacker.tell()
return offsets
| from typing import List
import msgpack
def load_msgpack_data(file_path):
"""
load msgpack from path and return generator returning stored frame data
"""
with open(file_path, "rb") as f:
unpacker = msgpack.Unpacker(f)
for unpacked in unpacker:
yield unpacked
def load_images_entries(file_path, required_index: List, index_list=None):
"""
:param file_path:
:param offsets:
:param index_list: optional prebuilt index
:return:
"""
if index_list is None:
index_list = msgpack_data_index(file_path)
with open(file_path, "rb") as f:
for index in required_index:
offset = index_list[index]
f.seek(offset)
unpacker = msgpack.Unpacker(f)
yield unpacker.unpack()
def msgpack_data_index(file_path):
"""
:param file_path:
:return: offset index
"""
offsets = []
offset = 0
with open(file_path, "rb") as f:
unpacker = msgpack.Unpacker(f)
for _ in unpacker:
offsets.append(offset)
offset = unpacker.tell()
return offsets
| en | 0.412784 | load msgpack from path and return generator returning stored frame data :param file_path: :param offsets: :param index_list: optional prebuilt index :return: :param file_path: :return: offset index | 2.958694 | 3 |
django_nlf/__init__.py | hodossy/django-nlf | 0 | 6621432 | <reponame>hodossy/django-nlf
from .antlr import DjangoNLFLanguage, LanguageSyntaxError
from .types import Lookup, Operation, Expression, CompositeExpression, CustomFunction
__version__ = "0.0.1"
| from .antlr import DjangoNLFLanguage, LanguageSyntaxError
from .types import Lookup, Operation, Expression, CompositeExpression, CustomFunction
__version__ = "0.0.1" | none | 1 | 1.415344 | 1 | |
src/checkrs/market.py | timothyb0912/checkrs | 2 | 6621433 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Functions for plotting simulated vs observed market shares of each alternative.
"""
from __future__ import absolute_import
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sbn
from .plot_utils import _label_despine_save_and_show_plot
from .utils import progress
def _get_objects_for_market_share_plot(
x, sim_y, obs_y, x_label, y_label, display_dict=None
):
"""
Creates dataframes needed for the market share plot.
Parameters
----------
x : 1D ndarray.
Should contain the values of the discrete random value for each
alternative for each observation.
sim_y : 2D ndarray of zeros and ones.
Denotes the simulated choices based on one's model. `sim_y.shape[0]`
MUST equal `x.shape[0]`. There should be one column per simulation.
obs_y : 1D ndarray of zeros and ones.
The observed choices used to estimate one's model.
x_label, y_label : str, optional.
Denotes the x-axis and y-axis labels used on the plot. Defaults are 'X'
and 'Counts'.
display_dict : dict or None, optional.
If passed, will be used to override the default xtick-labels. Each key
should be a unique value in `x`. Each value should be the label to be
plotted.
Returns
-------
boxplot_df : pandas DataFrame.
Will contain an `x_label` and `y_label` column. There will be one row
per unique value in `x_label` per simulation. The `y_label` column will
contain the counts of the number of times the associated value in the
`x_label` column was simulated to be chosen.
obs_df : pandas DataFrame.
Will contain the same two columns as boxplot_df. There will be one row
per unique value in `x_label`. The values in the `y_label` column will
be the number of observations with the row's corresponding `x_label`
value.
"""
# Get the positions and counts of the chosen values of x
unique_pos = np.unique(x, return_index=True)[1]
# Determine the unique values in the x-array, in their original order.
unique_vals = x[np.sort(unique_pos)]
# Get the counts of the chosen values of x
_val_names, _val_counts = np.unique(x[obs_y == 1], return_counts=True)
obs_df = pd.DataFrame({x_label: _val_names, y_label: _val_counts})
# Initialize an array of the simulated number of observations per value
num_per_value_per_sim = np.empty((unique_vals.size, sim_y.shape[1]))
# Create the iterable for populating `num_per_value_per_sim`
iterator = progress(unique_vals, desc="Unique x-values")
# Populate the created array
for pos, val in enumerate(iterator):
# Determine the rows of x that have values of `val`.
row_idxs = np.where(x == val)[0]
# Get the simulated y values for the given value.
current_sim_y = sim_y[row_idxs, :]
# Store the total simulated number of observations equaling `val`
num_per_value_per_sim[pos, :] = current_sim_y.sum(axis=0)
####
# Convert the array of simulated counts per value of X to a dataframe
####
# Create an array with 1 row per unique value of x per simulation
long_vals = np.repeat(unique_vals, sim_y.shape[1])
# Convert the counts per unique value per simulation into a 1D array
long_counts = num_per_value_per_sim.ravel()
# Create a dataframe of the unique values of x and the simulated counts
boxplot_df = pd.DataFrame({x_label: long_vals, y_label: long_counts})
# Convert the unique values to names the user wants to display on the plot
if display_dict is not None:
boxplot_df[x_label] = boxplot_df[x_label].map(display_dict)
obs_df[x_label] = obs_df[x_label].map(display_dict)
# Also make the x_label values the index for the observed dataframe for
# later sorting.
obs_df.index = [str(v) for v in obs_df[x_label].values]
return boxplot_df, obs_df
def plot_simulated_market_shares(
x,
sim_y,
obs_y,
x_label="X",
y_label="Counts",
display_dict=None,
fig_and_ax=None,
figsize=(10, 6),
fontsize=12,
title=None,
box_color="white",
obs_color="#045a8d",
obs_marker="*",
obs_size=12,
obs_label="Observed",
output_file=None,
dpi=500,
show=True,
):
"""
Makes a 'market share' boxplot of the simulated distributions of a discrete
random variable versus the observed values of that variable. In particular,
plots the observed number of observations that had a given value of a
discrete variable versus the simulated distributions of how many
observations had the same value.
Parameters
----------
x : 1D ndarray.
Should contain the values of the discrete random value for each
alternative for each observation.
sim_y : 2D ndarray of zeros and ones.
Denotes the simulated choices based on one's model. `sim_y.shape[0]`
MUST equal `x.shape[0]`. There should be one column per simulation.
obs_y : 1D ndarray of zeros and ones.
The observed choices used to estimate one's model.
x_label, y_label : str, optional.
Denotes the x-axis and y-axis labels used on the plot. Defaults are 'X'
and 'Counts'.
display_dict : dict or None, optional.
If passed, will be used to override the default xtick-labels. Each key
should be a unique value in `x`. Each value should be the label to be
plotted.
fig_and_ax : list of matplotlib figure and axis, or `None`, optional.
Determines whether a new figure will be created for the plot or whether
the plot will be drawn on existing axes. If None, a new figure will be
created. Default is `None`.
figsize : 2-tuple of positive ints.
Determines the size of the created figure. Default == (10, 6).
fontsize : int or None, optional.
The fontsize to be used in the plot. Default is 12.
title : string or None, optional.
Denotes the title to be displayed for the plot. Default is None.
box_color, obs_color : valid matplotlib color argument, optional.
Denotes the color of the boxes on the boxplot and the color used to
plot the observed distribution of `x`. Default == 'white', '#045a8d'.
obs_marker : valid matplotlib marker argument, optional.
Determines the marker used to plot the observed distribution of `x`.
Default is '*'.
obs_size : int, optional.
Determines the size of the marker for the observed distribution
of `x`. Default is 12.
obs_label : str, optional.
Denotes the legend label used for the markers of the observed
distribution of `x`. Default is 'Observed'.
output_file : str, or None, optional.
Denotes the relative or absolute filepath (including the file format)
that is to be used to save the plot. If None, the plot will not be
saved to file. Default is None.
dpi : positive int, optional.
Denotes the number of 'dots per inch' for the saved figure. Will only
be used if `output_file is not None`. Default == 500.
show : bool, optional.
Determines whether the figure is shown after plotting is complete.
Default == True.
Returns
-------
None
"""
# Ensure the display dict has all possible values that are in x.
if display_dict is not None:
safe_display = {k: k for k in np.unique(x)}
safe_display.update(display_dict)
else:
safe_display = None
# Get the data needed for the plot
boxplot_df, obs_df = _get_objects_for_market_share_plot(
x, sim_y, obs_y, x_label, y_label, display_dict=safe_display
)
# Create or access the figure and axis on which the plot is to be drawn.
if fig_and_ax is None:
fig, ax = plt.subplots(1, figsize=figsize)
fig_and_ax = [fig, ax]
else:
fig, ax = fig_and_ax
# Create the desired boxplot plot
sbn.boxplot(x=x_label, y=y_label, data=boxplot_df, color=box_color, ax=ax)
# Reorder the observed values according to the order of the plot
plot_labels = [v.get_text() for v in ax.get_xticklabels()]
obs_df = obs_df.loc[plot_labels]
# Add the observed values on top the boxplot
sbn.stripplot(
x=x_label,
y=y_label,
data=obs_df,
ax=ax,
color=obs_color,
s=obs_size,
marker=obs_marker,
label=obs_label,
)
# Ensure that the xticklabels are of the correct fontsize
ax.set_xticklabels(ax.get_xticklabels(), fontsize=fontsize)
# Draw the legend, ensuring that we only have one entry.
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[:1], labels[:1], loc="best", fontsize=fontsize)
# Take care of boilerplate plotting necessities
_label_despine_save_and_show_plot(
x_label=x_label,
y_label=y_label,
fig_and_ax=fig_and_ax,
fontsize=fontsize,
y_rot=0,
y_pad=40,
title=title,
output_file=output_file,
show=show,
dpi=dpi,
)
return None
| # -*- coding: utf-8 -*-
"""
Functions for plotting simulated vs observed market shares of each alternative.
"""
from __future__ import absolute_import
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sbn
from .plot_utils import _label_despine_save_and_show_plot
from .utils import progress
def _get_objects_for_market_share_plot(
x, sim_y, obs_y, x_label, y_label, display_dict=None
):
"""
Creates dataframes needed for the market share plot.
Parameters
----------
x : 1D ndarray.
Should contain the values of the discrete random value for each
alternative for each observation.
sim_y : 2D ndarray of zeros and ones.
Denotes the simulated choices based on one's model. `sim_y.shape[0]`
MUST equal `x.shape[0]`. There should be one column per simulation.
obs_y : 1D ndarray of zeros and ones.
The observed choices used to estimate one's model.
x_label, y_label : str, optional.
Denotes the x-axis and y-axis labels used on the plot. Defaults are 'X'
and 'Counts'.
display_dict : dict or None, optional.
If passed, will be used to override the default xtick-labels. Each key
should be a unique value in `x`. Each value should be the label to be
plotted.
Returns
-------
boxplot_df : pandas DataFrame.
Will contain an `x_label` and `y_label` column. There will be one row
per unique value in `x_label` per simulation. The `y_label` column will
contain the counts of the number of times the associated value in the
`x_label` column was simulated to be chosen.
obs_df : pandas DataFrame.
Will contain the same two columns as boxplot_df. There will be one row
per unique value in `x_label`. The values in the `y_label` column will
be the number of observations with the row's corresponding `x_label`
value.
"""
# Get the positions and counts of the chosen values of x
unique_pos = np.unique(x, return_index=True)[1]
# Determine the unique values in the x-array, in their original order.
unique_vals = x[np.sort(unique_pos)]
# Get the counts of the chosen values of x
_val_names, _val_counts = np.unique(x[obs_y == 1], return_counts=True)
obs_df = pd.DataFrame({x_label: _val_names, y_label: _val_counts})
# Initialize an array of the simulated number of observations per value
num_per_value_per_sim = np.empty((unique_vals.size, sim_y.shape[1]))
# Create the iterable for populating `num_per_value_per_sim`
iterator = progress(unique_vals, desc="Unique x-values")
# Populate the created array
for pos, val in enumerate(iterator):
# Determine the rows of x that have values of `val`.
row_idxs = np.where(x == val)[0]
# Get the simulated y values for the given value.
current_sim_y = sim_y[row_idxs, :]
# Store the total simulated number of observations equaling `val`
num_per_value_per_sim[pos, :] = current_sim_y.sum(axis=0)
####
# Convert the array of simulated counts per value of X to a dataframe
####
# Create an array with 1 row per unique value of x per simulation
long_vals = np.repeat(unique_vals, sim_y.shape[1])
# Convert the counts per unique value per simulation into a 1D array
long_counts = num_per_value_per_sim.ravel()
# Create a dataframe of the unique values of x and the simulated counts
boxplot_df = pd.DataFrame({x_label: long_vals, y_label: long_counts})
# Convert the unique values to names the user wants to display on the plot
if display_dict is not None:
boxplot_df[x_label] = boxplot_df[x_label].map(display_dict)
obs_df[x_label] = obs_df[x_label].map(display_dict)
# Also make the x_label values the index for the observed dataframe for
# later sorting.
obs_df.index = [str(v) for v in obs_df[x_label].values]
return boxplot_df, obs_df
def plot_simulated_market_shares(
x,
sim_y,
obs_y,
x_label="X",
y_label="Counts",
display_dict=None,
fig_and_ax=None,
figsize=(10, 6),
fontsize=12,
title=None,
box_color="white",
obs_color="#045a8d",
obs_marker="*",
obs_size=12,
obs_label="Observed",
output_file=None,
dpi=500,
show=True,
):
"""
Makes a 'market share' boxplot of the simulated distributions of a discrete
random variable versus the observed values of that variable. In particular,
plots the observed number of observations that had a given value of a
discrete variable versus the simulated distributions of how many
observations had the same value.
Parameters
----------
x : 1D ndarray.
Should contain the values of the discrete random value for each
alternative for each observation.
sim_y : 2D ndarray of zeros and ones.
Denotes the simulated choices based on one's model. `sim_y.shape[0]`
MUST equal `x.shape[0]`. There should be one column per simulation.
obs_y : 1D ndarray of zeros and ones.
The observed choices used to estimate one's model.
x_label, y_label : str, optional.
Denotes the x-axis and y-axis labels used on the plot. Defaults are 'X'
and 'Counts'.
display_dict : dict or None, optional.
If passed, will be used to override the default xtick-labels. Each key
should be a unique value in `x`. Each value should be the label to be
plotted.
fig_and_ax : list of matplotlib figure and axis, or `None`, optional.
Determines whether a new figure will be created for the plot or whether
the plot will be drawn on existing axes. If None, a new figure will be
created. Default is `None`.
figsize : 2-tuple of positive ints.
Determines the size of the created figure. Default == (10, 6).
fontsize : int or None, optional.
The fontsize to be used in the plot. Default is 12.
title : string or None, optional.
Denotes the title to be displayed for the plot. Default is None.
box_color, obs_color : valid matplotlib color argument, optional.
Denotes the color of the boxes on the boxplot and the color used to
plot the observed distribution of `x`. Default == 'white', '#045a8d'.
obs_marker : valid matplotlib marker argument, optional.
Determines the marker used to plot the observed distribution of `x`.
Default is '*'.
obs_size : int, optional.
Determines the size of the marker for the observed distribution
of `x`. Default is 12.
obs_label : str, optional.
Denotes the legend label used for the markers of the observed
distribution of `x`. Default is 'Observed'.
output_file : str, or None, optional.
Denotes the relative or absolute filepath (including the file format)
that is to be used to save the plot. If None, the plot will not be
saved to file. Default is None.
dpi : positive int, optional.
Denotes the number of 'dots per inch' for the saved figure. Will only
be used if `output_file is not None`. Default == 500.
show : bool, optional.
Determines whether the figure is shown after plotting is complete.
Default == True.
Returns
-------
None
"""
# Ensure the display dict has all possible values that are in x.
if display_dict is not None:
safe_display = {k: k for k in np.unique(x)}
safe_display.update(display_dict)
else:
safe_display = None
# Get the data needed for the plot
boxplot_df, obs_df = _get_objects_for_market_share_plot(
x, sim_y, obs_y, x_label, y_label, display_dict=safe_display
)
# Create or access the figure and axis on which the plot is to be drawn.
if fig_and_ax is None:
fig, ax = plt.subplots(1, figsize=figsize)
fig_and_ax = [fig, ax]
else:
fig, ax = fig_and_ax
# Create the desired boxplot plot
sbn.boxplot(x=x_label, y=y_label, data=boxplot_df, color=box_color, ax=ax)
# Reorder the observed values according to the order of the plot
plot_labels = [v.get_text() for v in ax.get_xticklabels()]
obs_df = obs_df.loc[plot_labels]
# Add the observed values on top the boxplot
sbn.stripplot(
x=x_label,
y=y_label,
data=obs_df,
ax=ax,
color=obs_color,
s=obs_size,
marker=obs_marker,
label=obs_label,
)
# Ensure that the xticklabels are of the correct fontsize
ax.set_xticklabels(ax.get_xticklabels(), fontsize=fontsize)
# Draw the legend, ensuring that we only have one entry.
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[:1], labels[:1], loc="best", fontsize=fontsize)
# Take care of boilerplate plotting necessities
_label_despine_save_and_show_plot(
x_label=x_label,
y_label=y_label,
fig_and_ax=fig_and_ax,
fontsize=fontsize,
y_rot=0,
y_pad=40,
title=title,
output_file=output_file,
show=show,
dpi=dpi,
)
return None | en | 0.703528 | # -*- coding: utf-8 -*- Functions for plotting simulated vs observed market shares of each alternative. Creates dataframes needed for the market share plot. Parameters ---------- x : 1D ndarray. Should contain the values of the discrete random value for each alternative for each observation. sim_y : 2D ndarray of zeros and ones. Denotes the simulated choices based on one's model. `sim_y.shape[0]` MUST equal `x.shape[0]`. There should be one column per simulation. obs_y : 1D ndarray of zeros and ones. The observed choices used to estimate one's model. x_label, y_label : str, optional. Denotes the x-axis and y-axis labels used on the plot. Defaults are 'X' and 'Counts'. display_dict : dict or None, optional. If passed, will be used to override the default xtick-labels. Each key should be a unique value in `x`. Each value should be the label to be plotted. Returns ------- boxplot_df : pandas DataFrame. Will contain an `x_label` and `y_label` column. There will be one row per unique value in `x_label` per simulation. The `y_label` column will contain the counts of the number of times the associated value in the `x_label` column was simulated to be chosen. obs_df : pandas DataFrame. Will contain the same two columns as boxplot_df. There will be one row per unique value in `x_label`. The values in the `y_label` column will be the number of observations with the row's corresponding `x_label` value. # Get the positions and counts of the chosen values of x # Determine the unique values in the x-array, in their original order. # Get the counts of the chosen values of x # Initialize an array of the simulated number of observations per value # Create the iterable for populating `num_per_value_per_sim` # Populate the created array # Determine the rows of x that have values of `val`. # Get the simulated y values for the given value. 
# Store the total simulated number of observations equaling `val` #### # Convert the array of simulated counts per value of X to a dataframe #### # Create an array with 1 row per unique value of x per simulation # Convert the counts per unique value per simulation into a 1D array # Create a dataframe of the unique values of x and the simulated counts # Convert the unique values to names the user wants to display on the plot # Also make the x_label values the index for the observed dataframe for # later sorting. Makes a 'market share' boxplot of the simulated distributions of a discrete random variable versus the observed values of that variable. In particular, plots the observed number of observations that had a given value of a discrete variable versus the simulated distributions of how many observations had the same value. Parameters ---------- x : 1D ndarray. Should contain the values of the discrete random value for each alternative for each observation. sim_y : 2D ndarray of zeros and ones. Denotes the simulated choices based on one's model. `sim_y.shape[0]` MUST equal `x.shape[0]`. There should be one column per simulation. obs_y : 1D ndarray of zeros and ones. The observed choices used to estimate one's model. x_label, y_label : str, optional. Denotes the x-axis and y-axis labels used on the plot. Defaults are 'X' and 'Counts'. display_dict : dict or None, optional. If passed, will be used to override the default xtick-labels. Each key should be a unique value in `x`. Each value should be the label to be plotted. fig_and_ax : list of matplotlib figure and axis, or `None`, optional. Determines whether a new figure will be created for the plot or whether the plot will be drawn on existing axes. If None, a new figure will be created. Default is `None`. figsize : 2-tuple of positive ints. Determines the size of the created figure. Default == (10, 6). fontsize : int or None, optional. The fontsize to be used in the plot. Default is 12. 
title : string or None, optional. Denotes the title to be displayed for the plot. Default is None. box_color, obs_color : valid matplotlib color argument, optional. Denotes the color of the boxes on the boxplot and the color used to plot the observed distribution of `x`. Default == 'white', '#045a8d'. obs_marker : valid matplotlib marker argument, optional. Determines the marker used to plot the observed distribution of `x`. Default is '*'. obs_size : int, optional. Determines the size of the marker for the observed distribution of `x`. Default is 12. obs_label : str, optional. Denotes the legend label used for the markers of the observed distribution of `x`. Default is 'Observed'. output_file : str, or None, optional. Denotes the relative or absolute filepath (including the file format) that is to be used to save the plot. If None, the plot will not be saved to file. Default is None. dpi : positive int, optional. Denotes the number of 'dots per inch' for the saved figure. Will only be used if `output_file is not None`. Default == 500. show : bool, optional. Determines whether the figure is shown after plotting is complete. Default == True. Returns ------- None # Ensure the display dict has all possible values that are in x. # Get the data needed for the plot # Create or access the figure and axis on which the plot is to be drawn. # Create the desired boxplot plot # Reorder the observed values according to the order of the plot # Add the observed values on top the boxplot # Ensure that the xticklabels are of the correct fontsize # Draw the legend, ensuring that we only have one entry. # Take care of boilerplate plotting necessities | 3.416118 | 3 |
qcengine/programs/dftd4.py | MolSSI/dqm_compute | 105 | 6621434 | <gh_stars>100-1000
"""
Harness for the DFT-D4 dispersion correction.
This implementation interfaces with the dftd4 Python-API, which provides
native support for QCSchema.
Therefore, this harness only has to provide a thin wrapper to integrate dftd4.
"""
from typing import Dict
from qcelemental.models import AtomicInput, AtomicResult
from qcelemental.util import safe_version, which_import
from ..config import TaskConfig
from ..exceptions import InputError
from .empirical_dispersion_resources import from_arrays, get_dispersion_aliases
from .model import ProgramHarness
class DFTD4Harness(ProgramHarness):
    """Calculation harness for the DFT-D4 dispersion correction."""
    # Static capabilities: dftd4 is driven in-process through its Python API,
    # so no scratch directory is needed and calls are thread-safe.
    _defaults = {
        "name": "dftd4",
        "scratch": False,
        "thread_safe": True,
        "thread_parallel": False,
        "node_parallel": False,
        "managed_memory": False,
    }
    # Detected dftd4 version per import location, so the probe runs only once.
    version_cache: Dict[str, str] = {}
    class Config(ProgramHarness.Config):
        pass
    @staticmethod
    def found(raise_error: bool = False) -> bool:
        """Check for the availability of the Python API of dftd4"""
        return which_import(
            "dftd4",
            return_bool=True,
            raise_error=raise_error,
            raise_msg="Please install via a dftd4 version with enabled Python API",
        )
    def get_version(self) -> str:
        """Return the currently used version of dftd4"""
        self.found(raise_error=True)
        which_prog = which_import("dftd4")
        if which_prog not in self.version_cache:
            import dftd4
            self.version_cache[which_prog] = safe_version(dftd4.__version__)
        return self.version_cache[which_prog]
    def compute(self, input_model: AtomicInput, config: TaskConfig) -> AtomicResult:
        """
        Actual interface to the dftd4 package. The compute function is just a thin
        wrapper around the native QCSchema interface of the dftd4 Python-API.

        Normalizes the method/level-hint spelling expected by dftd4, runs the
        calculation via ``run_qcschema``, then repackages energies/gradients
        as QCVariables under ``output.extras["qcvars"]``.
        """
        self.found(raise_error=True)
        import dftd4
        from dftd4.qcschema import run_qcschema
        # strip engine hint
        input_data = input_model.dict()
        method = input_model.model.method
        if method.startswith("d4-"):
            method = method[3:]
            input_data["model"]["method"] = method
        # Label used to prefix QCVariable keys below (may be replaced by the
        # resolved functional name after the run).
        qcvkey = method.upper() if method is not None else None
        # send `from_arrays` the dftd4 behavior of functional specification overrides explicit parameters specification
        # * differs from dftd3 harness behavior where parameters extend or override functional
        # * stash the resolved plan in extras or, if errored, leave it for the proper dftd4 api to reject
        param_tweaks = None if method else input_model.keywords.get("params_tweaks", None)
        try:
            planinfo = from_arrays(
                verbose=1,
                name_hint=method,
                level_hint=input_model.keywords.get("level_hint", None),
                param_tweaks=param_tweaks,
                dashcoeff_supplement=input_model.keywords.get("dashcoeff_supplement", None),
            )
        except InputError:
            pass
        else:
            input_data["extras"]["info"] = planinfo
        # strip dispersion level from method
        for alias, d4 in get_dispersion_aliases().items():
            if d4 == "d4bjeeqatm" and method.lower().endswith(alias):
                # Drop the "-<alias>" suffix (the +1 removes the hyphen).
                method = method[: -(len(alias) + 1)]
                input_data["model"]["method"] = method
        # consolidate dispersion level aliases
        level_hint = input_model.keywords.get("level_hint", None)
        if level_hint and get_dispersion_aliases()[level_hint.lower()] == "d4bjeeqatm":
            level_hint = "d4"
            input_data["keywords"]["level_hint"] = level_hint
        input_model = AtomicInput(**input_data)
        # Run the Harness
        output = run_qcschema(input_model)
        if "info" in output.extras:
            qcvkey = output.extras["info"]["fctldash"].upper()
        # Collect QCVariables mirroring the dftd3 harness conventions.
        calcinfo = {}
        energy = output.properties.return_energy
        calcinfo["CURRENT ENERGY"] = energy
        calcinfo["DISPERSION CORRECTION ENERGY"] = energy
        if qcvkey:
            calcinfo[f"{qcvkey} DISPERSION CORRECTION ENERGY"] = energy
        if output.driver == "gradient":
            gradient = output.return_result
            calcinfo["CURRENT GRADIENT"] = gradient
            calcinfo["DISPERSION CORRECTION GRADIENT"] = gradient
            if qcvkey:
                calcinfo[f"{qcvkey} DISPERSION CORRECTION GRADIENT"] = gradient
        if output.keywords.get("pair_resolved", False):
            pw2 = output.extras["dftd4"]["additive pairwise energy"]
            pw3 = output.extras["dftd4"]["non-additive pairwise energy"]
            # Sanity check: pairwise decomposition must reproduce the total.
            assert abs(pw2.sum() + pw3.sum() - energy) < 1.0e-8, f"{pw2.sum()} + {pw3.sum()} != {energy}"
            calcinfo["2-BODY DISPERSION CORRECTION ENERGY"] = pw2.sum()
            calcinfo["3-BODY DISPERSION CORRECTION ENERGY"] = pw3.sum()
            calcinfo["2-BODY PAIRWISE DISPERSION CORRECTION ANALYSIS"] = pw2
            calcinfo["3-BODY PAIRWISE DISPERSION CORRECTION ANALYSIS"] = pw3
        output.extras["qcvars"] = calcinfo
        return output
| """
Harness for the DFT-D4 dispersion correction.
This implementation interfaces with the dftd4 Python-API, which provides
native support for QCSchema.
Therefore, this harness only has to provide a thin wrapper to integrate dftd4.
"""
from typing import Dict
from qcelemental.models import AtomicInput, AtomicResult
from qcelemental.util import safe_version, which_import
from ..config import TaskConfig
from ..exceptions import InputError
from .empirical_dispersion_resources import from_arrays, get_dispersion_aliases
from .model import ProgramHarness
class DFTD4Harness(ProgramHarness):
    """Calculation harness for the DFT-D4 dispersion correction."""
    # NOTE: this duplicate copy's final line was fused with dataset metadata
    # columns, making the block syntactically invalid; restored clean here.
    # Static capabilities: dftd4 is driven in-process through its Python API,
    # so no scratch directory is needed and calls are thread-safe.
    _defaults = {
        "name": "dftd4",
        "scratch": False,
        "thread_safe": True,
        "thread_parallel": False,
        "node_parallel": False,
        "managed_memory": False,
    }
    # Detected dftd4 version per import location, so the probe runs only once.
    version_cache: Dict[str, str] = {}
    class Config(ProgramHarness.Config):
        pass
    @staticmethod
    def found(raise_error: bool = False) -> bool:
        """Check for the availability of the Python API of dftd4"""
        return which_import(
            "dftd4",
            return_bool=True,
            raise_error=raise_error,
            raise_msg="Please install via a dftd4 version with enabled Python API",
        )
    def get_version(self) -> str:
        """Return the currently used version of dftd4"""
        self.found(raise_error=True)
        which_prog = which_import("dftd4")
        if which_prog not in self.version_cache:
            import dftd4
            self.version_cache[which_prog] = safe_version(dftd4.__version__)
        return self.version_cache[which_prog]
    def compute(self, input_model: AtomicInput, config: TaskConfig) -> AtomicResult:
        """
        Actual interface to the dftd4 package. The compute function is just a thin
        wrapper around the native QCSchema interface of the dftd4 Python-API.
        """
        self.found(raise_error=True)
        import dftd4
        from dftd4.qcschema import run_qcschema
        # strip engine hint
        input_data = input_model.dict()
        method = input_model.model.method
        if method.startswith("d4-"):
            method = method[3:]
            input_data["model"]["method"] = method
        # Label used to prefix QCVariable keys below.
        qcvkey = method.upper() if method is not None else None
        # send `from_arrays` the dftd4 behavior of functional specification overrides explicit parameters specification
        # * differs from dftd3 harness behavior where parameters extend or override functional
        # * stash the resolved plan in extras or, if errored, leave it for the proper dftd4 api to reject
        param_tweaks = None if method else input_model.keywords.get("params_tweaks", None)
        try:
            planinfo = from_arrays(
                verbose=1,
                name_hint=method,
                level_hint=input_model.keywords.get("level_hint", None),
                param_tweaks=param_tweaks,
                dashcoeff_supplement=input_model.keywords.get("dashcoeff_supplement", None),
            )
        except InputError:
            pass
        else:
            input_data["extras"]["info"] = planinfo
        # strip dispersion level from method
        for alias, d4 in get_dispersion_aliases().items():
            if d4 == "d4bjeeqatm" and method.lower().endswith(alias):
                method = method[: -(len(alias) + 1)]
                input_data["model"]["method"] = method
        # consolidate dispersion level aliases
        level_hint = input_model.keywords.get("level_hint", None)
        if level_hint and get_dispersion_aliases()[level_hint.lower()] == "d4bjeeqatm":
            level_hint = "d4"
            input_data["keywords"]["level_hint"] = level_hint
        input_model = AtomicInput(**input_data)
        # Run the Harness
        output = run_qcschema(input_model)
        if "info" in output.extras:
            qcvkey = output.extras["info"]["fctldash"].upper()
        # Collect QCVariables mirroring the dftd3 harness conventions.
        calcinfo = {}
        energy = output.properties.return_energy
        calcinfo["CURRENT ENERGY"] = energy
        calcinfo["DISPERSION CORRECTION ENERGY"] = energy
        if qcvkey:
            calcinfo[f"{qcvkey} DISPERSION CORRECTION ENERGY"] = energy
        if output.driver == "gradient":
            gradient = output.return_result
            calcinfo["CURRENT GRADIENT"] = gradient
            calcinfo["DISPERSION CORRECTION GRADIENT"] = gradient
            if qcvkey:
                calcinfo[f"{qcvkey} DISPERSION CORRECTION GRADIENT"] = gradient
        if output.keywords.get("pair_resolved", False):
            pw2 = output.extras["dftd4"]["additive pairwise energy"]
            pw3 = output.extras["dftd4"]["non-additive pairwise energy"]
            # Sanity check: pairwise decomposition must reproduce the total.
            assert abs(pw2.sum() + pw3.sum() - energy) < 1.0e-8, f"{pw2.sum()} + {pw3.sum()} != {energy}"
            calcinfo["2-BODY DISPERSION CORRECTION ENERGY"] = pw2.sum()
            calcinfo["3-BODY DISPERSION CORRECTION ENERGY"] = pw3.sum()
            calcinfo["2-BODY PAIRWISE DISPERSION CORRECTION ANALYSIS"] = pw2
            calcinfo["3-BODY PAIRWISE DISPERSION CORRECTION ANALYSIS"] = pw3
        output.extras["qcvars"] = calcinfo
        return output
instrument.py | alkamid/lab-suite | 0 | 6621435 | <reponame>alkamid/lab-suite
import visa
rm = visa.ResourceManager()
class Instrument():
    """Thin wrapper around a PyVISA resource, with GPIB auto-discovery."""

    def __init__(self, address=None, identificator=None):
        """Open a resource at *address*, or scan GPIB for *identificator* type."""
        if address is not None:
            self.instr = rm.open_resource(address)
        elif identificator is not None:
            self.find_instrument(identificator)

    def find_instrument(self, instr_type):
        """Scan GPIB addresses 0-19 for an instrument of the given type.

        *instr_type* is one of 'stage', 'pulser', 'tempcon' or 'lockin'.
        Sets ``self.instr``/``self.id`` and returns 0 on success, -1 when no
        match is found.  Raises KeyError for an unknown non-lockin type.
        """
        identificators = {'stage': 'ESP300', 'pulser': 'HEWLETT-PACKARD', 'tempcon': 'Cryocon'}
        for i in range(20):
            self.instr = rm.open_resource('GPIB0::{0}::INSTR'.format(i))
            try:
                self.id = self.instr.query('*IDN?')
            except Exception:
                # No (parsable) answer at this address; close the handle so
                # we do not leak one open session per probed address.
                self.instr.close()
                continue
            if instr_type == 'lockin':
                # The EG&G 5210 has no *IDN?; it echoes a signed 6-digit reading.
                if self.id[0] in ('+', '-') and len(self.id) == 7:
                    self.id = 'EG&G PARC 5210 lock-in amplifier'
                    return 0
            elif identificators[instr_type] in self.id:
                return 0
            # Responded but did not match; close before trying the next address.
            self.instr.close()
        print('Instrument not found!')
        return -1

    def ask(self, cmd):
        """Write *cmd* and return the instrument's reply.

        Uses pyvisa ``query()``, which replaced the deprecated ``ask()``
        (the rest of this file already uses ``query``).
        """
        return self.instr.query(cmd)

    def write(self, cmd):
        """Write *cmd* to the instrument without reading a reply."""
        return self.instr.write(cmd)
| import visa
rm = visa.ResourceManager()
class Instrument():
    """Thin wrapper around a PyVISA resource, with GPIB auto-discovery."""

    def __init__(self, address=None, identificator=None):
        """Open a resource at *address*, or scan GPIB for *identificator* type."""
        if address is not None:
            self.instr = rm.open_resource(address)
        elif identificator is not None:
            self.find_instrument(identificator)

    def find_instrument(self, instr_type):
        """Scan GPIB addresses 0-19 for an instrument of the given type.

        *instr_type* is one of 'stage', 'pulser', 'tempcon' or 'lockin'.
        Sets ``self.instr``/``self.id`` and returns 0 on success, -1 when no
        match is found.  Raises KeyError for an unknown non-lockin type.
        """
        identificators = {'stage': 'ESP300', 'pulser': 'HEWLETT-PACKARD', 'tempcon': 'Cryocon'}
        for i in range(20):
            self.instr = rm.open_resource('GPIB0::{0}::INSTR'.format(i))
            try:
                self.id = self.instr.query('*IDN?')
            except Exception:
                # No (parsable) answer at this address; close the handle so
                # we do not leak one open session per probed address.
                self.instr.close()
                continue
            if instr_type == 'lockin':
                # The EG&G 5210 has no *IDN?; it echoes a signed 6-digit reading.
                if self.id[0] in ('+', '-') and len(self.id) == 7:
                    self.id = 'EG&G PARC 5210 lock-in amplifier'
                    return 0
            elif identificators[instr_type] in self.id:
                return 0
            # Responded but did not match; close before trying the next address.
            self.instr.close()
        print('Instrument not found!')
        return -1

    def ask(self, cmd):
        """Write *cmd* and return the instrument's reply.

        Uses pyvisa ``query()``, which replaced the deprecated ``ask()``
        (the rest of this file already uses ``query``).
        """
        return self.instr.query(cmd)

    def write(self, cmd):
        """Write *cmd* to the instrument without reading a reply."""
        return self.instr.write(cmd)
python code/CNCController.py | bsbrl/craniobot | 4 | 6621436 | <filename>python code/CNCController.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 19 14:41:05 2017
@author: Franklin
"""
import serial
import time
import json
from pointGen import pointGen
class CNCController():
    """Serial driver for a TinyG-based CNC machine using TinyG's JSON protocol.

    Typical flow: assignPort() -> connect() -> wakeUp() ->
    checkConfiguration() -> jog()/runProbe()/runMill() -> disconnect().
    """

    def assignPort(self, port):
        """Remember which serial port to use; 'default' selects the built-in one."""
        if port.lower() == 'default':
            self.port = 'COM4'  # change this com port to the com port of your tinyG
            # self.port = '/dev/tty.usbserial-DN01XFHI'
        else:
            self.port = port

    def checkConnection(self):
        """Return True when the serial connection is NOT usable, else False.

        Bug fix: the result used to be stored only on ``self.flag`` while the
        method implicitly returned None, so ``if self.checkConnection():`` in
        runProbe()/runMill() could never detect a missing or closed port.
        """
        self.flag = False
        try:
            if self.ser.isOpen():
                print("CNC port is open at " + self.port)
            else:
                print(self.port + " is closed")
                self.flag = True
        except AttributeError:
            # self.ser was never created, i.e. connect() has not been called.
            print("Need to first open the serial port connection!")
            self.flag = True
        return self.flag

    def connect(self):
        """Open the serial port connection to the CNC machine."""
        self.ser = serial.Serial(self.port, baudrate=115200, timeout=1)

    def disconnect(self):
        """Close the serial port connection to the CNC machine."""
        self.ser.close()

    def jog(self, direction, step, speed):
        """Jog one axis: relative (G91) G1 move of *step* along *direction*."""
        command = '{{"gc":"g91g1f{}{}{}"}}\n'.format(speed, direction, step)
        self.ser.write(command.encode('utf-8'))

    def goToXYOrigin(self, speed):
        """Move to (x, y) = (0, 0) at the current z height (absolute G90 G1)."""
        command = '{{"gc":"g90g1f{}x0y0"}}\n'.format(speed)
        self.ser.write(command.encode('utf-8'))

    def runSingleProbe(self):
        """Probe downward to at most z = -5 mm at 5 mm/min (G38.2)."""
        command = {"gc": "g38.2f5z-5"}
        self.ser.write('{}\n'.format(json.dumps(command)).encode('utf-8'))

    def setOrigin(self):
        """Declare the current position to be (0, 0, 0) (G28.3)."""
        self.ser.write(b'{"gc":"g28.3x0y0z0"}\n')

    def currentPosition(self):
        """Request and print the machine's current position."""
        self.ser.write(b'{"pos":n}\n')
        # Bug fix: inWaiting must be *called*; the bare bound method was
        # always truthy, so the branch ran even with nothing to read.
        if self.ser.inWaiting():
            print(self.ser.readlines())

    def wakeUp(self):
        """Wake an idle TinyG by sending blank lines, then drain its replies."""
        self.ser.write(b'\r\n\r\n\r\n')
        time.sleep(2)
        while self.ser.inWaiting():
            num_bytes = self.ser.inWaiting()
            message = self.ser.read(num_bytes).decode('ascii')
            print(message)

    def checkConfiguration(self):
        """Verify TinyG verbosity settings required by this driver.

        Expects JSON verbosity jv == 5 and queue/status report verbosity
        qv == 0 and sv == 0.  Returns True when anything is wrong, False
        when all settings are correct.
        """
        self.ser.write(b'{"sys":n}\n')
        print("Checking that the TinyG configuration settings are correct...\n")
        time.sleep(2)
        configuration = list()
        flag = False
        while self.ser.inWaiting():
            # Get each response line and parse it as JSON.
            configuration.append(json.loads(self.ser.readline().decode('ascii')))
        if configuration[-1]["r"]["sys"]["jv"] != 5:
            print("JSON reporting verbosity is configured incorrectly\n")
            flag = True
        else:
            print("JSON verbosity set correctly to 5! Safe to continue.\n")
        if configuration[-1]["r"]["sys"]["qv"]:
            print("The queue report verbosity settings are incorrectly set to 1.\n")
            flag = True
        else:
            print("The queue report verbosity settings are correct! Safe to continue.\n")
        if configuration[-1]["r"]["sys"]["sv"]:
            print("The status report verbosity settings are incorrectly set to 1.\n")
            flag = True
        else:
            print("The status report verbosity settings are correct! Safe to continue.\n")
        return flag

    def runProbe(self, gCode):
        """Stream a probing g-code program (list of command dicts) to TinyG.

        Keeps the planner buffer topped up by sending one new command per
        completed-command report, then stores the probe ("prb") results in
        ``self.probe_output``.  See
        https://github.com/synthetos/TinyG/issues/175 and
        https://onehossshay.wordpress.com/2011/08/26/grbl-a-simple-python-interface/
        """
        reports = list()  # status reports received from TinyG
        if self.checkConnection():
            print("Connection issue. Please try again.")
            return
        self.ser.flushInput()  # flush startup text in serial input
        self.wakeUp()
        if self.checkConfiguration():
            print("The configuration is set incorrectly. Please fix")
            return
        # Pre-fill the TinyG planning buffer with a few commands
        # (robust against programs shorter than 5 commands).
        n = min(5, len(gCode))
        for x in range(n):
            self.ser.write('{}\n'.format(json.dumps(gCode[x])).encode('utf-8'))
        # Send one further command per completed-command report so the
        # planning buffer never overflows.
        while n < len(gCode):
            if self.ser.inWaiting():
                reports.append(json.loads(self.ser.readline().decode('ascii')))
                if "gc" in reports[-1]["r"]:
                    self.ser.write('{}\n'.format(json.dumps(gCode[n])).encode('utf-8'))
                    runProbe_percent_complete = n / len(gCode) * 100
                    print("runProbe progress: ", runProbe_percent_complete, "%")
                    n += 1
        # Drain remaining output until TinyG signals program end (M2).
        while not reports or "m2" not in json.dumps(reports[-1]).lower():
            if self.ser.inWaiting():
                reports.append(json.loads(self.ser.readline().decode('ascii')))
        # Keep only the probe-result records, already parsed as JSON.
        self.probe_output = [item for item in reports if "prb" in item["r"]]

    def runMill(self, gCode):
        """Stream a milling g-code program (list of command dicts) to TinyG.

        Same flow-control scheme as runProbe(); returns the raw list of
        TinyG status reports.
        """
        reports = list()  # status reports received from TinyG
        if self.checkConnection():
            print("Connection issue. Please try again.")
            return
        self.ser.flushInput()  # flush startup text in serial input
        self.wakeUp()
        if self.checkConfiguration():
            print("The configuration is set incorrectly. Please fix")
            return
        # Pre-fill the TinyG planning buffer with a few commands.
        n = min(5, len(gCode))
        for x in range(n):
            self.ser.write('{}\n'.format(json.dumps(gCode[x])).encode('utf-8'))
        # Send one further command per completed-command report.
        while n < len(gCode):
            if self.ser.inWaiting():
                reports.append(json.loads(self.ser.readline().decode('ascii')))
                if "gc" in reports[-1]["r"]:
                    self.ser.write('{}\n'.format(json.dumps(gCode[n])).encode('utf-8'))
                    n += 1
        # Drain remaining output until TinyG signals program end (M2).
        while not reports or "m2" not in json.dumps(reports[-1]).lower():
            if self.ser.inWaiting():
                reports.append(json.loads(self.ser.readline().decode('ascii')))
        return reports
| <filename>python code/CNCController.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 19 14:41:05 2017
@author: Franklin
"""
import serial
import time
import json
from pointGen import pointGen
class CNCController():
    """Serial driver for a TinyG-based CNC machine using TinyG's JSON protocol.

    Typical flow: assignPort() -> connect() -> wakeUp() ->
    checkConfiguration() -> jog()/runProbe()/runMill() -> disconnect().
    """

    def assignPort(self, port):
        """Remember which serial port to use; 'default' selects the built-in one."""
        if port.lower() == 'default':
            self.port = 'COM4'  # change this com port to the com port of your tinyG
            # self.port = '/dev/tty.usbserial-DN01XFHI'
        else:
            self.port = port

    def checkConnection(self):
        """Return True when the serial connection is NOT usable, else False.

        Bug fix: the result used to be stored only on ``self.flag`` while the
        method implicitly returned None, so ``if self.checkConnection():`` in
        runProbe()/runMill() could never detect a missing or closed port.
        """
        self.flag = False
        try:
            if self.ser.isOpen():
                print("CNC port is open at " + self.port)
            else:
                print(self.port + " is closed")
                self.flag = True
        except AttributeError:
            # self.ser was never created, i.e. connect() has not been called.
            print("Need to first open the serial port connection!")
            self.flag = True
        return self.flag

    def connect(self):
        """Open the serial port connection to the CNC machine."""
        self.ser = serial.Serial(self.port, baudrate=115200, timeout=1)

    def disconnect(self):
        """Close the serial port connection to the CNC machine."""
        self.ser.close()

    def jog(self, direction, step, speed):
        """Jog one axis: relative (G91) G1 move of *step* along *direction*."""
        command = '{{"gc":"g91g1f{}{}{}"}}\n'.format(speed, direction, step)
        self.ser.write(command.encode('utf-8'))

    def goToXYOrigin(self, speed):
        """Move to (x, y) = (0, 0) at the current z height (absolute G90 G1)."""
        command = '{{"gc":"g90g1f{}x0y0"}}\n'.format(speed)
        self.ser.write(command.encode('utf-8'))

    def runSingleProbe(self):
        """Probe downward to at most z = -5 mm at 5 mm/min (G38.2)."""
        command = {"gc": "g38.2f5z-5"}
        self.ser.write('{}\n'.format(json.dumps(command)).encode('utf-8'))

    def setOrigin(self):
        """Declare the current position to be (0, 0, 0) (G28.3)."""
        self.ser.write(b'{"gc":"g28.3x0y0z0"}\n')

    def currentPosition(self):
        """Request and print the machine's current position."""
        self.ser.write(b'{"pos":n}\n')
        # Bug fix: inWaiting must be *called*; the bare bound method was
        # always truthy, so the branch ran even with nothing to read.
        if self.ser.inWaiting():
            print(self.ser.readlines())

    def wakeUp(self):
        """Wake an idle TinyG by sending blank lines, then drain its replies."""
        self.ser.write(b'\r\n\r\n\r\n')
        time.sleep(2)
        while self.ser.inWaiting():
            num_bytes = self.ser.inWaiting()
            message = self.ser.read(num_bytes).decode('ascii')
            print(message)

    def checkConfiguration(self):
        """Verify TinyG verbosity settings required by this driver.

        Expects JSON verbosity jv == 5 and queue/status report verbosity
        qv == 0 and sv == 0.  Returns True when anything is wrong, False
        when all settings are correct.
        """
        self.ser.write(b'{"sys":n}\n')
        print("Checking that the TinyG configuration settings are correct...\n")
        time.sleep(2)
        configuration = list()
        flag = False
        while self.ser.inWaiting():
            # Get each response line and parse it as JSON.
            configuration.append(json.loads(self.ser.readline().decode('ascii')))
        if configuration[-1]["r"]["sys"]["jv"] != 5:
            print("JSON reporting verbosity is configured incorrectly\n")
            flag = True
        else:
            print("JSON verbosity set correctly to 5! Safe to continue.\n")
        if configuration[-1]["r"]["sys"]["qv"]:
            print("The queue report verbosity settings are incorrectly set to 1.\n")
            flag = True
        else:
            print("The queue report verbosity settings are correct! Safe to continue.\n")
        if configuration[-1]["r"]["sys"]["sv"]:
            print("The status report verbosity settings are incorrectly set to 1.\n")
            flag = True
        else:
            print("The status report verbosity settings are correct! Safe to continue.\n")
        return flag

    def runProbe(self, gCode):
        """Stream a probing g-code program (list of command dicts) to TinyG.

        Keeps the planner buffer topped up by sending one new command per
        completed-command report, then stores the probe ("prb") results in
        ``self.probe_output``.  See
        https://github.com/synthetos/TinyG/issues/175
        """
        reports = list()  # status reports received from TinyG
        if self.checkConnection():
            print("Connection issue. Please try again.")
            return
        self.ser.flushInput()  # flush startup text in serial input
        self.wakeUp()
        if self.checkConfiguration():
            print("The configuration is set incorrectly. Please fix")
            return
        # Pre-fill the TinyG planning buffer with a few commands
        # (robust against programs shorter than 5 commands).
        n = min(5, len(gCode))
        for x in range(n):
            self.ser.write('{}\n'.format(json.dumps(gCode[x])).encode('utf-8'))
        # Send one further command per completed-command report so the
        # planning buffer never overflows.
        while n < len(gCode):
            if self.ser.inWaiting():
                reports.append(json.loads(self.ser.readline().decode('ascii')))
                if "gc" in reports[-1]["r"]:
                    self.ser.write('{}\n'.format(json.dumps(gCode[n])).encode('utf-8'))
                    runProbe_percent_complete = n / len(gCode) * 100
                    print("runProbe progress: ", runProbe_percent_complete, "%")
                    n += 1
        # Drain remaining output until TinyG signals program end (M2).
        while not reports or "m2" not in json.dumps(reports[-1]).lower():
            if self.ser.inWaiting():
                reports.append(json.loads(self.ser.readline().decode('ascii')))
        # Keep only the probe-result records, already parsed as JSON.
        self.probe_output = [item for item in reports if "prb" in item["r"]]

    def runMill(self, gCode):
        """Stream a milling g-code program (list of command dicts) to TinyG.

        Same flow-control scheme as runProbe(); returns the raw list of
        TinyG status reports.
        """
        reports = list()  # status reports received from TinyG
        if self.checkConnection():
            print("Connection issue. Please try again.")
            return
        self.ser.flushInput()  # flush startup text in serial input
        self.wakeUp()
        if self.checkConfiguration():
            print("The configuration is set incorrectly. Please fix")
            return
        # Pre-fill the TinyG planning buffer with a few commands.
        n = min(5, len(gCode))
        for x in range(n):
            self.ser.write('{}\n'.format(json.dumps(gCode[x])).encode('utf-8'))
        # Send one further command per completed-command report.
        while n < len(gCode):
            if self.ser.inWaiting():
                reports.append(json.loads(self.ser.readline().decode('ascii')))
                if "gc" in reports[-1]["r"]:
                    self.ser.write('{}\n'.format(json.dumps(gCode[n])).encode('utf-8'))
                    n += 1
        # Drain remaining output until TinyG signals program end (M2).
        while not reports or "m2" not in json.dumps(reports[-1]).lower():
            if self.ser.inWaiting():
                reports.append(json.loads(self.ser.readline().decode('ascii')))
        return reports
| en | 0.845374 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Fri May 19 14:41:05 2017 @author: Franklin #change this com port to the com port of your tinyG #self.port = '/dev/tty.usbserial-DN01XFHI' #This checks the connection state of the Serial port #If ser doesn't exist, print Open serial port. #return flag #This opens up the serial port connection to the CNC machine. #This closes the serial port connection to the CNC machine. #This is used to jog the machine #This will take the machine to (x,y)=(0,0) at any z # probe down/up to -5mm at a rate of 5 mm/min #This sets the current position to (0,0,0) #This returns the current position of the CNC machine #Is there anything to read? #This wakes up the CNC machine if it's been idle for awhile #Is there anything to read? #This checks that the configuration of TinyG is set approriately for the code #Get the response and parse as JSON # An empty list to store status reports from TinyG #Make sure we have a connection #Flush startup text in serial input #make sure configuration settings are correct and OK to continue #Send a few lines of gcode to get the TinyG planning buffer filled a bit #self.ser.write(probe_commands[x].encode('utf-8')) #Now send a new line of gcode every time TinyG returns a status saying it's completed a line of gcode #This prevents over filling the planning buffer. Do this until we've made it through the entire code. #See https://github.com/synthetos/TinyG/issues/175 for more info #and also https://onehossshay.wordpress.com/2011/08/26/grbl-a-simple-python-interface/ #is there something to be read? #Get the response and parse as JSON #if we get a system report saying a gCode command is complete, send another line of gcode. #self.ser.write(probe_commands[n].encode('utf-8')) #matt #matt #index the while loop and loop back until we've sent all the gcode commands. 
#now sit and read the serial line until the program end flag is sent by TinyG #read out remaining serial output from Tiny g until last entry is an empty string #filter out only probe end returns, and convert the strings to JSON. #return probe_output # An empty list to store status reports from TinyG #Make sure we have a connection #Flush startup text in serial input #make sure configuration settings are correct and OK to continue #Send a few lines of gcode to get the TinyG planning buffer filled a bit #self.ser.write(probe_commands[x].encode('utf-8')) #Now send a new line of gcode every time TinyG returns a status saying it's completed a line of gcode #This prevents over filling the planning buffer. Do this until we've made it through the entire code. #See https://github.com/synthetos/TinyG/issues/175 for more info #and also https://onehossshay.wordpress.com/2011/08/26/grbl-a-simple-python-interface/ #is there something to be read? #Get the response and parse as JSON #if we get a system report saying a gCode command is complete, send another line of gcode. #self.ser.write(probe_commands[n].encode('utf-8')) #index the while loop and loop back until we've sent all the gcode commands. #now sit and read the serial line until the program end flag is sent by TinyG #read out remaining serial output from Tiny g until last entry is an empty string. | 2.613891 | 3 |
concurrency/futures/futures_test_share_object.py | scotthuang1989/Python-3-Module-of-the-Week | 2 | 6621437 | <reponame>scotthuang1989/Python-3-Module-of-the-Week<filename>concurrency/futures/futures_test_share_object.py
"""
test if share state is possible
"""
from concurrent import futures
from multiprocessing import Manager
share_list =[]
def task(n):
    """Print *n* and append it to the module-level ``share_list``."""
    print(n)
    share_list.append(n)
# Threads share the interpreter's memory, so a plain module-level list is
# visible to (and mutated by) every worker without extra machinery.
with futures.ThreadPoolExecutor(max_workers=2) as ex:
    print('main: starting')
    ex.submit(task, 1)
    ex.submit(task, 2)
    ex.submit(task, 3)
    ex.submit(task, 4)
    print('thread test: done')
# Leaving the with-block waits for all workers, so the list is complete here.
print(share_list)
# must use Manager as proxy: separate processes do not share memory, so a
# multiprocessing.Manager serves a proxy list that workers mutate via IPC.
manager = Manager()
share_list_process = manager.list([])
def task_p(n, t_list):
    """Record *n* in *t_list* (e.g. a Manager list proxy) and hand it back."""
    t_list.extend([n])
    return n
# NOTE(review): spawning worker processes at module import time is only
# reliable on fork-based platforms; the concurrent.futures docs require the
# __main__ module to be importable, i.e. this should live under an
# ``if __name__ == '__main__':`` guard -- TODO confirm target platform.
with futures.ProcessPoolExecutor(max_workers=2) as ex:
    print('main: starting')
    ex.submit(task_p, 5, share_list_process)
    ex.submit(task_p, 6, share_list_process)
    ex.submit(task_p, 7, share_list_process)
    ex.submit(task_p, 8, share_list_process)
    print('process test: done')
# The Manager proxy prints like a list and contains all worker appends.
print(share_list_process)
| """
test if share state is possible
"""
from concurrent import futures
from multiprocessing import Manager
share_list =[]
def task(n):
    """Print *n* and append it to the module-level ``share_list``."""
    print(n)
    share_list.append(n)
# Threads share the interpreter's memory, so a plain module-level list is
# visible to (and mutated by) every worker without extra machinery.
with futures.ThreadPoolExecutor(max_workers=2) as ex:
    print('main: starting')
    ex.submit(task, 1)
    ex.submit(task, 2)
    ex.submit(task, 3)
    ex.submit(task, 4)
    print('thread test: done')
# Leaving the with-block waits for all workers, so the list is complete here.
print(share_list)
# must use Manager as proxy: separate processes do not share memory, so a
# multiprocessing.Manager serves a proxy list that workers mutate via IPC.
manager = Manager()
share_list_process = manager.list([])
def task_p(n, t_list):
    """Record *n* in *t_list* (e.g. a Manager list proxy) and hand it back."""
    t_list.extend([n])
    return n
# NOTE(review): spawning worker processes at module import time is only
# reliable on fork-based platforms; the concurrent.futures docs require the
# __main__ module to be importable, i.e. this should live under an
# ``if __name__ == '__main__':`` guard -- TODO confirm target platform.
with futures.ProcessPoolExecutor(max_workers=2) as ex:
    print('main: starting')
    ex.submit(task_p, 5, share_list_process)
    ex.submit(task_p, 6, share_list_process)
    ex.submit(task_p, 7, share_list_process)
    ex.submit(task_p, 8, share_list_process)
    print('process test: done')
# Restored clean final line (it had dataset metadata columns fused onto it).
print(share_list_process)
api/resources.py | aeisenbarth/am-segmentation | 3 | 6621438 | import re
import falcon
import json
# Upload content types accepted by SegmentationTaskCollection.on_post.
ALLOWED_IMAGE_TYPES = (
    'image/png',
)
# Lowercase UUID (8-4-4-4-12 hex digits) pattern; the trailing $ together
# with re.match anchors both ends, so the whole task_id must be a UUID.
_UUID_PATTERN = re.compile(
    '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$'
)
def validate_image_type(req, resp, resource, params):
    """Falcon ``before`` hook: reject uploads whose Content-Type is not allowed."""
    if req.content_type in ALLOWED_IMAGE_TYPES:
        return
    raise falcon.HTTPBadRequest(
        'Bad request',
        f'Image type not allowed. Must be one of {ALLOWED_IMAGE_TYPES}',
    )
def validate_task_id(req, resp, resource, params):
    """Falcon ``before`` hook: require ``task_id`` to be a well-formed UUID.

    Bug fix: an invalid id used to raise a bare IOError, which Falcon turns
    into a 500 server error; a malformed id is a client error, so answer
    400 Bad Request instead (consistent with validate_image_type).
    """
    # Always validate untrusted input!
    if not _UUID_PATTERN.match(params.get('task_id', '')):
        raise falcon.HTTPBadRequest('Bad request', 'Wrong task id')
# class AblationMaskCollection(object):
#
# def __init__(self, task_manager):
# self._task_manager = task_manager
#
# def on_get(self, req, resp):
# mask_docs = [{'href': f'/masks/{fn}'}
# for fn in self._image_store.list_masks()]
# doc = {
# 'masks': mask_docs
# }
# resp.body = json.dumps(doc, ensure_ascii=False)
# resp.status = falcon.HTTP_200
class AblationMask(object):
    """Serves the PNG segmentation mask produced for a finished task."""

    def __init__(self, task_manager):
        self._task_manager = task_manager

    @falcon.before(validate_task_id)
    def on_get(self, req, resp, task_id):
        """Stream the mask for *task_id*; 404 when no result exists yet."""
        try:
            resp.stream, resp.stream_len = self._task_manager.read_result(task_id)
            resp.content_type = falcon.MEDIA_PNG
        except IOError:
            # The task manager signals "no such result" with IOError.
            raise falcon.HTTPNotFound()
class SegmentationTaskCollection(object):
    """POST endpoint that accepts an image upload and creates a task."""

    def __init__(self, task_manager):
        self._task_manager = task_manager

    @falcon.before(validate_image_type)
    def on_post(self, req, resp):
        """Create a segmentation task from the body; reply 201 with its URL."""
        new_id = self._task_manager.create_task(req.stream, req.content_type)
        resp.status = falcon.HTTP_201
        resp.location = f'/tasks/{new_id}'
class SegmentationTask(object):
def __init__(self, task_manager):
self._task_manager = task_manager
@falcon.before(validate_task_id)
def on_get(self, req, resp, task_id):
doc = {
'status': self._task_manager.task_status(task_id)
}
resp.body = json.dumps(doc, ensure_ascii=False)
resp.status = falcon.HTTP_200
| import re
import falcon
import json
ALLOWED_IMAGE_TYPES = (
'image/png',
)
_UUID_PATTERN = re.compile(
'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$'
)
def validate_image_type(req, resp, resource, params):
if req.content_type not in ALLOWED_IMAGE_TYPES:
msg = f'Image type not allowed. Must be one of {ALLOWED_IMAGE_TYPES}'
raise falcon.HTTPBadRequest('Bad request', msg)
def validate_task_id(req, resp, resource, params):
# Always validate untrusted input!
if not _UUID_PATTERN.match(params.get('task_id', '')):
raise IOError('Wrong task id')
# class AblationMaskCollection(object):
#
# def __init__(self, task_manager):
# self._task_manager = task_manager
#
# def on_get(self, req, resp):
# mask_docs = [{'href': f'/masks/{fn}'}
# for fn in self._image_store.list_masks()]
# doc = {
# 'masks': mask_docs
# }
# resp.body = json.dumps(doc, ensure_ascii=False)
# resp.status = falcon.HTTP_200
class AblationMask(object):
def __init__(self, task_manager):
self._task_manager = task_manager
@falcon.before(validate_task_id)
def on_get(self, req, resp, task_id):
try:
resp.stream, resp.stream_len = self._task_manager.read_result(task_id)
resp.content_type = falcon.MEDIA_PNG
except IOError as e:
raise falcon.HTTPNotFound()
class SegmentationTaskCollection(object):
def __init__(self, task_manager):
self._task_manager = task_manager
@falcon.before(validate_image_type)
def on_post(self, req, resp):
task_id = self._task_manager.create_task(req.stream, req.content_type)
resp.status = falcon.HTTP_201
resp.location = '/tasks/' + task_id
class SegmentationTask(object):
def __init__(self, task_manager):
self._task_manager = task_manager
@falcon.before(validate_task_id)
def on_get(self, req, resp, task_id):
doc = {
'status': self._task_manager.task_status(task_id)
}
resp.body = json.dumps(doc, ensure_ascii=False)
resp.status = falcon.HTTP_200
| en | 0.311764 | # Always validate untrusted input! # class AblationMaskCollection(object): # # def __init__(self, task_manager): # self._task_manager = task_manager # # def on_get(self, req, resp): # mask_docs = [{'href': f'/masks/{fn}'} # for fn in self._image_store.list_masks()] # doc = { # 'masks': mask_docs # } # resp.body = json.dumps(doc, ensure_ascii=False) # resp.status = falcon.HTTP_200 | 2.435816 | 2 |
LoremPizzum/products/urls.py | BruhMano/LoremPizzum | 0 | 6621439 | <filename>LoremPizzum/products/urls.py
from django.urls import path
from .views import *
urlpatterns = [
path('',index,name = 'index'),
path('menu/',menu,name ="menu"),
path('menu/<int:ids>/',product_details,name = "product"),
] | <filename>LoremPizzum/products/urls.py
from django.urls import path
from .views import *
urlpatterns = [
path('',index,name = 'index'),
path('menu/',menu,name ="menu"),
path('menu/<int:ids>/',product_details,name = "product"),
] | none | 1 | 1.807526 | 2 | |
trailTracer/scripts/trailTracerLib.py | Colin-Lundquist/trailTracer | 1 | 6621440 | from imutils.video import WebcamVideoStream
import time
import signal
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Signal Handlers
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def shutdown():
fan.set(on=0)
tilt_servo.disable()
servo_data.seek(0,0)
servo_data.writelines([str(int(pan_servo.pos))+'\n', str(int(tilt_servo.pos))+'\n'])
if args.f is True:
video_out.release()
cv2.destroyAllWindows()
def ctrl_c_handler(sig, frame):
shutdown()
signal.signal(signal.SIGINT, ctrl_c_handler)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Video Setup
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def video_setup(boottime):
print(" Beginning PiCam stream...") # notify user
#video = VideoStream(src=0).start() # start camera stream
video = WebcamVideoStream(src=0).start()
time.sleep(boottime) # allow camera to warm up
if args.f is True: # Create video writer object if video argument is passed
video_data = open("./data/video_data","r+") # open video data file
video_num = int(video_data.readline()) # read the current video number
video_num += 1 # increment video_number: next video will be ++
print("save stream to: trailTracer/Videos/trailTrace_%d.mp4" % video_num)
video_data.seek(0,0) # move to beginning
if args.r is True: # User requested videos to be deleted
user_in = input(' Delete all Videos: are you sure? (y/n): ')
if user_in.lower() == 'y' or user_in.lower() == 'yes': # Test for any yes response
video_data.write("0\n") # restore video number to zero
os.system("rm ./Videos/*.mp4")
quit()
video_data.write("%s\n" %(str(video_num))) # write new num
video_data.close()
video_out = cv2.VideoWriter(filename='./Videos/trailTrace_%d.mp4' % video_num,
fourcc=cv2.VideoWriter_fourcc('X','2','6','4'),
fps=15,
frameSize=(640,480))
| from imutils.video import WebcamVideoStream
import time
import signal
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Signal Handlers
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def shutdown():
fan.set(on=0)
tilt_servo.disable()
servo_data.seek(0,0)
servo_data.writelines([str(int(pan_servo.pos))+'\n', str(int(tilt_servo.pos))+'\n'])
if args.f is True:
video_out.release()
cv2.destroyAllWindows()
def ctrl_c_handler(sig, frame):
shutdown()
signal.signal(signal.SIGINT, ctrl_c_handler)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Video Setup
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def video_setup(boottime):
print(" Beginning PiCam stream...") # notify user
#video = VideoStream(src=0).start() # start camera stream
video = WebcamVideoStream(src=0).start()
time.sleep(boottime) # allow camera to warm up
if args.f is True: # Create video writer object if video argument is passed
video_data = open("./data/video_data","r+") # open video data file
video_num = int(video_data.readline()) # read the current video number
video_num += 1 # increment video_number: next video will be ++
print("save stream to: trailTracer/Videos/trailTrace_%d.mp4" % video_num)
video_data.seek(0,0) # move to beginning
if args.r is True: # User requested videos to be deleted
user_in = input(' Delete all Videos: are you sure? (y/n): ')
if user_in.lower() == 'y' or user_in.lower() == 'yes': # Test for any yes response
video_data.write("0\n") # restore video number to zero
os.system("rm ./Videos/*.mp4")
quit()
video_data.write("%s\n" %(str(video_num))) # write new num
video_data.close()
video_out = cv2.VideoWriter(filename='./Videos/trailTrace_%d.mp4' % video_num,
fourcc=cv2.VideoWriter_fourcc('X','2','6','4'),
fps=15,
frameSize=(640,480))
| en | 0.354634 | #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Signal Handlers #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Video Setup #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # notify user #video = VideoStream(src=0).start() # start camera stream # allow camera to warm up # Create video writer object if video argument is passed # open video data file # read the current video number # increment video_number: next video will be ++ # move to beginning # User requested videos to be deleted # Test for any yes response # restore video number to zero # write new num | 2.646439 | 3 |
platalea/__init__.py | gchrupala/platalea | 1 | 6621441 | <reponame>gchrupala/platalea
import configargparse
configargparse.init_argument_parser(name='platalea', default_config_files=['config.ini', 'config.yml'])
parser = configargparse.get_argument_parser(name='platalea')
parser.add_argument('--data_root', env_var='PLATALEA_DATA_ROOT',
action='store', default='/roaming/gchrupal/datasets/flickr8k/',
dest='data_root',
help='location of the flickr8k (or similar) dataset')
parser.add_argument('--meta', env_var='PLATALEA_METADATA_JSON',
action='store', default='dataset_multilingual.json',
dest='meta',
help='filename of the metadata file (dataset.json or similar) relative to the dataset location')
parser.add_argument('--audio_features_fn', env_var='PLATALEA_AUDIO_FEATURES_FN',
action='store', default='mfcc_delta_features.pt',
dest='audio_features_fn',
help='filename of the MFCC audio features file relative to the dataset location')
parser.add_argument('--audio_subdir', env_var='PLATALEA_AUDIO_SUBDIR',
action='store', default='flickr_audio/wavs/',
dest='audio_subdir',
help='directory containing the flickr8k wav files, relative to the dataset location')
parser.add_argument('--image_subdir', env_var='PLATALEA_IMAGE_SUBDIR',
action='store', default='Flickr8k_Dataset/Flicker8k_Dataset/',
dest='image_subdir',
help='directory containing the flickr8k image files, relative to the dataset location')
| import configargparse
configargparse.init_argument_parser(name='platalea', default_config_files=['config.ini', 'config.yml'])
parser = configargparse.get_argument_parser(name='platalea')
parser.add_argument('--data_root', env_var='PLATALEA_DATA_ROOT',
action='store', default='/roaming/gchrupal/datasets/flickr8k/',
dest='data_root',
help='location of the flickr8k (or similar) dataset')
parser.add_argument('--meta', env_var='PLATALEA_METADATA_JSON',
action='store', default='dataset_multilingual.json',
dest='meta',
help='filename of the metadata file (dataset.json or similar) relative to the dataset location')
parser.add_argument('--audio_features_fn', env_var='PLATALEA_AUDIO_FEATURES_FN',
action='store', default='mfcc_delta_features.pt',
dest='audio_features_fn',
help='filename of the MFCC audio features file relative to the dataset location')
parser.add_argument('--audio_subdir', env_var='PLATALEA_AUDIO_SUBDIR',
action='store', default='flickr_audio/wavs/',
dest='audio_subdir',
help='directory containing the flickr8k wav files, relative to the dataset location')
parser.add_argument('--image_subdir', env_var='PLATALEA_IMAGE_SUBDIR',
action='store', default='Flickr8k_Dataset/Flicker8k_Dataset/',
dest='image_subdir',
help='directory containing the flickr8k image files, relative to the dataset location') | none | 1 | 1.984767 | 2 | |
leetcode/687_longest_univalue_path/687_longest_univalue_path.py | ryangillard/misc | 0 | 6621442 | <gh_stars>0
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def __init__(self):
self.max_longest = 0
def longestUnivaluePath(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.postorder(root)
return self.max_longest
def postorder(self, root):
if not root:
return 0
left_len = self.postorder(root.left)
right_len = self.postorder(root.right)
left = 0
if root.left and root.left.val == root.val:
left = left_len + 1
right = 0
if root.right and root.right.val == root.val:
right = right_len + 1
self.max_longest = max(self.max_longest, left + right)
return max(left, right) | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def __init__(self):
self.max_longest = 0
def longestUnivaluePath(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.postorder(root)
return self.max_longest
def postorder(self, root):
if not root:
return 0
left_len = self.postorder(root.left)
right_len = self.postorder(root.right)
left = 0
if root.left and root.left.val == root.val:
left = left_len + 1
right = 0
if root.right and root.right.val == root.val:
right = right_len + 1
self.max_longest = max(self.max_longest, left + right)
return max(left, right) | en | 0.46731 | # Definition for a binary tree node. # class TreeNode(object): # def __init__(self, x): # self.val = x # self.left = None # self.right = None :type root: TreeNode :rtype: int | 3.863636 | 4 |
core/model/layers.py | pjenpoomjai/LipNet | 22 | 6621443 | <gh_stars>10-100
from keras import backend as k
from keras.layers import Input
from keras.layers.convolutional import Conv3D, ZeroPadding3D
from keras.layers.core import Activation, Dense, Flatten, Lambda, SpatialDropout3D
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import MaxPooling3D
from keras.layers.recurrent import GRU
from keras.layers.wrappers import Bidirectional, TimeDistributed
INPUT_TYPE = 'float32'
ZERO_PADDING = (1, 2, 2)
ACTIVATION_FN = 'relu'
CONV_KERNEL_INIT = 'he_normal'
CONV_KERNEL_SIZE = (3, 5, 5)
CONV_STRIDES = (1, 2, 2)
POOL_SIZE = (1, 2, 2)
POOL_STRIDES = (1, 2, 2)
DROPOUT_RATE = 0.5
GRU_ACTIVATION = None
GRU_UNITS = 256
GRU_KERNEL_INIT = 'Orthogonal'
GRU_MERGE_MODE = 'concat'
def create_input_layer(name: str, shape, dtype: str = INPUT_TYPE) -> Input:
return Input(shape=shape, dtype=dtype, name=name)
def create_zero_layer(name: str, input_layer, padding: tuple = ZERO_PADDING) -> ZeroPadding3D:
return ZeroPadding3D(padding=padding, name=name)(input_layer)
def create_conv_layer(name: str, input_layer, filters: int, kernel_size: tuple = CONV_KERNEL_SIZE) -> Conv3D:
return Conv3D(filters, kernel_size, strides=CONV_STRIDES, kernel_initializer=CONV_KERNEL_INIT, name=name)(input_layer)
def create_batc_layer(name: str, input_layer) -> BatchNormalization:
return BatchNormalization(name=name)(input_layer)
def create_actv_layer(name: str, input_layer, activation: str = ACTIVATION_FN) -> Activation:
return Activation(activation, name=name)(input_layer)
def create_pool_layer(name: str, input_layer) -> MaxPooling3D:
return MaxPooling3D(pool_size=POOL_SIZE, strides=POOL_STRIDES, name=name)(input_layer)
def create_drop_layer(name: str, input_layer) -> SpatialDropout3D:
return SpatialDropout3D(DROPOUT_RATE, name=name)(input_layer)
def create_bi_gru_layer(name: str, input_layer, units: int = GRU_UNITS, activation: str = GRU_ACTIVATION) -> Bidirectional:
return Bidirectional(GRU(units, return_sequences=True, activation=activation, kernel_initializer=GRU_KERNEL_INIT, name=name), merge_mode='concat')(input_layer)
def create_timed_layer(input_layer) -> TimeDistributed:
return TimeDistributed(Flatten())(input_layer)
def create_dense_layer(name: str, input_layer, output_size, kernel_initializer=CONV_KERNEL_INIT) -> Dense:
return Dense(output_size, kernel_initializer=kernel_initializer, name=name)(input_layer)
def ctc_lambda_func(args):
y_pred, labels, input_length, label_length = args
y_pred = y_pred[:, :, :]
return k.ctc_batch_cost(labels, y_pred, input_length, label_length)
def ctc(name: str, args) -> Lambda:
return Lambda(ctc_lambda_func, output_shape=(1,), name=name)(args)
def create_ctc_layer(name: str, y_pred, input_labels, input_length, label_length) -> Lambda:
return ctc(name, [y_pred, input_labels, input_length, label_length])
| from keras import backend as k
from keras.layers import Input
from keras.layers.convolutional import Conv3D, ZeroPadding3D
from keras.layers.core import Activation, Dense, Flatten, Lambda, SpatialDropout3D
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import MaxPooling3D
from keras.layers.recurrent import GRU
from keras.layers.wrappers import Bidirectional, TimeDistributed
INPUT_TYPE = 'float32'
ZERO_PADDING = (1, 2, 2)
ACTIVATION_FN = 'relu'
CONV_KERNEL_INIT = 'he_normal'
CONV_KERNEL_SIZE = (3, 5, 5)
CONV_STRIDES = (1, 2, 2)
POOL_SIZE = (1, 2, 2)
POOL_STRIDES = (1, 2, 2)
DROPOUT_RATE = 0.5
GRU_ACTIVATION = None
GRU_UNITS = 256
GRU_KERNEL_INIT = 'Orthogonal'
GRU_MERGE_MODE = 'concat'
def create_input_layer(name: str, shape, dtype: str = INPUT_TYPE) -> Input:
return Input(shape=shape, dtype=dtype, name=name)
def create_zero_layer(name: str, input_layer, padding: tuple = ZERO_PADDING) -> ZeroPadding3D:
return ZeroPadding3D(padding=padding, name=name)(input_layer)
def create_conv_layer(name: str, input_layer, filters: int, kernel_size: tuple = CONV_KERNEL_SIZE) -> Conv3D:
return Conv3D(filters, kernel_size, strides=CONV_STRIDES, kernel_initializer=CONV_KERNEL_INIT, name=name)(input_layer)
def create_batc_layer(name: str, input_layer) -> BatchNormalization:
return BatchNormalization(name=name)(input_layer)
def create_actv_layer(name: str, input_layer, activation: str = ACTIVATION_FN) -> Activation:
return Activation(activation, name=name)(input_layer)
def create_pool_layer(name: str, input_layer) -> MaxPooling3D:
return MaxPooling3D(pool_size=POOL_SIZE, strides=POOL_STRIDES, name=name)(input_layer)
def create_drop_layer(name: str, input_layer) -> SpatialDropout3D:
return SpatialDropout3D(DROPOUT_RATE, name=name)(input_layer)
def create_bi_gru_layer(name: str, input_layer, units: int = GRU_UNITS, activation: str = GRU_ACTIVATION) -> Bidirectional:
return Bidirectional(GRU(units, return_sequences=True, activation=activation, kernel_initializer=GRU_KERNEL_INIT, name=name), merge_mode='concat')(input_layer)
def create_timed_layer(input_layer) -> TimeDistributed:
return TimeDistributed(Flatten())(input_layer)
def create_dense_layer(name: str, input_layer, output_size, kernel_initializer=CONV_KERNEL_INIT) -> Dense:
return Dense(output_size, kernel_initializer=kernel_initializer, name=name)(input_layer)
def ctc_lambda_func(args):
y_pred, labels, input_length, label_length = args
y_pred = y_pred[:, :, :]
return k.ctc_batch_cost(labels, y_pred, input_length, label_length)
def ctc(name: str, args) -> Lambda:
return Lambda(ctc_lambda_func, output_shape=(1,), name=name)(args)
def create_ctc_layer(name: str, y_pred, input_labels, input_length, label_length) -> Lambda:
return ctc(name, [y_pred, input_labels, input_length, label_length]) | none | 1 | 2.488911 | 2 | |
src/tfexp/cli.py | MArpogaus/tensorflow-experiments | 0 | 6621444 | #!env python3
# AUTHOR INFORMATION ##########################################################
# file : cli.py
# brief : [Description]
#
# author : <NAME>
# date : 2020-04-06 15:21:06
# COPYRIGHT ###################################################################
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTES ######################################################################
#
# This project is following the
# [PEP8 style guide](https://www.python.org/dev/peps/pep-0008/)
#
# CHANGELOG ##################################################################
# modified by : <NAME>
# modified time : 2020-04-06 15:24:14
# changes made : ...
# modified by : <NAME>
# modified time : 2020-04-06 15:21:06
# changes made : newly written
###############################################################################
# REQUIRED PYTHON MODULES #####################################################
import argparse
from .configuration import Configuration
# FUNCTION DEFINITIONS ########################################################
def f_train(args):
cfg = Configuration.from_yaml(args.config)
print(cfg)
(x_train, y_train), (x_test, y_test) = cfg.data.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = cfg.model
# cfg.compile_kwargs['loss']=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)
model.compile(**cfg.compile_kwargs)
model.fit(x=x_train, y=y_train, **cfg.fit_kwargs)
def f_predict():
pass
def cli():
p = argparse.ArgumentParser(
description='Help me to conduct my experiments')
subparsers = p.add_subparsers()
train_parser = subparsers.add_parser('train', help='train the model')
# Add specific options for option1 here, but here's
train_parser.add_argument('config', type=argparse.FileType(mode='r'))
# train_parser.add_argument('--model', type=tf.keras.Model)
# train_parser.add_argument('--experiment_name', type=str)
# train_parser.add_argument('--model_args', type=list)
# train_parser.add_argument('--model_kwargs', type=dict)
# train_parser.add_argument('--model_compile_kwargs', type=dict)
# train_parser.add_argument('--model_fit_kwargs', type=dict)
# train_parser.add_argument('--save_path', type=str)
# an example
train_parser.set_defaults(func=f_train)
predict_parser = subparsers.add_parser('predict')
# Add specific options for option1 here
predict_parser.set_defaults(func=f_predict)
args = p.parse_args()
args.func(args)
| #!env python3
# AUTHOR INFORMATION ##########################################################
# file : cli.py
# brief : [Description]
#
# author : <NAME>
# date : 2020-04-06 15:21:06
# COPYRIGHT ###################################################################
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTES ######################################################################
#
# This project is following the
# [PEP8 style guide](https://www.python.org/dev/peps/pep-0008/)
#
# CHANGELOG ##################################################################
# modified by : <NAME>
# modified time : 2020-04-06 15:24:14
# changes made : ...
# modified by : <NAME>
# modified time : 2020-04-06 15:21:06
# changes made : newly written
###############################################################################
# REQUIRED PYTHON MODULES #####################################################
import argparse
from .configuration import Configuration
# FUNCTION DEFINITIONS ########################################################
def f_train(args):
cfg = Configuration.from_yaml(args.config)
print(cfg)
(x_train, y_train), (x_test, y_test) = cfg.data.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = cfg.model
# cfg.compile_kwargs['loss']=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)
model.compile(**cfg.compile_kwargs)
model.fit(x=x_train, y=y_train, **cfg.fit_kwargs)
def f_predict():
pass
def cli():
p = argparse.ArgumentParser(
description='Help me to conduct my experiments')
subparsers = p.add_subparsers()
train_parser = subparsers.add_parser('train', help='train the model')
# Add specific options for option1 here, but here's
train_parser.add_argument('config', type=argparse.FileType(mode='r'))
# train_parser.add_argument('--model', type=tf.keras.Model)
# train_parser.add_argument('--experiment_name', type=str)
# train_parser.add_argument('--model_args', type=list)
# train_parser.add_argument('--model_kwargs', type=dict)
# train_parser.add_argument('--model_compile_kwargs', type=dict)
# train_parser.add_argument('--model_fit_kwargs', type=dict)
# train_parser.add_argument('--save_path', type=str)
# an example
train_parser.set_defaults(func=f_train)
predict_parser = subparsers.add_parser('predict')
# Add specific options for option1 here
predict_parser.set_defaults(func=f_predict)
args = p.parse_args()
args.func(args)
| en | 0.256983 | #!env python3 # AUTHOR INFORMATION ########################################################## # file : cli.py # brief : [Description] # # author : <NAME> # date : 2020-04-06 15:21:06 # COPYRIGHT ################################################################### # Copyright 2020 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # NOTES ###################################################################### # # This project is following the # [PEP8 style guide](https://www.python.org/dev/peps/pep-0008/) # # CHANGELOG ################################################################## # modified by : <NAME> # modified time : 2020-04-06 15:24:14 # changes made : ... 
# modified by : <NAME> # modified time : 2020-04-06 15:21:06 # changes made : newly written ############################################################################### # REQUIRED PYTHON MODULES ##################################################### # FUNCTION DEFINITIONS ######################################################## # cfg.compile_kwargs['loss']=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False) # Add specific options for option1 here, but here's # train_parser.add_argument('--model', type=tf.keras.Model) # train_parser.add_argument('--experiment_name', type=str) # train_parser.add_argument('--model_args', type=list) # train_parser.add_argument('--model_kwargs', type=dict) # train_parser.add_argument('--model_compile_kwargs', type=dict) # train_parser.add_argument('--model_fit_kwargs', type=dict) # train_parser.add_argument('--save_path', type=str) # an example # Add specific options for option1 here | 1.705909 | 2 |
solax_modbus/sensor.py | Zeppen/homsassistant-solax-modbus | 22 | 6621445 | from homeassistant.const import CONF_NAME
from homeassistant.core import callback
from homeassistant.components.sensor import SensorEntity
import logging
from typing import Optional, Dict, Any
import homeassistant.util.dt as dt_util
from .const import ATTR_MANUFACTURER, DOMAIN, SENSOR_TYPES, GEN3_X1_SENSOR_TYPES, GEN3_X3_SENSOR_TYPES, X1_EPS_SENSOR_TYPES, X3_EPS_SENSOR_TYPES, SolaXModbusSensorEntityDescription
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
hub_name = entry.data[CONF_NAME]
hub = hass.data[DOMAIN][hub_name]["hub"]
device_info = {
"identifiers": {(DOMAIN, hub_name)},
"name": hub_name,
"manufacturer": ATTR_MANUFACTURER,
}
entities = []
for sensor_description in SENSOR_TYPES.values():
sensor = SolaXModbusSensor(
hub_name,
hub,
device_info,
sensor_description,
)
entities.append(sensor)
if hub.read_gen3x1 == True:
for sensor_description in GEN3_X1_SENSOR_TYPES.values():
sensor = SolaXModbusSensor(
hub_name,
hub,
device_info,
sensor_description,
)
entities.append(sensor)
if hub.read_gen3x3 == True:
for sensor_description in GEN3_X3_SENSOR_TYPES.values():
sensor = SolaXModbusSensor(
hub_name,
hub,
device_info,
sensor_description,
)
entities.append(sensor)
if hub.read_x1_eps == True:
for sensor_description in X1_EPS_SENSOR_TYPES.values():
sensor = SolaXModbusSensor(
hub_name,
hub,
device_info,
sensor_description,
)
entities.append(sensor)
if hub.read_x3_eps == True:
for sensor_description in X3_EPS_SENSOR_TYPES.values():
sensor = SolaXModbusSensor(
hub_name,
hub,
device_info,
sensor_description,
)
entities.append(sensor)
async_add_entities(entities)
return True
class SolaXModbusSensor(SensorEntity):
"""Representation of an SolaX Modbus sensor."""
def __init__(
self,
platform_name,
hub,
device_info,
description: SolaXModbusSensorEntityDescription,
):
"""Initialize the sensor."""
self._platform_name = platform_name
self._attr_device_info = device_info
self._hub = hub
self.entity_description: SolaXModbusSensorEntityDescription = description
async def async_added_to_hass(self):
"""Register callbacks."""
self._hub.async_add_solax_modbus_sensor(self._modbus_data_updated)
async def async_will_remove_from_hass(self) -> None:
self._hub.async_remove_solax_modbus_sensor(self._modbus_data_updated)
@callback
def _modbus_data_updated(self):
self.async_write_ha_state()
@callback
def _update_state(self):
if self._key in self._hub.data:
self._state = self._hub.data[self._key]
@property
def name(self):
"""Return the name."""
return f"{self._platform_name} {self.entity_description.name}"
@property
def unique_id(self) -> Optional[str]:
return f"{self._platform_name}_{self.entity_description.key}"
@property
def native_value(self):
"""Return the state of the sensor."""
return (
self._hub.data[self.entity_description.key]
if self.entity_description.key in self._hub.data
else None
) | from homeassistant.const import CONF_NAME
from homeassistant.core import callback
from homeassistant.components.sensor import SensorEntity
import logging
from typing import Optional, Dict, Any
import homeassistant.util.dt as dt_util
from .const import ATTR_MANUFACTURER, DOMAIN, SENSOR_TYPES, GEN3_X1_SENSOR_TYPES, GEN3_X3_SENSOR_TYPES, X1_EPS_SENSOR_TYPES, X3_EPS_SENSOR_TYPES, SolaXModbusSensorEntityDescription
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
hub_name = entry.data[CONF_NAME]
hub = hass.data[DOMAIN][hub_name]["hub"]
device_info = {
"identifiers": {(DOMAIN, hub_name)},
"name": hub_name,
"manufacturer": ATTR_MANUFACTURER,
}
entities = []
for sensor_description in SENSOR_TYPES.values():
sensor = SolaXModbusSensor(
hub_name,
hub,
device_info,
sensor_description,
)
entities.append(sensor)
if hub.read_gen3x1 == True:
for sensor_description in GEN3_X1_SENSOR_TYPES.values():
sensor = SolaXModbusSensor(
hub_name,
hub,
device_info,
sensor_description,
)
entities.append(sensor)
if hub.read_gen3x3 == True:
for sensor_description in GEN3_X3_SENSOR_TYPES.values():
sensor = SolaXModbusSensor(
hub_name,
hub,
device_info,
sensor_description,
)
entities.append(sensor)
if hub.read_x1_eps == True:
for sensor_description in X1_EPS_SENSOR_TYPES.values():
sensor = SolaXModbusSensor(
hub_name,
hub,
device_info,
sensor_description,
)
entities.append(sensor)
if hub.read_x3_eps == True:
for sensor_description in X3_EPS_SENSOR_TYPES.values():
sensor = SolaXModbusSensor(
hub_name,
hub,
device_info,
sensor_description,
)
entities.append(sensor)
async_add_entities(entities)
return True
class SolaXModbusSensor(SensorEntity):
"""Representation of an SolaX Modbus sensor."""
def __init__(
self,
platform_name,
hub,
device_info,
description: SolaXModbusSensorEntityDescription,
):
"""Initialize the sensor."""
self._platform_name = platform_name
self._attr_device_info = device_info
self._hub = hub
self.entity_description: SolaXModbusSensorEntityDescription = description
async def async_added_to_hass(self):
"""Register callbacks."""
self._hub.async_add_solax_modbus_sensor(self._modbus_data_updated)
async def async_will_remove_from_hass(self) -> None:
self._hub.async_remove_solax_modbus_sensor(self._modbus_data_updated)
@callback
def _modbus_data_updated(self):
self.async_write_ha_state()
@callback
def _update_state(self):
if self._key in self._hub.data:
self._state = self._hub.data[self._key]
@property
def name(self):
"""Return the name."""
return f"{self._platform_name} {self.entity_description.name}"
@property
def unique_id(self) -> Optional[str]:
return f"{self._platform_name}_{self.entity_description.key}"
@property
def native_value(self):
"""Return the state of the sensor."""
return (
self._hub.data[self.entity_description.key]
if self.entity_description.key in self._hub.data
else None
) | en | 0.692296 | Representation of an SolaX Modbus sensor. Initialize the sensor. Register callbacks. Return the name. Return the state of the sensor. | 2.000091 | 2 |
lib/music21/musicxml/fromMxObjects.py | lasconic/randomsheetmusic | 2 | 6621446 | <reponame>lasconic/randomsheetmusic<filename>lib/music21/musicxml/fromMxObjects.py
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: musicxml/fromMxObjects.py
# Purpose: Translate from MusicXML mxObjects to music21 objects
#
# Authors: <NAME>
# <NAME>
#
# Copyright: Copyright © 2010-2013 <NAME> and the music21 Project
# License: LGPL or BSD, see license.txt
#-------------------------------------------------------------------------------
'''
Low-level conversion routines from MusicXML to music21.
This module supposes that the musicxml document has already been parsed by xml.sax (by
base.Document.read() ) and is stored as a collection of mxObjects -- equivalent parsing
methods could be created and fed into `mxScoreToScore` to make this work.
'''
import copy
import pprint
import traceback
import unittest
from music21.musicxml import mxObjects
from music21 import common
from music21 import defaults
from music21 import exceptions21
from music21 import xmlnode
# modules that import this include converter.py.
# thus, cannot import these here
from music21 import articulations
from music21 import bar
from music21 import beam
from music21 import chord
from music21 import clef
from music21 import duration
from music21 import dynamics
from music21 import expressions
from music21 import harmony # for chord symbols
from music21 import instrument
from music21 import interval # for transposing instruments
from music21 import key
from music21 import layout
from music21 import metadata
from music21 import note
from music21 import meter
from music21 import pitch
from music21 import repeat
from music21 import spanner
from music21 import stream
from music21 import tempo
from music21 import text # for text boxes
from music21 import tie
from music21 import environment
_MOD = "musicxml.fromMxObjects"
environLocal = environment.Environment(_MOD)
#-------------------------------------------------------------------------------
class FromMxObjectsException(exceptions21.Music21Exception):
    '''Base exception for errors raised while translating MusicXML
    mxObjects into music21 objects.'''
    pass
class XMLBarException(FromMxObjectsException):
    '''Exception raised for problems translating MusicXML barline data.'''
    pass
# def mod6IdLocal(spannerObj):
# '''
# returns the spanner idLocal as a number from 1-6 since
# only 6 spanners of each type can be active at a time in musicxml
#
#
# >>> s = stream.Score()
# >>> for i in range(10):
# ... sp = spanner.Glissando()
# ... sp.idLocal = i + 1
# ... s.insert(0, sp)
# >>> for sp in s.getElementsByClass('Spanner'):
# ... print sp.idLocal, musicxml.fromMxObjects.mod6IdLocal(sp)
# 1 1
# 2 2
# 3 3
# 4 4
# 5 5
# 6 6
# 7 1
# 8 2
# 9 3
# 10 4
# '''
# spanId = spannerObj.idLocal
# if spanId is None:
# return 1
# mod6Id = spanId % 6
# if mod6Id == 0:
# mod6Id = 6
# return mod6Id
def configureStaffGroupFromMxPartGroup(staffGroup, mxPartGroup):
    '''
    Given an already instantiated spanner.StaffGroup,
    configure it with parameters from an mxPartGroup.
    '''
    # copy each mxPartGroup attribute onto the corresponding
    # StaffGroup attribute
    attributePairs = (('name', 'groupName'),
                      ('abbreviation', 'groupAbbreviation'),
                      ('symbol', 'groupSymbol'),
                      ('barTogether', 'groupBarline'))
    for m21Attr, mxKey in attributePairs:
        setattr(staffGroup, m21Attr, mxPartGroup.get(mxKey))
    staffGroup.completeStatus = True
def mxCreditToTextBox(mxCredit):
    '''Convert a MusicXML credit to a music21 TextBox

    >>> c = musicxml.mxObjects.Credit()
    >>> c.append(musicxml.mxObjects.CreditWords('testing'))
    >>> c.set('page', 2)
    >>> tb = musicxml.fromMxObjects.mxCreditToTextBox(c)
    >>> tb.page
    2
    >>> tb.content
    'testing'
    '''
    content = [mxCreditWords.charData for mxCreditWords in mxCredit]
    if not content: # no text defined
        raise FromMxObjectsException('no credit words defined for a credit tag')
    tb = text.TextBox()
    tb.page = mxCredit.get('page')
    tb.content = '\n'.join(content) # join with \n
    # formatting is always taken from the first credit-words element,
    # no matter how many are defined
    firstWords = mxCredit.componentList[0]
    for m21Attr, mxKey in (('positionVertical', 'default-y'),
                           ('positionHorizontal', 'default-x'),
                           ('justify', 'justify'),
                           ('style', 'font-style'),
                           ('weight', 'font-weight'),
                           ('size', 'font-size'),
                           ('alignVertical', 'valign'),
                           ('alignHorizontal', 'halign')):
        setattr(tb, m21Attr, firstWords.get(mxKey))
    return tb
def mxTransposeToInterval(mxTranspose):
    '''Convert a MusicXML Transpose object to a music21 Interval object.

    >>> t = musicxml.mxObjects.Transpose()
    >>> t.diatonic = -1
    >>> t.chromatic = -2
    >>> musicxml.fromMxObjects.mxTransposeToInterval(t)
    <music21.interval.Interval M-2>

    >>> t = musicxml.mxObjects.Transpose()
    >>> t.diatonic = -5
    >>> t.chromatic = -9
    >>> musicxml.fromMxObjects.mxTransposeToInterval(t)
    <music21.interval.Interval M-6>

    >>> t = musicxml.mxObjects.Transpose()
    >>> t.diatonic = 3 # a type of 4th
    >>> t.chromatic = 6
    >>> musicxml.fromMxObjects.mxTransposeToInterval(t)
    <music21.interval.Interval A4>
    '''
    diatonicSteps = None
    if mxTranspose.diatonic is not None:
        diatonicSteps = int(mxTranspose.diatonic)
    chromaticSteps = None
    if mxTranspose.chromatic is not None:
        chromaticSteps = int(mxTranspose.chromatic)
    octaveShift = 0
    if mxTranspose.octaveChange is not None:
        octaveShift = int(mxTranspose.octaveChange) * 12
    # NOTE: presently not dealing with the "double" element
    # (doubled one octave down from what is currently written,
    # as in mixed cello / bass parts in orchestral literature)
    if diatonicSteps and chromaticSteps:
        # the diatonic step count becomes a generic interval when shifted
        # one away from zero
        shift = -1 if diatonicSteps < 0 else 1
        return interval.intervalFromGenericAndChromatic(
            diatonicSteps + shift, chromaticSteps + octaveShift)
    # chromatic only; the resulting spelling may not be correct
    return interval.Interval(chromaticSteps + octaveShift)
def mxToTempoIndication(mxMetronome, mxWords=None):
    '''
    Given an mxMetronome, convert to either a TempoIndication subclass, 
    either a tempo.MetronomeMark or tempo.MetricModulation.

    >>> m = musicxml.mxObjects.Metronome()
    >>> bu = musicxml.mxObjects.BeatUnit('half')
    >>> pm = musicxml.mxObjects.PerMinute(125)
    >>> m.append(bu)
    >>> m.append(pm)
    >>> musicxml.fromMxObjects.mxToTempoIndication(m)
    <music21.tempo.MetronomeMark Half=125.0>
    '''
    # scan the metronome components in document order, collecting the
    # referent durations (beat-unit + beat-unit-dot) and per-minute numbers
    durations = []
    numbers = []
    dActive = None # the most recent Duration, target for any following dots
    for mxObj in mxMetronome.componentList:
        if isinstance(mxObj, mxObjects.BeatUnit):
            durationType = musicXMLTypeToType(mxObj.charData)
            dActive = duration.Duration(type=durationType)
            durations.append(dActive)
        if isinstance(mxObj, mxObjects.BeatUnitDot):
            # a dot must follow a beat-unit; out-of-order data is an error
            if dActive is None:
                raise FromMxObjectsException('encountered metronome components out of order')
            dActive.dots += 1 # add one dot each time these are encountered
        # should come last
        if isinstance(mxObj, mxObjects.PerMinute):
            #environLocal.printDebug(['found PerMinute', mxObj])
            # store as a number
            if mxObj.charData != '':
                numbers.append(float(mxObj.charData))
    if mxMetronome.isMetricModulation():
        mm = tempo.MetricModulation()
        #environLocal.printDebug(['found metric modulaton:', 'durations', durations])
        # a metric modulation needs two referent durations (old and new)
        if len(durations) < 2:
            raise FromMxObjectsException('found incompletely specified musicxml metric moduation: '+
                'fewer than two durations defined')
        # all we have are referents, no values are defined in musicxml
        # will need to update context after adding to Stream
        mm.oldReferent = durations[0]
        mm.newReferent = durations[1]
    else:
        #environLocal.printDebug(['found metronome mark:', 'numbers', numbers])
        mm = tempo.MetronomeMark()
        # only the first number/duration found is used
        if len(numbers) > 0:
            mm.number = numbers[0]
        if len(durations) > 0:
            mm.referent = durations[0]
        # TODO: set text if defined in words
        if mxWords is not None:
            pass
    paren = mxMetronome.get('parentheses')
    if paren is not None:
        if paren in ['yes']:
            mm.parentheses = True
    return mm
def mxToRepeat(mxBarline, inputM21=None):
    '''
    Given an mxBarline (not an mxRepeat object) with repeatObj as a parameter, 
    file the necessary parameters and return a bar.Repeat() object

    >>> mxRepeat = musicxml.mxObjects.Repeat()
    >>> mxRepeat.set('direction', 'backward')
    >>> mxRepeat.get('times') == None
    True
    >>> mxBarline = musicxml.mxObjects.Barline()
    >>> mxBarline.set('barStyle', 'light-heavy')
    >>> mxBarline.set('repeatObj', mxRepeat)
    >>> b = musicxml.fromMxObjects.mxToRepeat(mxBarline)
    >>> b
    <music21.bar.Repeat direction=end>

    Test that the music21 style for a backwards repeat is called "final"
    (because it resembles a final barline) but that the musicxml style
    is called light-heavy.

    >>> b.style
    'final'
    >>> b.direction
    'end'
    >>> mxBarline2 = musicxml.toMxObjects.repeatToMx(b)
    >>> mxBarline2.get('barStyle')
    'light-heavy'
    '''
    r = bar.Repeat() if inputM21 is None else inputM21
    r.style = mxBarline.get('barStyle')
    location = mxBarline.get('location')
    if location is not None:
        r.location = location
    mxRepeat = mxBarline.get('repeatObj')
    if mxRepeat is None:
        raise bar.BarException('attempting to create a Repeat from an MusicXML bar that does not ' +
                               'define a repeat')
    mxDirection = mxRepeat.get('direction')
    #environLocal.printDebug(['mxRepeat', mxRepeat, mxRepeat._attr])
    # map musicxml forward/backward onto music21 start/end
    directionLower = mxDirection.lower()
    if directionLower == 'forward':
        r.direction = 'start'
    elif directionLower == 'backward':
        r.direction = 'end'
    else:
        raise bar.BarException('cannot handle mx direction format:', mxDirection)
    if mxRepeat.get('times') != None:
        # make into a number
        r.times = int(mxRepeat.get('times'))
    if inputM21 is None:
        return r
def mxToBarline(mxBarline, inputM21 = None):
    '''Given an mxBarline, fill the necessary parameters

    >>> mxBarline = musicxml.mxObjects.Barline()
    >>> mxBarline.set('barStyle', 'light-light')
    >>> mxBarline.set('location', 'right')
    >>> b = musicxml.fromMxObjects.mxToBarline(mxBarline)
    >>> b.style  # different in music21 than musicxml
    'double'
    >>> b.location
    'right'
    '''
    barlineObj = bar.Barline() if inputM21 is None else inputM21
    barlineObj.style = mxBarline.get('barStyle')
    mxLocation = mxBarline.get('location')
    if mxLocation is not None:
        barlineObj.location = mxLocation
    # when created from scratch, hand the new object back to the caller
    if inputM21 is None:
        return barlineObj
#-------------------------------------------------------------------------------
def mxGraceToGrace(noteOrChord, mxGrace=None):
    '''
    Given a completely formed, non-grace Note or Chord, create and
    return a m21 grace version of the same.

    If mxGrace is None, no change is made and the same object is returned.
    '''
    if mxGrace is None:
        return noteOrChord
    graceObj = noteOrChord.getGrace()
    # an absent slash attribute is treated the same as 'yes'
    graceObj.duration.slash = mxGrace.get('slash') in ('yes', None)
    graceObj.duration.stealTimePrevious = mxGrace.get('steal-time-previous')
    graceObj.duration.stealTimeFollowing = mxGrace.get('steal-time-following')
    return graceObj
#-------------------------------------------------------------------------------
# Pitch and pitch components
def mxToAccidental(mxAccidental, inputM21Object = None):
    '''
    >>> a = musicxml.mxObjects.Accidental()
    >>> a.set('content', 'half-flat')
    >>> a.get('content')
    'half-flat'
    >>> b = pitch.Accidental()
    >>> bReference = musicxml.fromMxObjects.mxToAccidental(a, b)
    >>> b is bReference
    True
    >>> b.name
    'half-flat'
    >>> b.alter
    -0.5
    '''
    acc = pitch.Accidental() if inputM21Object is None else inputM21Object
    # map MusicXML accidental names onto their music21 equivalents;
    # any name not listed here passes through unchanged
    nameMap = {
        'quarter-sharp': 'half-sharp',
        'three-quarters-sharp': 'one-and-a-half-sharp',
        'quarter-flat': 'half-flat',
        'three-quarters-flat': 'one-and-a-half-flat',
        'flat-flat': 'double-flat',
        'sharp-sharp': 'double-sharp',
    }
    mxName = mxAccidental.get('charData')
    # need to use set here to get all attributes up to date
    acc.set(nameMap.get(mxName, mxName))
    return acc
def mxToPitch(mxNote, inputM21=None):
    '''
    Given a MusicXML Note object, set this Pitch object to its values.

    >>> b = musicxml.mxObjects.Pitch()
    >>> b.set('octave', 3)
    >>> b.set('step', 'E')
    >>> b.set('alter', -1)
    >>> c = musicxml.mxObjects.Note()
    >>> c.set('pitch', b)
    >>> a = pitch.Pitch('g#4')
    >>> a = musicxml.fromMxObjects.mxToPitch(c)
    >>> print(a)
    E-3
    '''
    if inputM21 == None:
        p = pitch.Pitch()
    else:
        p = inputM21

    # assume this is an object
    mxPitch = mxNote.get('pitchObj')
    mxAccidental = mxNote.get('accidentalObj')

    p.step = mxPitch.get('step')

    # sometimes we have an accidental defined but no alter value, due to
    # a natural; need to look at mxAccidental directly
    mxAccidentalCharData = None
    if mxAccidental != None:
        mxAccidentalCharData = mxAccidental.get('charData')
        #environLocal.printDebug(['found mxAccidental charData', mxAccidentalCharData])

    acc = mxPitch.get('alter')
    # None is used in musicxml but not in music21
    if acc != None or mxAccidentalCharData != None:
        if mxAccidental is not None: # the source had wanted to show alter
            # an explicit <accidental> element wins over the bare alter value:
            # it carries display information the alter alone cannot
            try:
                accObj = mxToAccidental(mxAccidental)
                # better to use accObj than the raw acc value if possible
                p.accidental = accObj
                p.accidental.displayStatus = True
            except pitch.AccidentalException:
                # MuseScore 0.9.6 generates Accidentals with empty objects
                pass
        else:
            # here we generate an accidental object from the alter value
            # but in the source, there was not a defined accidental;
            # so the accidental is marked as not displayed
            try:
                p.accidental = pitch.Accidental(float(acc))
            except pitch.AccidentalException:
                raise FromMxObjectsException('incorrect accidental %s for pitch %s' % (str(acc), p))
            p.accidental.displayStatus = False
    p.octave = int(mxPitch.get('octave'))
    return p
#-------------------------------------------------------------------------------
# Ties
def mxToTie(mxNote, inputM21=None):
    '''
    Translate a MusicXML :class:`~music21.musicxml.mxObjects.Note` (sic!) to a music21 
    :class:`~music21.tie.Tie` object according to its <tieList> parameter.

    Only called if the mxObjects.Note has a tieList that is not blank, so as not to
    create additional ties.
    '''
    t = tie.Tie() if inputM21 is None else inputM21
    mxTieList = mxNote.get('tieList')
    if len(mxTieList) > 0:
        # collect all tie types present on this note
        typesFound = [mxTie.get('type') for mxTie in mxTieList]
        if len(typesFound) == 1:
            # trivial case: a single tie type
            t.type = typesFound[0]
        elif typesFound == ['stop', 'start']:
            # a stop immediately followed by a start marks a continued tie
            t.type = 'continue'
        else:
            environLocal.printDebug(['found unexpected arrangement of multiple tie types when ' +
                'importing from musicxml:', typesFound])
    # from old note.py code: getting mxTieList alone is sufficient;
    # mxNotations does not need to be consulted for tieds here
    if inputM21 is None:
        return t
#-------------------------------------------------------------------------------
# Lyrics
def mxToLyric(mxLyric, inputM21=None):
    '''
    Translate a MusicXML :class:`~music21.musicxml.mxObjects.Lyric` object to a 
    music21 :class:`~music21.note.Lyric` object.

    If inputM21 is a :class:`~music21.note.Lyric` object, then the values of the 
    mxLyric are transfered there and nothing returned.

    Otherwise, a new `Lyric` object is created and returned.

    >>> mxLyric = musicxml.mxObjects.Lyric()
    >>> mxLyric.set('text', 'word')
    >>> mxLyric.set('number', 4)
    >>> mxLyric.set('syllabic', 'single')
    >>> lyricObj = note.Lyric()
    >>> musicxml.fromMxObjects.mxToLyric(mxLyric, lyricObj)
    >>> lyricObj
    <music21.note.Lyric number=4 syllabic=single text="word">

    Non-numeric MusicXML lyric "number"s are converted to identifiers:

    >>> mxLyric.set('number', 'part2verse1')    
    >>> l2 = musicxml.fromMxObjects.mxToLyric(mxLyric)
    >>> l2
    <music21.note.Lyric number=0 identifier="part2verse1" syllabic=single text="word">
    '''
    lyricObj = note.Lyric() if inputM21 is None else inputM21

    lyricObj.text = mxLyric.get('text')
    number = mxLyric.get('number')
    if common.isNum(number):
        lyricObj.number = number
    else:
        # a non-numeric MusicXML number becomes an identifier; number 0
        # tells the caller that a real number must still be assigned from
        # this lyric's context amongst the other lyrics
        lyricObj.number = 0
        lyricObj.identifier = number
    lyricObj.syllabic = mxLyric.get('syllabic')

    if inputM21 is None:
        return lyricObj
#-------------------------------------------------------------------------------
# Durations
def musicXMLTypeToType(value):
    '''
    Utility function to convert a MusicXML duration type to an music21 duration type.
    
    Changes 'long' to 'longa' and deals with a Guitar Pro 5.2 bug in MusicXML
    export, that exports a 32nd note with the type '32th'.

    >>> musicxml.fromMxObjects.musicXMLTypeToType('long')
    'longa'
    >>> musicxml.fromMxObjects.musicXMLTypeToType('32th')
    '32nd'
    >>> musicxml.fromMxObjects.musicXMLTypeToType('quarter')
    'quarter'
    >>> musicxml.fromMxObjects.musicXMLTypeToType(None)
    Traceback (most recent call last):
    FromMxObjectsException...
    '''
    if value in duration.typeToDuration:
        return value
    # MusicXML uses 'long' instead of 'longa'; '32th' is a known
    # Guitar Pro 5.2 export bug
    specialCases = {'long': 'longa', '32th': '32nd'}
    if value in specialCases:
        return specialCases[value]
    raise FromMxObjectsException('found unknown MusicXML type: %s' % value)
def mxToDuration(mxNote, inputM21=None):
    '''
    Translate a `MusicXML` :class:`~music21.musicxml.mxObjects.Note` object 
    to a music21 :class:`~music21.duration.Duration` object.

    ::

        >>> a = musicxml.mxObjects.Note()
        >>> a.setDefaults()
        >>> m = musicxml.mxObjects.Measure()
        >>> m.setDefaults()
        >>> a.external['measure'] = m # assign measure for divisions ref
        >>> a.external['divisions'] = m.external['divisions']
        >>> c = duration.Duration()
        >>> musicxml.fromMxObjects.mxToDuration(a, c)
        <music21.duration.Duration 1.0>
        >>> c.quarterLength
        1.0
    
    '''
    if inputM21 == None:
        d = duration.Duration()
    else:
        d = inputM21

    # a measure reference is required to resolve divisions into quarter lengths
    if mxNote.external['measure'] == None:
        raise FromMxObjectsException(
            "cannot determine MusicXML duration without a reference to a measure (%s)" % mxNote)

    mxDivisions = mxNote.external['divisions']
    if mxNote.duration is not None:
        if mxNote.get('type') is not None:
            durationType = musicXMLTypeToType(mxNote.get('type'))
            forceRaw = False
        else: # some rests do not define type, and only define duration
            durationType = None # no type to get, must use raw
            forceRaw = True
        mxDotList = mxNote.get('dotList')
        # divide mxNote duration count by divisions to get qL
        qLen = float(mxNote.duration) / float(mxDivisions)
        # mxNotations = mxNote.get('notationsObj')
        mxTimeModification = mxNote.get('timeModificationObj')

        if mxTimeModification is not None:
            # a time-modification element means this note is part of a tuplet
            tup = mxToTuplet(mxNote)
            # get all necessary config from mxNote
            #environLocal.printDebug(['created Tuplet', tup])
            # need to see if there is more than one component
            #self.components[0]._tuplets.append(tup)
        else:
            tup = None
        # two ways to create durations, raw and cooked
        if forceRaw:
            # "raw": derive everything from the quarter length alone
            #environLocal.printDebug(['forced to use raw duration', durRaw])
            durRaw = duration.Duration() # raw just uses qLen
            # the qLen set here may not be computable, but is not immediately
            # computed until setting components
            durRaw.quarterLength = qLen
            try:
                d.components = durRaw.components
            except duration.DurationException:
                # fall back to a quarter note when the raw value cannot be
                # expressed as a duration
                environLocal.warn(['mxToDuration', 'supplying quarterLength of 1 as type is not ' + 
                    'defined and raw quarterlength (%s) is not a computable duration' % qLen])
                environLocal.printDebug(['mxToDuration', 'raw qLen', qLen, durationType, 
                    'mxNote.duration:', mxNote.duration, 
                    'last mxDivisions:', mxDivisions])
                durRaw.quarterLength = 1.
        else: # a cooked version builds up from pieces
            # "cooked": assemble from type, dots, and tuplet information
            durUnit = duration.DurationUnit()
            durUnit.type = durationType
            durUnit.dots = len(mxDotList)
            if not tup == None:
                durUnit.appendTuplet(tup)
            durCooked = duration.Duration(components=[durUnit])
            if durUnit.quarterLength != durCooked.quarterLength:
                environLocal.printDebug(['error in stored MusicXML representaiton and ' + 
                    'duration value', durCooked])
            # old way just used qLen
            #self.quarterLength = qLen
            d.components = durCooked.components
    # if mxNote.duration is None, this is a grace note, and duration
    # is based entirely on type
    if mxNote.duration is None:
        durUnit = duration.DurationUnit()
        durUnit.type = musicXMLTypeToType(mxNote.get('type'))
        durUnit.dots = len(mxNote.get('dotList'))
        d.components = [durUnit]
        #environLocal.printDebug(['got mx duration of None', d])

    return d
def mxToOffset(mxDirection, mxDivisions):
    '''
    Translate a MusicXML :class:`~music21.musicxml.mxObjects.Direction`
    with an offset value to an offset in music21.
    '''
    if mxDivisions is None:
        raise FromMxObjectsException(
            "cannot determine MusicXML duration without a reference to a measure (%s)" % mxDirection)
    offset = mxDirection.offset
    if offset is None:
        return 0.0
    # MusicXML offsets are measured in divisions; scale to quarter lengths
    return float(offset) / float(mxDivisions)
def mxToTuplet(mxNote, inputM21Object = None):
    '''
    Given an mxNote, based on mxTimeModification 
    and mxTuplet objects, return a Tuplet object
    (or alter the input object and then return it)

    Raises a duration.TupletException if the passed-in Tuplet is frozen.
    ''' 
    if inputM21Object is None:
        t = duration.Tuplet()
    else:
        t = inputM21Object
    if t.frozen is True:
        raise duration.TupletException("A frozen tuplet (or one attached to a duration) " +
            "is immutable")
    mxTimeModification = mxNote.get('timeModificationObj')
    #environLocal.printDebug(['got mxTimeModification', mxTimeModification])
    # e.g. a triplet has actual-notes=3, normal-notes=2
    t.numberNotesActual = int(mxTimeModification.get('actual-notes'))
    t.numberNotesNormal = int(mxTimeModification.get('normal-notes'))
    mxNormalType = mxTimeModification.get('normal-type')
    # TODO: implement dot
    # mxNormalDot = mxTimeModification.get('normal-dot')
    if mxNormalType != None:
        # this value does not seem to frequently be supplied by mxl
        # encodings, unless it is different from the main duration
        # this sets both actual and normal types to the same type
        t.setDurationType(musicXMLTypeToType(
            mxTimeModification.get('normal-type')))
    else: # set to type of duration
        t.setDurationType(musicXMLTypeToType(mxNote.get('type')))

    # visual details (bracket, placement, start/stop type) come from the
    # notations element, if present
    mxNotations = mxNote.get('notationsObj')
    #environLocal.printDebug(['got mxNotations', mxNotations])
    if mxNotations != None and len(mxNotations.getTuplets()) > 0:
        mxTuplet = mxNotations.getTuplets()[0] # a list, but only use first
        #environLocal.printDebug(['got mxTuplet', mxTuplet])
        t.type = mxTuplet.get('type') 
        t.bracket = mxObjects.yesNoToBoolean(mxTuplet.get('bracket'))
        #environLocal.printDebug(['got bracket', self.bracket])
        t.placement = mxTuplet.get('placement') 
    return t
#-------------------------------------------------------------------------------
# Meters
def mxToTimeSignature(mxTimeList, inputM21=None):
    '''
    Given an mxTimeList, load this object 

    if inputM21 is None, create a new TimeSignature
    and return it.

    >>> mxTime = musicxml.mxObjects.Time()
    >>> mxTime.setDefaults()
    >>> mxAttributes = musicxml.mxObjects.Attributes()
    >>> mxAttributes.timeList.append(mxTime)
    >>> ts = meter.TimeSignature()
    >>> musicxml.fromMxObjects.mxToTimeSignature(mxAttributes.timeList, ts)
    >>> ts.numerator
    4
    '''
    ts = meter.TimeSignature() if inputM21 is None else inputM21

    if not common.isListLike(mxTimeList): # if just one
        mxTime = mxTimeList
    else: # there may be more than one if we have more staffs per part
        mxTime = mxTimeList[0]

    numerators = []
    denominators = []
    for obj in mxTime.componentList:
        if isinstance(obj, mxObjects.Beats):
            numerators.append(obj.charData) # may be 3+2
        if isinstance(obj, mxObjects.BeatType):
            denominators.append(obj.charData)

    # pair each numerator with its denominator into a composite meter string
    fractionStrs = []
    for i, numStr in enumerate(numerators):
        fractionStrs.append('%s/%s' % (numStr, denominators[i]))
    #environLocal.printDebug(['loading meter string:', '+'.join(fractionStrs)])
    ts.load('+'.join(fractionStrs))

    if inputM21 is None:
        return ts
#--------------------------------------------------------
# Key/KeySignatures
def mxKeyListToKeySignature(mxKeyList, inputM21 = None):
    '''
    Given a mxKey object or keyList, return a music21.key.KeySignature
    object and return it, or if inputM21 is None, change its
    attributes and return nothing.
    
    >>> mxk = musicxml.mxObjects.Key()
    >>> mxk.set('fifths', 5)
    >>> ks = key.KeySignature()
    >>> musicxml.fromMxObjects.mxKeyListToKeySignature(mxk, ks)
    >>> ks.sharps
    5 
    
    Or just get a new KeySignature object from scratch:
    
    >>> mxk.set('fifths', -2)
    >>> ks2 = musicxml.fromMxObjects.mxKeyListToKeySignature(mxk)
    >>> ks2
    <music21.key.KeySignature of 2 flats>
    '''
    ks = key.KeySignature() if inputM21 is None else inputM21

    if common.isListLike(mxKeyList):
        # there may be more than one if we have more staffs per part;
        # only the first is used
        mxKey = mxKeyList[0]
    else:
        mxKey = mxKeyList

    fifths = mxKey.get('fifths')
    # a missing fifths value means no sharps or flats
    ks.sharps = int(fifths) if fifths is not None else 0
    mxMode = mxKey.get('mode')
    if mxMode is not None:
        ks.mode = mxMode

    if inputM21 is None:
        return ks
#--------------------------------------------------------
# clefs
def mxClefToClef(mxClefList, inputM21 = None):
    '''
    Given a MusicXML Clef object, return a music21 
    Clef object

    >>> a = musicxml.mxObjects.Clef()   
    >>> a.set('sign', 'G')
    >>> a.set('line', 2)
    >>> b = clef.Clef()
    >>> b
    <music21.clef.Clef>
    >>> 'TrebleClef' in b.classes
    False
    >>> musicxml.fromMxObjects.mxClefToClef(a, b)
    >>> b.sign
    'G'
    >>> 'TrebleClef' in b.classes
    True
    >>> b
    <music21.clef.TrebleClef>

    Create a new clef from thin air:

    >>> a = musicxml.mxObjects.Clef()   
    >>> a.set('sign', 'TAB')
    >>> c = musicxml.fromMxObjects.mxClefToClef(a)
    >>> c
    <music21.clef.TabClef>
    '''
    if not common.isListLike(mxClefList):
        mxClef = mxClefList # its not a list
    else: # just get first for now
        mxClef = mxClefList[0]

    sign = mxClef.get('sign')
    if sign in ['TAB', 'percussion', 'none']:
        # these clef signs carry no line/octave information
        clefObj = clef.clefFromString(sign)
    else:
        line = mxClef.get('line')
        mxOctaveChange = mxClef.get('clefOctaveChange')
        if mxOctaveChange != None:
            octaveChange = int(mxOctaveChange)
        else:
            octaveChange = 0
        clefObj = clef.clefFromString(sign + str(line), octaveChange)

    if inputM21 is None:
        return clefObj
    else:
        # in-place: retype the passed-in Clef object so it becomes an
        # instance of the concrete subclass (e.g. TrebleClef), then copy
        # the identifying attributes onto it
        inputM21._classes = None # invalidate the cached .classes list
        inputM21.__class__ = clefObj.__class__
        inputM21.sign = clefObj.sign
        inputM21.line = clefObj.line
        inputM21.octaveChange = clefObj.octaveChange
#-------------------------------------------------------------------------------
# Dynamics
def mxToDynamicList(mxDirection):
    '''
    Given an mxDirection, load instance

    >>> mxDirection = musicxml.mxObjects.Direction()
    >>> mxDirectionType = musicxml.mxObjects.DirectionType()
    >>> mxDynamicMark = musicxml.mxObjects.DynamicMark('ff')
    >>> mxDynamics = musicxml.mxObjects.Dynamics()
    >>> mxDynamics.set('default-y', -20)
    >>> mxDynamics.append(mxDynamicMark)
    >>> mxDirectionType.append(mxDynamics)
    >>> mxDirection.append(mxDirectionType)

    >>> a = dynamics.Dynamic()
    >>> a = musicxml.fromMxObjects.mxToDynamicList(mxDirection)[0]
    >>> a.value
    'ff'
    >>> a.englishName
    'very loud'
    >>> a._positionDefaultY
    -20
    '''
    # can probably replace this with mxDirection.getDynamicMark()
    # need to test; scan for the last Dynamics nested in a DirectionType
    mxDynamics = None
    for mxObj in mxDirection:
        if isinstance(mxObj, mxObjects.DirectionType):
            for mxObjSub in mxObj:
                if isinstance(mxObjSub, mxObjects.Dynamics):
                    mxDynamics = mxObjSub
    if mxDynamics == None:
        raise dynamics.DynamicException('when importing a Dynamics object from MusicXML, ' + 
                                        'did not find a DynamicMark')
#     if len(mxDynamics) > 1:
#         raise dynamics.DynamicException('when importing a Dynamics object from MusicXML, '
#                                         'found more than one DynamicMark contained, namely %s' % 
#                                         str(mxDynamics))

    # each contained mark becomes one Dynamic object
    post = []
    for sub in mxDynamics.componentList:
        d = dynamics.Dynamic()
        # placement is found in outermost object
        if mxDirection.get('placement') is not None:
            d._positionPlacement = mxDirection.get('placement')

        # the tag is the dynamic mark value
        mxDynamicMark = sub.get('tag')
        d.value = mxDynamicMark
        # copy over any positioning attributes defined on the Dynamics
        for dst, src in [('_positionDefaultX', 'default-x'),
                         ('_positionDefaultY', 'default-y'),
                         ('_positionRelativeX', 'relative-x'),
                         ('_positionRelativeY', 'relative-y')]:
            if mxDynamics.get(src) is not None:
                setattr(d, dst, mxDynamics.get(src))
        post.append(d)
    return post
def mxToTextExpression(mxDirection):
    '''
    Given an mxDirection, create one or more TextExpressions
    '''
    post = []
    for mxWords in mxDirection.getWords():
        #environLocal.printDebug(['mxToTextExpression()', mxWords, mxWords.charData])
        # content can be passed with creation argument
        te = expressions.TextExpression(mxWords.charData)
        te.justify = mxWords.get('justify')
        te.size = mxWords.get('font-size')
        te.letterSpacing = mxWords.get('letter-spacing')
        te.enclosure = mxWords.get('enclosure')
        te.positionVertical = mxWords.get('default-y')

        # font-style and font-weight combine into a single music21 style;
        # 'normal' is treated the same as absent
        fontStyle = mxWords.get('font-style')
        if fontStyle == 'normal':
            fontStyle = None
        fontWeight = mxWords.get('font-weight')
        if fontWeight == 'normal':
            fontWeight = None
        if fontStyle is not None and fontWeight is not None:
            if fontStyle == 'italic' and fontWeight == 'bold':
                te.style = 'bolditalic'
        # at most one of the two is set below this point
        elif fontStyle == 'italic':
            te.style = 'italic'
        elif fontWeight == 'bold':
            te.style = 'bold'
        post.append(te)
    return post
def mxToCoda(mxCoda):
    '''
    Translate a MusicXML :class:`~music21.musicxml.mxObjects.Coda` object 
    to a music21 :class:`~music21.repeat.Coda` object.
    '''
    codaObj = repeat.Coda()
    # carry over the engraving position attributes
    codaObj._positionDefaultX = mxCoda.get('default-x')
    codaObj._positionDefaultY = mxCoda.get('default-y')
    return codaObj
def mxToSegno(mxCoda):
    '''
    Translate a MusicXML :class:`~music21.musicxml.mxObjects.Segno` object 
    to a music21 :class:`~music21.repeat.Segno` object.
    '''
    segnoObj = repeat.Segno()
    # carry over the engraving position attributes
    segnoObj._positionDefaultX = mxCoda.get('default-x')
    segnoObj._positionDefaultY = mxCoda.get('default-y')
    return segnoObj
def mxToRepeatExpression(mxDirection):
    '''
    Given an mxDirection that may define a coda, segno, or other repeat 
    expression statement, realize the appropriate music21 object.

    Currently a no-op placeholder: repeat expressions are handled during
    measure processing instead.
    '''
    pass
    # note: this may not be needed, as mx text expressions are converted to repeat objects in measure processing
#-------------------------------------------------------------------------------
# Harmony
def mxToChordSymbol(mxHarmony):
    '''
    Convert a musicxml.mxObjects.Harmony() object to a harmony.ChordSymbol object:
    
    ::

        >>> mxHarmony = musicxml.mxObjects.Harmony()
        >>> mxKind = musicxml.mxObjects.Kind()
        >>> mxKind.charData = 'major-seventh'
        >>> mxHarmony.kindObj = mxKind
        >>> mxRoot = musicxml.mxObjects.Root()
        >>> mxRoot.set('root-step', 'D')
        >>> mxRoot.set('root-alter', '-1')
        >>> mxHarmony.rootObj = mxRoot
        >>> cs = musicxml.fromMxObjects.mxToChordSymbol(mxHarmony)
        >>> cs
        <music21.harmony.ChordSymbol D-maj7>

    ::

        >>> cs.figure
        'D-maj7'

    ::

        >>> cs.pitches
        (<music21.pitch.Pitch D-3>, <music21.pitch.Pitch F3>, <music21.pitch.Pitch A-3>, <music21.pitch.Pitch C4>)

    ::

        >>> cs.root()
        <music21.pitch.Pitch D-3>
    
    TODO: this is very classically-oriented.  Make it more Jazz/Rock like.
    
    ::

        >>> mxKind.charData = 'major-sixth'
        >>> cs = musicxml.fromMxObjects.mxToChordSymbol(mxHarmony)
        >>> cs
        <music21.harmony.ChordSymbol D-6>

    ::

        >>> cs.figure
        'D-6'

    ::

        >>> cs.pitches
        (<music21.pitch.Pitch D-3>, <music21.pitch.Pitch F3>, <music21.pitch.Pitch A-3>, <music21.pitch.Pitch B-3>)

    ::

        >>> cs.root()
        <music21.pitch.Pitch D-3>  
    '''
    #environLocal.printDebug(['mxToChordSymbol():', mxHarmony])
    cs = harmony.ChordSymbol()

    mxKind = mxHarmony.get('kind')
    if mxKind is not None:
        cs.chordKind = mxKind.charData
        mxKindText = mxKind.get('text')
        if mxKindText is not None:
            cs.chordKindStr = mxKindText

    # the root pitch; may legitimately be absent.  Initialize to None so
    # that the bass-fallback and final root check below do not raise an
    # UnboundLocalError when no <root> element is present (previously a bug).
    r = None
    mxRoot = mxHarmony.get('root')
    if mxRoot is not None:
        r = pitch.Pitch(mxRoot.get('rootStep'))
        if mxRoot.get('rootAlter') is not None:
            # can provide integer to create accidental on pitch
            r.accidental = pitch.Accidental(int(mxRoot.get('rootAlter')))
        # set Pitch object on Harmony
        cs.root(r)

    mxBass = mxHarmony.get('bass')
    if mxBass is not None:
        b = pitch.Pitch(mxBass.get('bassStep'))
        if mxBass.get('bassAlter') is not None:
            # can provide integer to create accidental on pitch
            b.accidental = pitch.Accidental(int(mxBass.get('bassAlter')))
        # set Pitch object on Harmony
        cs.bass(b)
    elif r is not None:
        # no explicit bass: fall back to the root
        cs.bass(r)

    mxInversion = mxHarmony.get('inversion')
    if mxInversion is not None:
        cs.inversion(int(mxInversion), transposeOnSet=False) # must be an int

    mxFunction = mxHarmony.get('function')
    if mxFunction is not None:
        cs.romanNumeral = mxFunction # goes to roman property

    mxDegree = mxHarmony.get('degree')
    if mxDegree is not None: # a list of components
        ChordStepModifications = []
        hd = None
        for mxSub in mxDegree.componentList:
            # this is the assumed order of triples:
            # degree-value, then degree-alter, then degree-type
            if isinstance(mxSub, mxObjects.DegreeValue):
                if hd is not None: # already set
                    ChordStepModifications.append(hd)
                    hd = None
                if hd is None:
                    hd = harmony.ChordStepModification()
                hd.degree = int(mxSub.charData)
            elif isinstance(mxSub, mxObjects.DegreeAlter):
                hd.interval = int(mxSub.charData)
            elif isinstance(mxSub, mxObjects.DegreeType):
                hd.modType = mxSub.charData
            else:
                raise FromMxObjectsException('found unexpected object in degree tag: %s' % mxSub)
        # must get last on loop exit
        if hd is not None:
            ChordStepModifications.append(hd)
        for hd in ChordStepModifications:
            cs.addChordStepModification(hd)

    cs._updatePitches()
    #environLocal.printDebug(['mxToChordSymbol(): Harmony object', cs])
    # _updatePitches may have respelled the root; restore the original
    # spelling when one was given
    if r is not None and cs.root().name != r.name:
        cs.root(r)
    return cs
#-------------------------------------------------------------------------------
# Instruments
def mxToInstrument(mxScorePart, inputM21=None):
    '''
    Return a generic instrument.Instrument object built from this mxScorePart.

    If `inputM21` is provided, that Instrument is configured in place and
    nothing is returned; otherwise a new Instrument is created and returned.
    '''
    # note: transposition values is not set in this operation, but in
    # mxToStreamPart
    i = instrument.Instrument() if inputM21 is None else inputM21

    def _cleanStr(badStr):
        # normalize a string pulled from the xml: None passes through;
        # otherwise strip outer whitespace and flatten newlines to spaces
        if badStr is None:
            return None
        return badStr.strip().replace('\n', ' ')

    # copy the simple string attributes, cleaning each one
    for m21Attr, mxAttr in (('partId', 'id'),
                            ('partName', 'partName'),
                            ('partAbbreviation', 'partAbbreviation')):
        setattr(i, m21Attr, _cleanStr(mxScorePart.get(mxAttr)))

    # only the first score-instrument entry is considered for now
    if mxScorePart.scoreInstrumentList:
        mxScoreInstrument = mxScorePart.scoreInstrumentList[0]
        i.instrumentName = _cleanStr(mxScoreInstrument.get('instrumentName'))
        i.instrumentAbbreviation = _cleanStr(
            mxScoreInstrument.get('instrumentAbbreviation'))
    # only the first midi-instrument entry is considered for now
    if mxScorePart.midiInstrumentList:
        mxMIDIInstrument = mxScorePart.midiInstrumentList[0]
        # musicxml counts programs/channels from 1; music21 counts from 0
        mp = mxMIDIInstrument.get('midiProgram')
        if mp is not None:
            i.midiProgram = int(mp) - 1
        mc = mxMIDIInstrument.get('midiChannel')
        if mc is not None:
            i.midiChannel = int(mc) - 1
    if inputM21 is None:
        return i
#-------------------------------------------------------------------------------
# unified processors for Chords and Notes
def mxNotationsToSpanners(target, mxNotations, spannerBundle):
    '''
    General routines for gathering spanners from notes via mxNotations objects and placing them
    in a spanner bundle.

    Spanners may be found in musicXML notations and directions objects.

    The passed-in spannerBundle will be edited in-place; existing spanners may be completed, or
    new spanners may be added.

    The `target` object is a reference to the relevant music21 object this spanner is associated
    with.

    Four spanner families found inside <notations> are handled here: slurs,
    wavy-lines (trill extensions), tremolos, and glissandi.  Each follows
    the same pattern: search the bundle for an open (incomplete) spanner of
    the matching class and local id; if none is found, start a new one and
    append it to the bundle; then attach `target` as a spanned element and,
    on a 'stop' marker, set completeStatus so the spanner is treated as
    closed.
    '''
    mxSlurList = mxNotations.getSlurs()
    for mxObj in mxSlurList:
        # look at all spanners and see if we have an open, matching
        # slur to place this in
        idFound = mxObj.get('number')
        # returns a new spanner bundle with just the result of the search
        #environLocal.printDebug(['spanner bundle: getByCompleteStatus(False)', spannerBundle.getByCompleteStatus(False)])
        #sb = spannerBundle.getByIdLocal(idFound).getByCompleteStatus(False)
        sb = spannerBundle.getByClassIdLocalComplete('Slur', idFound, False)
        if len(sb) > 0: # if we already have a slur
            #environLocal.printDebug(['found a match in SpannerBundle'])
            su = sb[0] # get the first
        else: # create a new slur
            su = spanner.Slur()
            su.idLocal = idFound
            su.placement = mxObj.get('placement')
            spannerBundle.append(su)
        # add a reference of this note to this spanner
        su.addSpannedElements(target)
        #environLocal.printDebug(['adding n', n, id(n), 'su.getSpannedElements', su.getSpannedElements(), su.getSpannedElementIds()])
        if mxObj.get('type') == 'stop':
            su.completeStatus = True
            # only add after complete
    # wavy-line elements become TrillExtension spanners
    mxWavyLineList = mxNotations.getWavyLines()
    for mxObj in mxWavyLineList:
        #environLocal.printDebug(['waveyLines', mxObj])
        idFound = mxObj.get('number')
        sb = spannerBundle.getByClassIdLocalComplete('TrillExtension',
            idFound, False)
        if len(sb) > 0: # if we already have
            su = sb[0] # get the first
        else: # create a new spanner
            su = expressions.TrillExtension()
            su.idLocal = idFound
            su.placement = mxObj.get('placement')
            spannerBundle.append(su)
        # add a reference of this note to this spanner
        su.addSpannedElements(target)
        if mxObj.get('type') == 'stop':
            su.completeStatus = True
            # only add after complete
    # tremolo elements become Tremolo spanners
    mxTremoloList = mxNotations.getTremolos()
    for mxObj in mxTremoloList:
        environLocal.printDebug(['mxTremoloList', mxObj])
        idFound = mxObj.get('number')
        sb = spannerBundle.getByClassIdLocalComplete('Tremolo',
            idFound, False)
        if len(sb) > 0: # if we already have
            su = sb[0] # get the first
        else: # create a new spanner
            environLocal.printDebug(['creating Tremolo'])
            su = expressions.Tremolo()
            su.idLocal = idFound
            #su.placement = mxObj.get('placement')
            spannerBundle.append(su)
        # add a reference of this note to this spanner
        su.addSpannedElements(target)
        # can be stop or None; we can have empty single-element tremolo
        if mxObj.get('type') in ['stop', None]:
            su.completeStatus = True
            # only add after complete
    # glissando elements become Glissando spanners
    mxGlissandoList = mxNotations.getGlissandi()
    for mxObj in mxGlissandoList:
        idFound = mxObj.get('number')
        sb = spannerBundle.getByClassIdLocalComplete('Glissando',
            idFound, False)
        if len(sb) > 0: # if we already have
            su = sb[0] # get the first
        else: # create a new spanner
            su = spanner.Glissando()
            su.idLocal = idFound
            su.lineType = mxObj.get('line-type')
            spannerBundle.append(su)
        # add a reference of this note to this spanner
        su.addSpannedElements(target)
        if mxObj.get('type') == 'stop':
            su.completeStatus = True
            # only add after complete
def mxDirectionToSpanners(targetLast, mxDirection, spannerBundle):
    '''Some spanners, such as MusicXML octave-shift, are encoded as MusicXML directions.

    Handles wedges (crescendo/diminuendo), brackets, and dashes found in an
    mxDirection.  A 'start' type opens a new spanner and registers it as
    pending spanned-element assignment; a 'stop' type completes the matching
    open spanner, attaching `targetLast` (the most recent note) if provided.
    The `spannerBundle` is edited in place; nothing is returned.

    Raises FromMxObjectsException on an unrecognized wedge/bracket/dashes
    type attribute.
    '''
    mxWedge = mxDirection.getWedge()
    if mxWedge is not None:
        mxType = mxWedge.get('type')
        idFound = mxWedge.get('number')
        #environLocal.printDebug(['mxDirectionToSpanners', 'found mxWedge', mxType, idFound])
        if mxType == 'crescendo':
            sp = dynamics.Crescendo()
            sp.idLocal = idFound
            spannerBundle.append(sp)
            # define this spanner as needing component assignment from
            # the next general note
            spannerBundle.setPendingSpannedElementAssignment(sp, 'GeneralNote')
        elif mxType == 'diminuendo':
            sp = dynamics.Diminuendo()
            sp.idLocal = idFound
            spannerBundle.append(sp)
            spannerBundle.setPendingSpannedElementAssignment(sp, 'GeneralNote')
        elif mxType == 'stop':
            # need to retrieve an existing spanner
            # try to get base class of both Crescendo and Decrescendo
            sp = spannerBundle.getByClassIdLocalComplete('DynamicWedge',
                    idFound, False)[0] # get first
            sp.completeStatus = True
            # will only have a target if this follows the note
            if targetLast is not None:
                sp.addSpannedElements(targetLast)
        else:
            raise FromMxObjectsException('unidentified mxType of mxWedge:', mxType)

    mxBracket = mxDirection.getBracket()
    if mxBracket is not None:
        mxType = mxBracket.get('type')
        idFound = mxBracket.get('number')
        #environLocal.printDebug(['mxDirectionToSpanners', 'found mxBracket', mxType, idFound])
        if mxType == 'start':
            sp = spanner.Line()
            sp.idLocal = idFound
            sp.startTick = mxBracket.get('line-end')
            sp.startHeight = mxBracket.get('end-length')
            sp.lineType = mxBracket.get('line-type')
            spannerBundle.append(sp)
            # define this spanner as needing component assignment from
            # the next general note
            spannerBundle.setPendingSpannedElementAssignment(sp, 'GeneralNote')
        elif mxType == 'stop':
            # retrieve the existing, still-open Line spanner by local id
            sp = spannerBundle.getByClassIdLocalComplete('Line',
                    idFound, False)[0] # get first
            sp.completeStatus = True
            sp.endTick = mxBracket.get('line-end')
            sp.endHeight = mxBracket.get('end-length')
            sp.lineType = mxBracket.get('line-type')
            # will only have a target if this follows the note
            if targetLast is not None:
                sp.addSpannedElements(targetLast)
        else:
            raise FromMxObjectsException('unidentified mxType of mxBracket:', mxType)

    mxDashes = mxDirection.getDashes()
    # import mxDashes as m21 Line objects
    if mxDashes is not None:
        mxType = mxDashes.get('type')
        idFound = mxDashes.get('number')
        #environLocal.printDebug(['mxDirectionToSpanners', 'found mxDashes', mxType, idFound])
        if mxType == 'start':
            sp = spanner.Line()
            sp.idLocal = idFound
            sp.startTick = 'none'
            sp.lineType = 'dashed'
            spannerBundle.append(sp)
            # define this spanner as needing component assignment from
            # the next general note
            spannerBundle.setPendingSpannedElementAssignment(sp, 'GeneralNote')
        elif mxType == 'stop':
            # retrieve the existing, still-open Line spanner by local id
            sp = spannerBundle.getByClassIdLocalComplete('Line',
                    idFound, False)[0] # get first
            sp.completeStatus = True
            sp.endTick = 'none'
            # will only have a target if this follows the note
            if targetLast is not None:
                sp.addSpannedElements(targetLast)
        else:
            # fixed: this message previously said 'mxBracket' (copy-paste
            # from the bracket branch above), misreporting the failing tag
            raise FromMxObjectsException('unidentified mxType of mxDashes:', mxType)
#-------------------------------------------------------------------------------
def mxFermataToFermata(mxFermata, inputM21 = None):
    '''
    Convert an mxFermata object to a music21 expressions.Fermata
    object.

    When `inputM21` is None a new Fermata is created and returned;
    otherwise the given Fermata is updated in place and nothing is
    returned.

    >>> mxFermata = musicxml.mxObjects.Fermata()
    >>> mxFermata.set('type', 'inverted')
    >>> fermata = musicxml.fromMxObjects.mxFermataToFermata(mxFermata)
    >>> fermata.type
    'inverted'
    '''
    fermata = expressions.Fermata() if inputM21 is None else inputM21
    # the only attribute carried over is the fermata type ('upright'/'inverted')
    fermata.type = mxFermata.get('type')
    if inputM21 is None:
        return fermata
def mxTechnicalToArticulation(mxTechnicalMark, inputM21 = None):
    '''
    Convert an mxTechnicalMark to a music21.articulations.TechnicalIndication object or one
    of its subclasses.

    Example: Provided an musicxml.mxObjects.TechnicalMark object (not an mxTechnical object)
    configure the music21 object.

    Create both a musicxml.mxObjects.ArticulationMark object and a conflicting music21 object:

    >>> mxTechnicalMark = musicxml.mxObjects.TechnicalMark('up-bow')
    >>> mxTechnicalMark.set('placement', 'below')
    >>> a = articulations.DownBow()
    >>> a.placement = 'above'

    Now override the music21 object with the mxArticulationMark object's characteristics

    >>> musicxml.fromMxObjects.mxTechnicalToArticulation(mxTechnicalMark, inputM21 = a)
    >>> 'DownBow' in a.classes
    False
    >>> 'UpBow' in a.classes
    True
    >>> a.placement
    'below'

    Raises FromMxObjectsException if the technical mark's tag cannot be
    translated to a music21 class.
    '''
    # maps MusicXML <technical> child tag names to music21 articulation classes
    mappingList = {'up-bow'          : articulations.UpBow,
                   'down-bow'        : articulations.DownBow,
                   'harmonic'        : articulations.Harmonic,
                   'open-string'     : articulations.OpenString,
                   'thumb-position'  : articulations.StringThumbPosition,
                   'fingering'       : articulations.StringFingering,
                   'pluck'           : articulations.FrettedPluck,
                   'double-tongue'   : articulations.DoubleTongue,
                   'triple-tongue'   : articulations.TripleTongue,
                   'stopped'         : articulations.Stopped,
                   'snap-pizzicato'  : articulations.SnapPizzicato,
                   'fret'            : articulations.FretIndication,
                   'string'          : articulations.StringIndication,
                   'hammer-on'       : articulations.HammerOn,
                   'pull-off'        : articulations.PullOff,
                   'bend'            : articulations.FretBend,
                   'tap'             : articulations.FretTap,
                   'heel'            : articulations.OrganHeel,
                   'toe'             : articulations.OrganToe,
                   'fingernails'     : articulations.HarpFingerNails,
                   'other-technical' : articulations.TechnicalIndication,
                   }
    mxName = mxTechnicalMark.tag
    if mxName not in mappingList:
        # previously this only logged a debug message and then fell through
        # to mappingList[mxName], raising an uninformative KeyError; raise
        # the module's own exception with a clear message instead
        raise FromMxObjectsException("Cannot translate %s in %s." % (mxName, mxTechnicalMark))
    artClass = mappingList[mxName]

    if inputM21 is None:
        art = artClass()
    else:
        art = inputM21
        # rebinding __class__ converts the passed-in object to the new type
        art.__class__ = artClass

    # placement is optional; some technical marks do not define it
    try:
        art.placement = mxTechnicalMark.get('placement')
    except xmlnode.XMLNodeException:
        pass

    if inputM21 is None:
        return art
def mxArticulationToArticulation(mxArticulationMark, inputM21 = None):
    '''
    Convert an mxArticulationMark to a music21.articulations.Articulation
    object or one of its subclasses.

    Example: Provided an musicxml.mxObjects.ArticulationMark object (not an mxArticulations object)
    configure the music21 object.

    Create both a musicxml.mxObjects.ArticulationMark object and a conflicting music21 object:

    >>> mxArticulationMark = musicxml.mxObjects.ArticulationMark('accent')
    >>> mxArticulationMark.set('placement', 'below')
    >>> a = articulations.Tenuto()
    >>> a.placement = 'above'

    Now override the music21 object with the mxArticulationMark object's characteristics

    >>> musicxml.fromMxObjects.mxArticulationToArticulation(mxArticulationMark, inputM21 = a)
    >>> 'Tenuto' in a.classes
    False
    >>> 'Accent' in a.classes
    True
    >>> a.placement
    'below'

    Raises FromMxObjectsException if the articulation mark's tag cannot be
    translated to a music21 class.
    '''
    # maps MusicXML <articulations> child tag names to music21 classes
    mappingList = {'accent'          : articulations.Accent,
                   'strong-accent'   : articulations.StrongAccent,
                   'staccato'        : articulations.Staccato,
                   'staccatissimo'   : articulations.Staccatissimo,
                   'spiccato'        : articulations.Spiccato,
                   'tenuto'          : articulations.Tenuto,
                   'detached-legato' : articulations.DetachedLegato,
                   'scoop'           : articulations.Scoop,
                   'plop'            : articulations.Plop,
                   'doit'            : articulations.Doit,
                   'falloff'         : articulations.Falloff,
                   'breath-mark'     : articulations.BreathMark,
                   'caesura'         : articulations.Caesura,
                   'stress'          : articulations.Stress,
                   'unstress'        : articulations.Unstress,
                   'other-articulation': articulations.Articulation,
                   }
    mxName = mxArticulationMark.tag
    if mxName not in mappingList:
        # previously this only logged a debug message and then fell through
        # to mappingList[mxName], raising an uninformative KeyError; raise
        # the module's own exception with a clear message instead
        raise FromMxObjectsException("Cannot translate %s in %s." % (mxName, mxArticulationMark))
    artClass = mappingList[mxName]

    if inputM21 is None:
        art = artClass()
    else:
        art = inputM21
        # rebinding __class__ converts the passed-in object to the new type
        art.__class__ = artClass

    art.placement = mxArticulationMark.get('placement')

    if inputM21 is None:
        return art
def mxOrnamentToExpressionOrArticulation(mxOrnament):
    '''
    Convert mxOrnament into a music21 ornament.

    This only processes non-spanner ornaments.
    Many mxOrnaments are spanners: these are handled elsewhere.

    Returns None if cannot be converted or not defined.
    '''
    # ordered (mx class, m21 class) pairs; the first isinstance match wins,
    # so the original test order is preserved
    dispatchOrder = ((mxObjects.TrillMark, expressions.Trill),
                     (mxObjects.Mordent, expressions.Mordent),
                     (mxObjects.InvertedMordent, expressions.InvertedMordent),
                     (mxObjects.Turn, expressions.Turn),
                     (mxObjects.InvertedTurn, expressions.InvertedTurn),
                     (mxObjects.Shake, expressions.Shake),
                     (mxObjects.Schleifer, expressions.Schleifer),
                     )
    for mxClass, m21Class in dispatchOrder:
        if isinstance(mxOrnament, mxClass):
            orn = m21Class()
            # only the trill mark carries placement information here
            if mxClass is mxObjects.TrillMark:
                orn.placement = mxOrnament.get('placement')
            return orn
    return None # not convertible
#-------------------------------------------------------------------------------
# Chords
def mxToChord(mxNoteList, inputM21=None, spannerBundle=None):
    '''
    Given an a list of mxNotes, fill the necessary parameters

    >>> a = musicxml.mxObjects.Note()
    >>> p = musicxml.mxObjects.Pitch()
    >>> p.set('step', 'A')
    >>> p.set('octave', 3)
    >>> a.setDefaults()
    >>> a.set('pitch', p)
    >>> b = musicxml.mxObjects.Note()
    >>> b.setDefaults()
    >>> b.set('chord', True)
    >>> m = musicxml.mxObjects.Measure()
    >>> m.setDefaults()
    >>> a.external['measure'] = m # assign measure for divisions ref
    >>> a.external['divisions'] = m.external['divisions']
    >>> b.external['measure'] = m # assign measure for divisions ref
    >>> b.external['divisions'] = m.external['divisions']
    >>> c = musicxml.fromMxObjects.mxToChord([a, b])
    >>> len(c.pitches)
    2
    >>> c.pitches[0]
    <music21.pitch.Pitch A3>

    >>> a = musicxml.mxObjects.Note()
    >>> a.setDefaults()
    >>> nh1 = musicxml.mxObjects.Notehead()
    >>> nh1.set('charData', 'diamond')
    >>> a.noteheadObj = nh1
    >>> b = musicxml.mxObjects.Note()
    >>> b.setDefaults()
    >>> b.set('chord', True)
    >>> m = musicxml.mxObjects.Measure()
    >>> m.setDefaults()
    >>> a.external['measure'] = m # assign measure for divisions ref
    >>> a.external['divisions'] = m.external['divisions']
    >>> b.external['measure'] = m # assign measure for divisions ref
    >>> b.external['divisions'] = m.external['divisions']
    >>> c = musicxml.fromMxObjects.mxToChord([a, b])
    >>> c.getNotehead(c.pitches[0])
    'diamond'
    '''
    if inputM21 == None:
        c = chord.Chord()
    else:
        c = inputM21

    if spannerBundle is None:
        #environLocal.printDebug(['mxToNote()', 'creating SpannerBundle'])
        spannerBundle = spanner.SpannerBundle()
    else: # if we are passed in as spanner bundle, look for any pending
        # component assignments
        spannerBundle.freePendingSpannedElementAssignment(c)

    # assume that first chord is the same duration for all parts
    mxToDuration(mxNoteList[0], c.duration)

    # assume that first note in list has a grace object (and all do)
    mxGrace = mxNoteList[0].get('graceObj')

    # gather per-note attributes into parallel lists indexed like pitches,
    # so they can be re-associated with each pitch after c.pitches is set
    pitches = []
    ties = [] # store equally spaced list; use None if not defined
    noteheads = [] # store notehead attributes that correspond with pitches
    stemDirs = [] # store stem direction attributes that correspond with pitches

    for mxNote in mxNoteList:
        # extract pitch objects
        p = mxToPitch(mxNote)
        pitches.append(p)
        #extract notehead objects; may be None
        nh = mxNote.get('noteheadObj')
        noteheads.append(nh)
        #extract stem directions
        stemDir = mxNote.get('stem')
        stemDirs.append(stemDir)

        if len(mxNote.tieList) > 0:
            tieObj = mxToTie(mxNote)
            #environLocal.printDebug(['found tie in chord', tieObj])
            ties.append(tieObj)
        else: # need place holder for each pitch
            ties.append(None)
    # set all at once
    c.pitches = pitches
    # set beams from first note of chord
    beamsObj = mxToBeams(mxNoteList[0].beamList)
    c.beams = beamsObj

    # set ties based on pitches
    for i, t in enumerate(ties):
        if t is not None:
            # provide pitch to assign tie to based on index number
            c.setTie(t, pitches[i])
    #set notehead based on pitches
    for index, obj in enumerate(noteheads):
        if obj is not None:
            c.setNotehead(obj.charData, c.pitches[index])
            # set color per pitch
            c.setColor(obj.get('color'), c.pitches[index])
    #set stem direction based upon pitches
    for i, sd in enumerate(stemDirs):
        if sd != 'unspecified':
            c.setStemDirection(sd, c.pitches[i])

    # convert to a grace chord when the first note carried a grace element
    if mxGrace is not None:
        c = c.getGrace()

    return c
#-------------------------------------------------------------------------------
# Notes
def mxToNote(mxNote, spannerBundle=None, inputM21=None):
    '''
    Translate a MusicXML :class:`~music21.musicxml.mxObjects.Note`
    to a :class:`~music21.note.Note`.

    The `spannerBundle` parameter can be a list or a Stream
    for storing and processing Spanner objects.

    If inputM21 is not `None` then that object is used
    for translating. Otherwise a new Note is created.

    Returns a `note.Note` object.

    >>> mxNote = musicxml.mxObjects.Note()
    >>> mxNote.setDefaults()
    >>> mxMeasure = musicxml.mxObjects.Measure()
    >>> mxMeasure.setDefaults()
    >>> mxMeasure.append(mxNote)
    >>> mxNote.external['measure'] = mxMeasure # manually create ref
    >>> mxNote.external['divisions'] = mxMeasure.external['divisions']
    >>> n = musicxml.fromMxObjects.mxToNote(mxNote)
    >>> n
    <music21.note.Note C>
    '''
    if inputM21 is None:
        n = note.Note()
    else:
        n = inputM21

    # handle the note-only attributes (pitch, beams, stem, notehead) here;
    # everything shared with rests is done by mxNoteToGeneralNoteHelper below
    mxToPitch(mxNote, n.pitch) # required info will be taken from entire note
    beamsObj = mxToBeams(mxNote.beamList)
    n.beams = beamsObj

    mxStem = mxNote.get('stem')
    if mxStem is not None:
        n.stemDirection = mxStem

    # gets the notehead object from the mxNote and sets value of the music21 note
    # to the value of the notehead object
    mxNotehead = mxNote.get('noteheadObj')
    if mxNotehead is not None:
        if mxNotehead.charData not in ['', None]:
            n.notehead = mxNotehead.charData
        if mxNotehead.get('color') is not None:
            n.color = mxNotehead.get('color')

    # after this, use combined function for notes and rests...
    return mxNoteToGeneralNoteHelper(n, mxNote, spannerBundle)
def mxToRest(mxNote, inputM21=None, spannerBundle=None):
    '''Translate a MusicXML :class:`~music21.musicxml.mxObjects.Note` object to a :class:`~music21.note.Rest`.

    If an `inputM21` object reference is provided, this object will be configured; otherwise, a new :class:`~music21.note.Rest` object is created and returned.
    '''
    # the rest itself carries no pitch data; everything else (duration,
    # notations, ties, spanners) is shared with notes via the helper
    r = note.Rest() if inputM21 is None else inputM21
    return mxNoteToGeneralNoteHelper(r, mxNote, spannerBundle)
def mxNoteToGeneralNoteHelper(n, mxNote, spannerBundle=None):
    '''
    helper function for things common to notes and rests.

    n can be a note or rest...

    Configures duration, color, x-position, ties, notations-derived
    articulations/expressions, and spanners on `n` from `mxNote`, and
    returns `n` (possibly replaced by a grace-note variant).
    '''
    # doing this will create an instance, but will not be passed
    # out of this method, and thus is only for testing
    if spannerBundle is None:
        #environLocal.printDebug(['mxToNote()', 'creating SpannerBundle'])
        spannerBundle = spanner.SpannerBundle()
    else: # if we are passed in as spanner bundle, look for any pending
        # component assignments
        spannerBundle.freePendingSpannedElementAssignment(n)

    # print object == 'no' and grace notes may have a type but not
    # a duration. they may be filtered out at the level of Stream
    # processing
    if mxNote.get('printObject') == 'no':
        n.hideObjectOnPrint = True
        #environLocal.printDebug(['got mxNote with printObject == no'])

    mxGrace = mxNote.get('graceObj')
    if mxGrace is not None:
        #environLocal.printDebug(['mxGrace', mxGrace, mxNote, n.duration])
        # in some cases grace notes may not have an assigned duration type
        # this default type is set here, before assigning to n.duration
        if mxNote.type is None:
            #environLocal.printDebug(['mxToNote', 'mxNote that is a grace missing duration type'])
            mxNote.type = 'eighth'

    # the n.duration object here will be configured based on mxNote
    mxToDuration(mxNote, n.duration)

    # get color from Note first; if not, try to get from notehead
    if mxNote.get('color') is not None:
        n.color = mxNote.get('color')

    # get x-positioning if any...
    if mxNote.get('default-x') is not None:
        n.xPosition = mxNote.get('default-x')

    # can use mxNote.tieList instead
    mxTieList = mxNote.get('tieList')
    if len(mxTieList) > 0:
        tieObj = mxToTie(mxNote) # m21 tie object
        # provide entire Note
        # n.tie is defined in GeneralNote as None by default
        n.tie = tieObj

    # things found in notations object:
    # articulations, slurs
    mxNotations = mxNote.get('notationsObj')
    if mxNotations is not None:
        # get a list of mxArticulationMarks, not mxArticulations
        mxArticulationMarkList = mxNotations.getArticulations()
        for mxObj in mxArticulationMarkList:
            articulationObj = mxArticulationToArticulation(mxObj)
            n.articulations.append(articulationObj)

        # get any technical marks, a list of mxTechnicalMarks, not mxTechnical
        # they live with articulations
        mxTechnicalMarkList = mxNotations.getTechnical()
        for mxObj in mxTechnicalMarkList:
            technicalObj = mxTechnicalToArticulation(mxObj)
            n.articulations.append(technicalObj)

        # get any fermatas, store on expressions
        mxFermataList = mxNotations.getFermatas()
        for mxObj in mxFermataList:
            fermataObj = mxFermataToFermata(mxObj)
            n.expressions.append(fermataObj)

        # ornaments live one level deeper: iterate each ornaments container
        mxOrnamentsList = mxNotations.getOrnaments()
#         if len(mxOrnamentsList) > 0:
#             environLocal.printDebug(['mxOrnamentsList:', mxOrnamentsList])
        for mxOrnamentsObj in mxOrnamentsList:
            for mxObj in mxOrnamentsObj:
                post = mxOrnamentToExpressionOrArticulation(mxObj)
                if post is not None:
                    n.expressions.append(post)
                    #environLocal.printDebug(['adding to epxressions', post])

        # create spanners:
        mxNotationsToSpanners(n, mxNotations, spannerBundle)

    # translate if necessary, otherwise leaves unchanged
    n = mxGraceToGrace(n, mxGrace)
    return n
#------------------------------------------------------------------------------
# Defaults
def mxDefaultsToScoreLayout(mxDefaults, inputM21=None):
    '''
    Convert a :class:`~music21.musicxml.mxObjects.Defaults`
    object to a :class:`~music21.layout.ScoreLayout`
    object

    Copies scaling information (millimeters/tenths) and any page-, system-,
    and staff-layout elements; the configured ScoreLayout is returned.
    '''
    scoreLayout = layout.ScoreLayout() if inputM21 is None else inputM21

    mxScalingObj = mxDefaults.scalingObj
    if mxScalingObj is not None:
        scoreLayout.scalingMillimeters = mxScalingObj.millimeters
        scoreLayout.scalingTenths = mxScalingObj.tenths

    for mxLayoutObj in mxDefaults.layoutList:
        layoutTag = mxLayoutObj.tag
        if layoutTag == 'page-layout':
            scoreLayout.pageLayout = mxPageLayoutToPageLayout(mxLayoutObj)
        elif layoutTag == 'system-layout':
            scoreLayout.systemLayout = mxSystemLayoutToSystemLayout(mxLayoutObj)
        elif layoutTag == 'staff-layout':
            # according to the xsd there can be more than one staff-layout,
            # so these accumulate in a list rather than overwrite
            scoreLayout.staffLayoutList.append(mxStaffLayoutToStaffLayout(mxLayoutObj))

    return scoreLayout
#-------------------------------------------------------------------------------
# Measures
def addToStaffReference(mxObjectOrNumber, music21Object, staffReference):
    '''
    Utility routine for importing musicXML objects;
    here, we store a reference to the music21 object in a dictionary,
    where keys are the staff values. Staff values may be None, 1, 2, etc.

    `staffReference` (a dict mapping staff key -> list of music21 objects)
    is mutated in place; nothing is returned.  `mxObjectOrNumber` may be an
    mx object carrying a staff assignment, a list of them (e.g. a chord's
    components, in which case the first is used), or a plain number.
    '''
    #environLocal.printDebug(['addToStaffReference(): called with:', music21Object])
    if common.isListLike(mxObjectOrNumber):
        if len(mxObjectOrNumber) > 0:
            mxObjectOrNumber = mxObjectOrNumber[0] # if a chord, get the first components
        else: # if an empty list
            environLocal.printDebug(['got an mxObject as an empty list', mxObjectOrNumber])
            return
    # determine the staff key for this object
    if hasattr(mxObjectOrNumber, 'staff'):
        key = mxObjectOrNumber.staff
    # some objects store staff assignment simply as number
    else:
        try:
            key = mxObjectOrNumber.get('number')
        except xmlnode.XMLNodeException:
            # object has a get() but no 'number' attribute; nothing to record
            return
        except AttributeError: # a normal number
            key = mxObjectOrNumber
    # setdefault replaces the previous check-then-insert: create the
    # per-staff list on first use, then append in either case
    staffReference.setdefault(key, []).append(music21Object)
def mxToMeasure(mxMeasure, spannerBundle=None, inputM21=None, lastMeasureInfo=None):
'''
Translate an mxMeasure (a MusicXML :class:`~music21.musicxml.mxObjects.Measure` object)
into a music21 :class:`~music21.stream.Measure`.
If an `inputM21` object reference is provided, this object will be
configured and returned; otherwise, a new :class:`~music21.stream.Measure` object is created.
The `spannerBundle` that is passed in is used to accumulate any created Spanners.
This Spanners are not inserted into the Stream here.
Returns a tuple of (music21.stream.Measure object, staffReference (a dictionary for partStaffs of
elements that only belong to a single staff), and a transposition)
'''
if inputM21 == None:
m = stream.Measure()
else:
m = inputM21
# staff assignments: can create a dictionary with components in each
# staff; this dictionary will then be used to copy this measure and
# split components between two parts of more than one staff is defined
staffReference = {}
# doing this will create an instance, but will not be passed
# out of this method, and thus is only for testing
if spannerBundle is None:
#environLocal.printDebug(['mxToMeasure()', 'creating SpannerBundle'])
spannerBundle = spanner.SpannerBundle()
if lastMeasureInfo is not None:
lastMNum, lastMSuffix = lastMeasureInfo
else:
lastMNum, lastMSuffix = (None, None)
mNumRaw = mxMeasure.get('number')
if mNumRaw is None:
mNum = None
mSuffix = None
else:
mNum, mSuffix = common.getNumFromStr(mNumRaw)
# assume that measure numbers are integers
if mNum not in [None, '']:
m.number = int(mNum)
if mSuffix not in [None, '']:
m.numberSuffix = mSuffix
# fix for Finale which calls unnumbered measures X1, X2, etc. which
# we convert to 1.X, 2.X, etc. without this...
if lastMNum is not None:
if m.numberSuffix == 'X' and m.number != lastMNum + 1:
newSuffix = m.numberSuffix + str(m.number)
if lastMSuffix is not None:
newSuffix = lastMSuffix + newSuffix
m.number = lastMNum
m.numberSuffix = newSuffix
data = mxMeasure.get('width')
if data != None: # may need to do a format/unit conversion?
m.layoutWidth = data
# not yet implemented
junk = mxMeasure.get('implicit')
mxAttributes = mxMeasure.get('attributesObj')
mxAttributesInternal = True
# if we do not have defined mxAttributes, must get from stored attributes
if mxAttributes is None:
# need to keep track of where mxattributes src is coming from
# if attributes are defined in this measure, mxAttributesInternal
# is true
mxAttributesInternal = False
# not all measures have attributes definitions; this
# gets the last-encountered measure attributes
mxAttributes = mxMeasure.external['attributes']
if mxAttributes is None:
raise FromMxObjectsException(
'no mxAttribues available for this measure')
#environLocal.printDebug(['mxAttriutes clefList', mxAttributes.clefList,
# mxAttributesInternal])
staffLayoutObjects = []
# getting first for each of these for now
if mxAttributesInternal:
if len(mxAttributes.timeList) != 0:
for mxSub in mxAttributes.timeList:
ts = mxToTimeSignature(mxSub)
addToStaffReference(mxSub, ts, staffReference)
m._insertCore(0, ts)
if len(mxAttributes.clefList) != 0:
for mxClef in mxAttributes.clefList:
cl = mxClefToClef(mxClef)
addToStaffReference(mxClef, cl, staffReference)
m._insertCore(0, cl)
if len(mxAttributes.keyList) != 0:
for mxSub in mxAttributes.keyList:
ks = mxKeyListToKeySignature(mxSub)
addToStaffReference(mxSub, ks, staffReference)
m._insertCore(0, ks)
if len(mxAttributes.staffDetailsList) != 0:
for mxStaffDetails in mxAttributes.staffDetailsList:
foundMatch = False
# perhaps we've already put a staffLayout into the measure?
if mxStaffDetails._attr['number'] is not None:
for stl in staffLayoutObjects:
if stl.staffNumber == int(mxStaffDetails._attr['number']):
try:
stl.staffSize = float(mxStaffDetails.staffSize)
except TypeError:
if mxStaffDetails.staffSize is None:
pass
else:
raise TypeError("Incorrect number for mxStaffDetails.staffSize: %s", mxStaffDetails.staffSize)
foundMatch = True
break
else:
for stl in staffLayoutObjects:
if stl.staffSize is None:
stl.staffSize = float(mxStaffDetails.staffSize)
foundMatch = True
if stl.staffLines is None:
stl.staffLines = int(mxStaffDetails.staffLines)
foundMatch = True
if foundMatch is False:
staffSize = None
try:
staffSize = float(mxStaffDetails.staffSize)
except TypeError:
staffSize = None
staffLines = None
try:
staffLines = int(mxStaffDetails.staffLines)
except TypeError:
staffLines = 5
if mxStaffDetails._attr['number'] is not None:
stl = layout.StaffLayout(staffSize = staffSize, staffLines = staffLines, staffNumber=int(mxStaffDetails._attr['number']))
else:
stl = layout.StaffLayout(staffSize = staffSize, staffLines = staffLines)
if 'print-object' in mxStaffDetails._attr:
staffPrinted = mxStaffDetails._attr['print-object']
if staffPrinted == 'no' or staffPrinted is False:
stl.hidden = True
elif staffPrinted == 'yes' or staffPrinted is True:
stl.hidden = False
#else:
# print mxStaffDetails._attr
addToStaffReference(mxStaffDetails, stl, staffReference)
m._insertCore(0, stl)
staffLayoutObjects.append(stl)
#staffLayoutsAlreadySetList.append(stl)
#print "Got an mxStaffDetails %r" % mxStaffDetails
# transposition may be defined for a Part in the Measure attributes
transposition = None
if mxAttributesInternal and mxAttributes.transposeObj is not None:
# get interval object
transposition = mxTransposeToInterval(mxAttributes.transposeObj)
#environLocal.printDebug(['mxToMeasure: got transposition', transposition])
if mxAttributes.divisions is not None:
divisions = mxAttributes.divisions
else:
divisions = mxMeasure.external['divisions']
if divisions is None:
environLocal.printDebug(['cannot get a division from mxObject', m, "mxMeasure.external['divisions']", mxMeasure.external['divisions']])
raise FromMxObjectsException('cannot get a division from mxObject')
if mxMeasure.getVoiceCount() > 1:
useVoices = True
# count from zero
for voiceId in mxMeasure.getVoiceIndices():
v = stream.Voice()
v.id = voiceId
m._insertCore(0, v)
else:
useVoices = False
# iterate through components found on components list
# set to zero for each measure
offsetMeasureNote = 0 # offset of note w/n measure
mxNoteList = [] # for accumulating notes in chords
mxLyricList = [] # for accumulating lyrics assigned to chords
nLast = None # store the last-create music21 note for Spanners
restAndNoteCount = {'rest': 0, 'note': 0}
chordVoice = None # Sibelius 7.1 only puts a <voice> tag on the
# first note of a chord, so we need to make sure
# that we keep track of the last voice...
for i in range(len(mxMeasure)):
# try to get the next object for chord comparisons
mxObj = mxMeasure[i]
if i < len(mxMeasure) - 1:
mxObjNext = mxMeasure[i + 1]
else:
mxObjNext = None
#environLocal.printDebug(['handling', mxObj])
# NOTE: tests have shown that using isinstance() here is much faster
# than checking the .tag attribute.
# check for backup and forward first
if isinstance(mxObj, mxObjects.Backup):
# resolve as quarterLength, subtract from measure offset
#environLocal.printDebug(['found musicxl backup:', mxObj.duration])
offsetMeasureNote -= float(mxObj.duration) / float(divisions)
continue
elif isinstance(mxObj, mxObjects.Forward):
# resolve as quarterLength, add to measure offset
#environLocal.printDebug(['found musicxl forward:', mxObj.duration, 'divisions', divisions])
offsetMeasureNote += float(mxObj.duration) / float(divisions)
continue
elif isinstance(mxObj, mxObjects.Print):
# mxPrint objects may be found in a Measure's components
# contain page or system layout information among others
mxPrint = mxObj
addPageLayout = False
addSystemLayout = False
addStaffLayout = False
try:
addPageLayout = mxPrint.get('new-page')
if addPageLayout is not None:
addPageLayout = True # false for No??
else:
addPageLayout = False
except xmlnode.XMLNodeException:
pass
if not addPageLayout:
try:
addPageLayout = mxPrint.get('page-number')
if addPageLayout is not None:
addPageLayout = True
else:
addPageLayout = False
except xmlnode.XMLNodeException:
addPageLayout = False
if not addPageLayout:
for layoutType in mxPrint.componentList:
if isinstance(layoutType, mxObjects.PageLayout):
addPageLayout = True
break
try:
addSystemLayout = mxPrint.get('new-system')
if addSystemLayout is not None:
addSystemLayout = True # false for No?
else:
addSystemLayout = False
except xmlnode.XMLNodeException:
pass
if not addSystemLayout:
for layoutType in mxPrint.componentList:
if isinstance(layoutType, mxObjects.SystemLayout):
addSystemLayout = True
break
for layoutType in mxPrint.componentList:
if isinstance(layoutType, mxObjects.StaffLayout):
addStaffLayout = True
break
#--- now we know what we need to add, add em
if addPageLayout:
pl = mxPrintToPageLayout(mxPrint)
# store at zero position
m._insertCore(0, pl)
if addSystemLayout or not addPageLayout:
sl = mxPrintToSystemLayout(mxPrint)
# store at zero position
m._insertCore(0, sl)
if addStaffLayout:
stlList = mxPrintToStaffLayoutList(mxPrint)
for stl in stlList:
foundPrevious = False
for stlSetFromAttributes in staffLayoutObjects:
if stlSetFromAttributes.staffNumber == stl.staffNumber or stlSetFromAttributes.staffNumber is None or stl.staffNumber is None:
foundPrevious = True
stlSetFromAttributes.distance = stl.distance
if stlSetFromAttributes.hidden is None:
stlSetFromAttributes.hidden = stl.hidden
break
if foundPrevious is False:
addToStaffReference(str(stl.staffNumber), stl, staffReference)
m._insertCore(0, stl)
# <sound> tags may be found in the Measure, used to define tempo
elif isinstance(mxObj, mxObjects.Sound):
pass
elif isinstance(mxObj, mxObjects.Barline):
# repeat is a tag found in the barline object
mxBarline = mxObj
mxRepeatObj = mxBarline.get('repeatObj')
if mxRepeatObj is not None:
barline = mxToRepeat(mxBarline)
else:
barline = mxToBarline(mxBarline)
# barline objects also store ending objects, that mark begin
# and end of repeat bracket designations
mxEndingObj = mxBarline.get('endingObj')
if mxEndingObj is not None:
#environLocal.printDebug(['found mxEndingObj', mxEndingObj, 'm', m])
# get all incomplete spanners of the appropriate class that are
# not complete
rbSpanners = spannerBundle.getByClass('RepeatBracket').getByCompleteStatus(False)
# if we have no complete bracket objects, must start a new one
if len(rbSpanners) == 0:
# create with this measure as the object
rb = spanner.RepeatBracket(m)
# there may just be an ending marker, and no start
# this implies just one measure
if mxEndingObj.get('type') in ['stop', 'discontinue']:
rb.completeStatus = True
rb.number = mxEndingObj.get('number')
# set number; '' or None is interpreted as 1
spannerBundle.append(rb)
# if we have any incomplete, this must be the end
else:
#environLocal.printDebug(['matching RepeatBracket spanner', 'len(rbSpanners)', len(rbSpanners)])
rb = rbSpanners[0] # get RepeatBracket
# try to add this measure; may be the same
rb.addSpannedElements(m)
# in general, any rb found should be the opening, and thus
# this is the closing; can check
if mxEndingObj.get('type') in ['stop', 'discontinue']:
rb.completeStatus = True
rb.number = mxEndingObj.get('number')
else:
environLocal.warn('found mxEnding object that is not stop message, even though there is still an open start message. -- ignoring it')
if barline.location == 'left':
#environLocal.printDebug(['setting left barline', barline])
m.leftBarline = barline
elif barline.location == 'right':
#environLocal.printDebug(['setting right barline', barline])
m.rightBarline = barline
else:
environLocal.printDebug(['not handling barline that is neither left nor right', barline, barline.location])
elif isinstance(mxObj, mxObjects.Note):
mxNote = mxObj
if isinstance(mxObjNext, mxObjects.Note):
mxNoteNext = mxObjNext
else:
mxNoteNext = None
if mxNote.get('print-object') == 'no':
#environLocal.printDebug(['got mxNote with printObject == no', 'measure number', m.number])
continue
# mxGrace = mxNote.get('graceObj')
# if mxGrace is not None: # graces have a type but not a duration
# #environLocal.printDebug(['got mxNote with an mxGrace', 'duration', mxNote.get('duration'), 'measure number',
# #m.number])
# continue
# the first note of a chord is not identified directly; only
# by looking at the next note can we tell if we have the first
# note of a chord
if mxNoteNext is not None and mxNoteNext.get('chord') is True:
if mxNote.get('chord') is False:
mxNote.set('chord', True) # set the first as a chord
if mxNote.voice is not None:
chordVoice = mxNote.voice
if mxNote.get('rest') in [None, False]: # it is a note
# if a chord, do not increment until chord is complete
if mxNote.get('chord') is True:
mxNoteList.append(mxNote)
offsetIncrement = 0
# store lyrics for latter processing
for mxLyric in mxNote.lyricList:
mxLyricList.append(mxLyric)
else:
restAndNoteCount['note'] += 1
try:
n = mxToNote(mxNote, spannerBundle=spannerBundle)
except FromMxObjectsException as strerror:
raise FromMxObjectsException('cannot translate note in measure %s: %s' % (mNumRaw, strerror))
addToStaffReference(mxNote, n, staffReference)
if useVoices:
useVoice = mxNote.voice
if useVoice is None:
useVoice = chordVoice
if useVoice is None:
environLocal.warn("Cannot translate a note with a missing voice tag when no previous voice tag was given. Assuming voice 1... Object is %r " % mxNote)
useVoice = 1
thisVoice = m.voices[useVoice]
if thisVoice is None:
environLocal.warn('Cannot find voice %d for Note %r; putting outside of voices...' % (mxNote.voice, mxNote))
m._insertCore(offsetMeasureNote, n)
else:
thisVoice._insertCore(offsetMeasureNote, n)
else:
m._insertCore(offsetMeasureNote, n)
offsetIncrement = n.quarterLength
currentLyricNumber = 1
for mxLyric in mxNote.lyricList:
lyricObj = mxToLyric(mxLyric)
if lyricObj.number == 0:
lyricObj.number = currentLyricNumber
n.lyrics.append(lyricObj)
currentLyricNumber += 1
nLast = n # update
# if mxNote.get('notationsObj') is not None:
# for mxObjSub in mxNote.get('notationsObj'):
# # deal with ornaments, trill, etc
# pass
else: # its a rest
restAndNoteCount['rest'] += 1
n = note.Rest()
mxToRest(mxNote, inputM21=n)
addToStaffReference(mxNote, n, staffReference)
#m.insert(offsetMeasureNote, n)
if useVoices:
vCurrent = m.voices[mxNote.voice]
if vCurrent is not None:
vCurrent._insertCore(offsetMeasureNote, n)
else:
# this can happen when a part defines multiple staves
# where one staff uses voices but the other staff does not
m._insertCore(offsetMeasureNote, n)
#print m, n, mxNote
else:
m._insertCore(offsetMeasureNote, n)
offsetIncrement = n.quarterLength
nLast = n # update
# if we we have notes in the note list and the next
# note either does not exist or is not a chord, we
# have a complete chord
if len(mxNoteList) > 0 and (mxNoteNext is None
or mxNoteNext.get('chord') is False):
c = mxToChord(mxNoteList, spannerBundle=spannerBundle)
# add any accumulated lyrics
currentLyricNumber = 1
for mxLyric in mxLyricList:
lyricObj = mxToLyric(mxLyric)
if lyricObj.number == 0:
lyricObj.number = currentLyricNumber
c.lyrics.append(lyricObj)
currentLyricNumber += 1
addToStaffReference(mxNoteList, c, staffReference)
if useVoices:
useVoice = mxNote.voice
if useVoice is None:
useVoice = chordVoice
if useVoice is None:
environLocal.warn("Cannot translate a note with a missing voice tag when no previous voice tag was given. Assuming voice 1... Object is %r " % mxNote)
useVoice = 1
thisVoice = m.voices[useVoice]
if thisVoice is None:
environLocal.warn('Cannot find voice %d for Note %r; putting outside of voices...' % (mxNote.voice, mxNote))
m._insertCore(offsetMeasureNote, c)
else:
thisVoice._insertCore(offsetMeasureNote, c)
else:
m._insertCore(offsetMeasureNote, c)
mxNoteList = [] # clear for next chord
mxLyricList = []
offsetIncrement = c.quarterLength
nLast = c # update
# only increment Chords after completion
offsetMeasureNote += offsetIncrement
# mxDirections can be dynamics, repeat expressions, text expressions
elif isinstance(mxObj, mxObjects.Direction):
offsetDirection = mxToOffset(mxObj, divisions)
if mxObj.getDynamicMark() is not None:
# in rare cases there may be more than one dynamic in the same
# direction, so we iterate
for d in mxToDynamicList(mxObj):
addToStaffReference(mxObj, d, staffReference)
#m.insert(offsetMeasureNote, d)
m._insertCore(offsetMeasureNote + offsetDirection, d)
mxDirectionToSpanners(nLast, mxObj, spannerBundle)
# TODO: multiple spanners
# if mxObj.getWedge() is not None:
# w = mxToWedge(mxObj)
# addToStaffReference(mxObj, w, staffReference)
# m._insertCore(offsetMeasureNote, w)
if mxObj.getSegno() is not None:
rm = mxToSegno(mxObj.getSegno())
addToStaffReference(mxObj, rm, staffReference)
m._insertCore(offsetMeasureNote, rm)
if mxObj.getCoda() is not None:
rm = mxToCoda(mxObj.getCoda())
addToStaffReference(mxObj, rm, staffReference)
m._insertCore(offsetMeasureNote, rm)
if mxObj.getMetronome() is not None:
#environLocal.printDebug(['got getMetronome', mxObj.getMetronome()])
mm = mxToTempoIndication(mxObj.getMetronome())
addToStaffReference(mxObj, mm, staffReference)
# need to look for metronome marks defined above
# and look for text defined below
m._insertCore(offsetMeasureNote, mm)
if mxObj.getWords() is not None:
# TODO: need to look for tempo words if we have a metro
#environLocal.printDebug(['found mxWords object', mxObj])
# convert into a list of TextExpression objects
# this may be a TextExpression, or a RepeatExpression
for te in mxToTextExpression(mxObj):
#environLocal.printDebug(['got TextExpression object', repr(te)])
# offset here is a combination of the current position
# (offsetMeasureNote) and and the direction's offset
re = te.getRepeatExpression()
if re is not None:
# the repeat expression stores a copy of the text
# expression within it; replace it here on insertion
addToStaffReference(mxObj, re, staffReference)
m._insertCore(offsetMeasureNote + offsetDirection, re)
else:
addToStaffReference(mxObj, te, staffReference)
m._insertCore(offsetMeasureNote + offsetDirection, te)
elif isinstance(mxObj, mxObjects.Harmony):
mxHarmony = mxObj
h = mxToChordSymbol(mxHarmony)
addToStaffReference(mxObj, h, staffReference)
m._insertCore(offsetMeasureNote, h)
elif isinstance(mxObj, mxObjects.Clef):
cl = mxClefToClef(mxObj)
addToStaffReference(mxObj, cl, staffReference)
m._insertCore(offsetMeasureNote, cl)
#environLocal.printDebug(['staffReference', staffReference])
# if we have voices and/or if we used backup/forward, we may have
# empty space in the stream
if useVoices:
for v in m.voices:
if len(v) > 0: # do not bother with empty voices
v.makeRests(inPlace=True)
v._elementsChanged()
m._elementsChanged()
if restAndNoteCount['rest'] == 1 and restAndNoteCount['note'] == 0:
# full measure rest with no notes...
if useVoices:
pass # should do this on a per voice basis...
m._fullMeasureRest = False
else:
m._fullMeasureRest = True
else:
m._fullMeasureRest = False
return m, staffReference, transposition
#-------------------------------------------------------------------------------
# Streams
def mxToStreamPart(mxScore, partId, spannerBundle=None, inputM21=None):
    '''
    Load a part into a new Stream or one provided by
    `inputM21` given an mxScore and a part name.

    The `spannerBundle` reference, when passed in,
    is used to accumulate Spanners. These are not inserted here.

    Though it is incorrect MusicXML, PDFtoMusic creates
    empty measures when it should create full
    measures of rests (possibly hidden). This routine
    fixes that bug. See http://musescore.org/en/node/15129

    Returns either a stream.Part or (for multi-staff parts handled by
    separateOutPartStaffs) the streamPartStaff; either way the created
    parts are also inserted into the Score `s`.
    '''
    #environLocal.printDebug(['calling Stream.mxToStreamPart'])
    if inputM21 == None:
        # need a Score to load parts into
        s = stream.Score()
    else:
        s = inputM21
    if spannerBundle == None:
        spannerBundle = spanner.SpannerBundle()
    mxPart = mxScore.getPart(partId)
    # in some cases there may be more than one instrument defined
    # in each score part; this has not been tested
    mxInstrument = mxScore.getScorePart(partId)
    # create a new music21 instrument
    instrumentObj = instrument.Instrument()
    if mxInstrument is not None: # mxInstrument is a ScorePart
        # need an mxScorePart here
        mxToInstrument(mxInstrument, instrumentObj)
    # add part id as group
    instrumentObj.groups.append(partId)
    streamPart = stream.Part() # create a part instance for each part
    # always assume at sounding, unless transposition is defined in attributes
    streamPart.atSoundingPitch = True
    # set part id to stream best name
    if instrumentObj.bestName() is not None:
        streamPart.id = instrumentObj.bestName()
    streamPart._insertCore(0, instrumentObj) # add instrument at zero offset
    # one staffReference dict (staff-number -> elements) per measure
    staffReferenceList = []
    # offset is in quarter note length
    oMeasure = 0.0
    lastTimeSignature = None
    lastTransposition = None # may change at measure boundaries
    lastMeasureWasShort = False # keep track of whether the last measure was short...
    lastMeasureNumber = 0
    lastMeasureSuffix = None
    for i, mxMeasure in enumerate(mxPart):
        # t here is transposition, if defined; otherwise it is None
        try:
            m, staffReference, t = mxToMeasure(mxMeasure,
                                    spannerBundle=spannerBundle,
                                    lastMeasureInfo=(lastMeasureNumber, lastMeasureSuffix))
        except Exception as e:
            # re-raise the same exception type, annotated with the measure
            # number and a formatted traceback for easier debugging
            import sys
            measureNumber = "unknown"
            try:
                measureNumber = mxMeasure.get('number')
            except:
                # best-effort only: the measure number is purely diagnostic
                pass
            # see http://stackoverflow.com/questions/6062576/adding-information-to-a-python-exception
            execInfoTuple = sys.exc_info()
            if hasattr(e, 'message'):
                emessage = e.message
            else:
                emessage = execInfoTuple[0].__name__ + " : " #+ execInfoTuple[1].__name__
            message = "In measure (" + measureNumber + "): " + emessage
            raise type(e)(type(e)(message), pprint.pformat(traceback.extract_tb(execInfoTuple[2])))
        if t is not None:
            if lastTransposition is None and i == 0: # if this is the first
                #environLocal.printDebug(['transposition', t])
                instrumentObj.transposition = t
            else: # if not the first measure, need to copy as well
                # for now, copy Instrument, change transposition,
                # could insert in part, or in measure
                newInst = copy.deepcopy(instrumentObj)
                newInst.transposition = t
                streamPart._insertCore(oMeasure, newInst)
            # if a transposition is defined in musicxml, we assume it is
            # at written pitch
            streamPart.atSoundingPitch = False
            # store last for comparison
            lastTransposition = t
        # there will be one for each measure
        staffReferenceList.append(staffReference)
        if m.number != lastMeasureNumber:
            # we do this check so that we do not compound suffixes, i.e.:
            # 23, 23.X1, 23.X1X2, 23.X1X2X3
            # and instead just do:
            # 23, 23.X1, 23.X2, etc.
            lastMeasureNumber = m.number
            lastMeasureSuffix = m.numberSuffix
        if m.timeSignature is not None:
            lastTimeSignature = m.timeSignature
        elif lastTimeSignature is None and m.timeSignature is None:
            # if no time signature is defined, need to get a default
            ts = meter.TimeSignature()
            ts.load('%s/%s' % (defaults.meterNumerator,
                               defaults.meterDenominatorBeatType))
            lastTimeSignature = ts
        if m._fullMeasureRest is True:
            # a full-measure rest should span the whole bar even if encoded
            # with a literal whole-note duration
            r1 = m.getElementsByClass('Rest')[0]
            if r1.duration.quarterLength == 4.0 and r1.duration.quarterLength != lastTimeSignature.barDuration.quarterLength:
                r1.duration.quarterLength = lastTimeSignature.barDuration.quarterLength
                m._elementsChanged()
        del(m._fullMeasureRest)
        # add measure to stream at current offset for this measure
        streamPart._insertCore(oMeasure, m)
        # note: we cannot assume that the time signature properly
        # describes the offsets w/n this bar. need to look at
        # offsets within measure; if the .highestTime value is greater
        # use this as the next offset
        mHighestTime = m.highestTime
        lastTimeSignatureQuarterLength = lastTimeSignature.barDuration.quarterLength
        if mHighestTime >= lastTimeSignatureQuarterLength :
            mOffsetShift = mHighestTime
        elif mHighestTime == 0.0 and len(m.flat.notesAndRests) == 0:
            ## this routine fixes a bug in PDFtoMusic and other MusicXML writers
            ## that omit empty rests in a Measure. It is a very quick test if
            ## the measure has any notes. Slower if it does not.
            r = note.Rest()
            r.duration.quarterLength = lastTimeSignatureQuarterLength
            m.insert(0.0, r)
            mOffsetShift = lastTimeSignatureQuarterLength
        else: # use time signature
            # for the first measure, this may be a pickup
            # must detect this when writing, as next measures offsets will be
            # incorrect
            if oMeasure == 0.0:
                # cannot get bar duration proportion if cannot get a ts
                if m.barDurationProportion() < 1.0:
                    m.padAsAnacrusis()
                    #environLocal.printDebug(['incompletely filled Measure found on musicxml import; interpreting as a anacrusis:', 'padingLeft:', m.paddingLeft])
                mOffsetShift = mHighestTime
            # assume that, even if measure is incomplete, the next bar should
            # start at the duration given by the time signature, not highestTime
            ### no...let's not do this...
            else:
                mOffsetShift = mHighestTime #lastTimeSignatureQuarterLength
                if lastMeasureWasShort is True:
                    if m.barDurationProportion() < 1.0:
                        m.padAsAnacrusis() # probably a pickup after a repeat or phrase boundary or something
                        lastMeasureWasShort = False
                else:
                    if mHighestTime < lastTimeSignatureQuarterLength:
                        lastMeasureWasShort = True
                    else:
                        lastMeasureWasShort = False
        oMeasure += mOffsetShift
    # if we have multiple staves defined, add more parts, and transfer elements
    # note: this presently has to look at _idLastDeepCopyOf to get matches
    # to find removed elements after copying; this is probably not the
    # best way to do this. # V2.1 -- is not/will not be doing this. in fact idLastDeepCopyOf is
    # going away...
    # for this part, if any elements are components in the spannerBundle,
    # then then we need to update the spannerBundle after the part is copied
    streamPartStaff = None
    if mxPart.getStavesCount() > 1:
        separateOutPartStaffs(mxPart, streamPart, spannerBundle, s, staffReferenceList, partId)
    else:
        streamPart.addGroupForElements(partId) # set group for components
        streamPart.groups.append(partId) # set group for stream itself
        # TODO: this does not work with voices; there, Spanners
        # will be copied into the Score
        # copy spanners that are complete into the part, as this is the
        # highest level container that needs them
        rm = []
        for sp in spannerBundle.getByCompleteStatus(True):
            streamPart._insertCore(0, sp)
            rm.append(sp)
        # remove from original spanner bundle
        for sp in rm:
            spannerBundle.remove(sp)
        # s is the score; adding the part to the score
        streamPart._elementsChanged()
        s._insertCore(0, streamPart)
    s._elementsChanged()
    # when adding parts to this Score
    # this assumes all start at the same place
    # even if there is only one part, it will be placed in a Stream
    if streamPartStaff is not None:
        return streamPartStaff
    else:
        return streamPart
def separateOutPartStaffs(mxPart, streamPart, spannerBundle, s, staffReferenceList, partId):
    '''
    given an mxPart and other necessary information, insert into the score (s) multiple
    PartStaff objects separating the information for one part from the other

    Each staff number found in `staffReferenceList` yields one deep copy of
    `streamPart`, re-classed as a stream.PartStaff and stripped of elements
    belonging to other staves; the copies are inserted into `s` at offset 0.
    Returns None.
    '''
    # transfer all spanners to the streamPart such that they get
    # updated in copying, then remove them
    rm = []
    for sp in spannerBundle.getByCompleteStatus(True):
        streamPart._insertCore(0, sp)
        rm.append(sp)
    # remove from original spanner bundle
    for sp in rm:
        spannerBundle.remove(sp)
    # get staves will return a number, between 1 and count
    #for staffCount in range(mxPart.getStavesCount()):
    for staffNumber in _getUniqueStaffKeys(staffReferenceList):
        partStaffId = '%s-Staff%s' % (partId, staffNumber)
        #environLocal.printDebug(['partIdStaff', partIdStaff, 'copying streamPart'])
        # this deepcopy is necessary, as we will remove components
        # in each staff that do not belong
        # TODO: Do n-1 deepcopies, instead of n, since the last PartStaff can just remove from the original Part
        streamPartStaff = copy.deepcopy(streamPart)
        # assign this as a PartStaff, a subclass of Part
        streamPartStaff.__class__ = stream.PartStaff
        streamPartStaff.id = partStaffId
        # remove all elements that are not part of this staff
        mStream = streamPartStaff.getElementsByClass('Measure')
        for i, staffReference in enumerate(staffReferenceList):
            staffExclude = _getStaffExclude(staffReference, staffNumber)
            if len(staffExclude) > 0:
                m = mStream[i]
                for eRemove in staffExclude:
                    # eRemove is the pre-copy original; find its deep copy
                    # via the derivation chain and remove that instead
                    for eMeasure in m:
                        if eMeasure.derivation.origin is eRemove and eMeasure.derivation.method == '__deepcopy__':
                            m.remove(eMeasure)
                            break
                    for v in m.voices:
                        v.remove(eRemove)
                        for eVoice in v.elements:
                            if eVoice.derivation.origin is eRemove and eVoice.derivation.method == '__deepcopy__':
                                v.remove(eVoice)
                # after adjusting voices see if voices can be reduced or
                # removed
                #environLocal.printDebug(['calling flattenUnnecessaryVoices: voices before:', len(m.voices)])
                m.flattenUnnecessaryVoices(force=False, inPlace=True)
                #environLocal.printDebug(['calling flattenUnnecessaryVoices: voices after:', len(m.voices)])
        # TODO: copying spanners may have created orphaned
        # spanners that no longer have valid connections
        # in this part; should be deleted
        streamPartStaff.addGroupForElements(partStaffId)
        streamPartStaff.groups.append(partStaffId)
        streamPartStaff._elementsChanged()
        s._insertCore(0, streamPartStaff)
def _getUniqueStaffKeys(staffReferenceList):
'''
Given a list of staffReference dictionaries,
collect and return a list of all unique keys except None
'''
post = []
for staffReference in staffReferenceList:
for key in staffReference:
if key is not None and key not in post:
post.append(key)
post.sort()
# if len(post) > 0:
# print post
return post
def _getStaffExclude(staffReference, targetKey):
'''
Given a staff reference dictionary, remove and combine in a list all elements that
are not part of the given key. Thus, return a list of all entries to remove.
It keeps those elements under staff key None (common to all) and
those under given key. This then is the list of all elements that should be deleted.
'''
post = []
for key in staffReference:
if key is None or int(key) == int(targetKey):
continue
post += staffReference[key]
return post
def mxScoreToScore(mxScore, spannerBundle=None, inputM21=None):
    '''
    Translate an mxScore into a music21 Score object
    or puts it into the
    given inputM21 object (which does not necessarily
    have to be a :class:`~music21.stream.Score`
    object. It can be any :class:`~music21.stream.Stream`
    object)

    All spannerBundles accumulated at all lower levels
    are inserted here.

    Also translates part groups (StaffGroup spanners), metadata,
    score-wide layout defaults, credits, and encoding <supports> flags
    for explicit system/page breaks.
    '''
    # TODO: may not want to wait to this level to insert spanners; may want to
    # insert in lower positions if it makes sense
    if inputM21 == None:
        s = stream.Score()
    else:
        s = inputM21
    if spannerBundle == None:
        spannerBundle = spanner.SpannerBundle()
    mxPartIds = mxScore.getPartIdsFromPartListObj()
    #print mxPartIds
    #mxPartIdDictionary = mxScore.partIdToNameDict()
    m21PartIdDictionary = {}
    # values are part names
    #partNameIds = mxPartIdDictionary.keys()
    #partNameIds.sort()
    #for partId in partNameIds: # part names are part ids
    for pNum, partId in enumerate(mxPartIds): # part names are part ids
        # NOTE: setting partId not partId: might change
        # return the part; however, it is still already attached to the Score
        try:
            part = mxToStreamPart(mxScore, partId=partId,
                                  spannerBundle=spannerBundle, inputM21=s)
        except Exception as e:
            # re-raise the same exception type annotated with the part
            # number/id and a formatted traceback for easier debugging
            import sys
            # see http://stackoverflow.com/questions/6062576/adding-information-to-a-python-exception
            execInfoTuple = sys.exc_info()
            if hasattr(e, 'message'):
                emessage = e.message
            else:
                emessage = str(execInfoTuple[1])
            message = "For part number " + str(pNum + 1) + ", with Id (" + partId + "): " + emessage
            raise type(e)(type(e)(message), pprint.pformat(traceback.extract_tb(execInfoTuple[2])))
        # update dictionary to store music21 part
        m21PartIdDictionary[partId] = part
        #print("%r %s %r" % (m21PartIdDictionary, partId, part))
    # get part/staff groups
    #environLocal.printDebug(['partgroups:', mxScore.getPartGroupData()])
    partGroupData = mxScore.getPartGroupData()
    for partGroup in partGroupData: # a list of dictionaries
        # create music21 spanner StaffGroup
        sg = layout.StaffGroup()
        for partId in partGroup['scorePartIds']:
            # get music21 part from partIdDictionary
            try:
                sg.addSpannedElements(m21PartIdDictionary[partId])
            except KeyError as ke:
                raise FromMxObjectsException("Cannot find part in m21PartIdDictionary: %s \n Full Dict:\n %r " % (ke, m21PartIdDictionary))
        # use configuration routine to transfer/set attributes;
        # sets complete status as well
        configureStaffGroupFromMxPartGroup(sg, partGroup['partGroup'])
        spannerBundle.append(sg) # will be added to the Score
    # add metadata object; this is placed after all other parts now
    # these means that both Parts and other objects live on Stream.
    md = mxScoreToMetadata(mxScore)
    s._insertCore(0, md)
    if mxScore.defaultsObj is not None:
        scoreLayout = mxDefaultsToScoreLayout(mxScore.defaultsObj)
        s._insertCore(0, scoreLayout)
    # store credits on Score stream
    for mxCredit in mxScore.creditList:
        co = mxCreditToTextBox(mxCredit)
        s._insertCore(0, co) # insert position does not matter
    ## get supports information
    mxIdentification = mxScore.identificationObj
    if mxIdentification is not None:
        mxEncoding = mxIdentification.encodingObj
        if mxEncoding is not None:
            for mxSupports in mxEncoding.supportsList:
                if (mxSupports.get('attribute') == 'new-system' and
                    mxSupports.get('value') == 'yes'):
                    s.definesExplicitSystemBreaks = True
                    for p in s.parts:
                        p.definesExplicitSystemBreaks = True
                elif (mxSupports.get('attribute') == 'new-page' and
                      mxSupports.get('value') == 'yes'):
                    s.definesExplicitPageBreaks = True
                    for p in s.parts:
                        p.definesExplicitPageBreaks = True
    # only insert complete spanners; at each level possible, complete spanners
    # are inserted into either the Score or the Part
    # storing complete Part spanners in a Part permits extracting parts with spanners
    rm = []
    for sp in spannerBundle.getByCompleteStatus(True):
        s._insertCore(0, sp)
        rm.append(sp)
    for sp in rm:
        spannerBundle.remove(sp)
    s._elementsChanged()
    return s
#------------------------------------------------------------------------------
# beam and beams
def mxToBeam(mxBeam, inputM21 = None):
    '''
    given an mxBeam object return a :class:`~music21.beam.Beam` object

    The MusicXML beam value maps to music21 as follows:

    'begin' -> 'start'; 'continue' -> 'continue'; 'end' -> 'stop';
    'forward hook' -> 'partial' with direction 'right';
    'backward hook' -> 'partial' with direction 'left'.

    If `inputM21` is given it is filled in and returned instead of a
    newly created Beam.  Any other beam value raises
    FromMxObjectsException.
    '''
    beamOut = beam.Beam() if inputM21 is None else inputM21
    mxType = mxBeam.get('charData')
    # (type, direction) pairs; direction None means "leave untouched"
    translation = {
        'begin': ('start', None),
        'continue': ('continue', None),
        'end': ('stop', None),
        'forward hook': ('partial', 'right'),
        'backward hook': ('partial', 'left'),
    }
    if mxType not in translation:
        raise FromMxObjectsException('unexpected beam type encountered (%s)' % mxType)
    beamType, beamDirection = translation[mxType]
    beamOut.type = beamType
    if beamDirection is not None:
        beamOut.direction = beamDirection
    return beamOut
def mxToBeams(mxBeamList, inputM21 = None):
    '''given a list of mxBeam objects, sets the beamsList

    Each mxBeam is translated with :func:`mxToBeam` and numbered
    sequentially from 1.  If `inputM21` is given it is filled in and
    returned instead of a newly created Beams object.
    '''
    beamsOut = beam.Beams() if inputM21 is None else inputM21
    for beamNumber, mxBeam in enumerate(mxBeamList, start=1):
        beamObj = mxToBeam(mxBeam)
        beamObj.number = beamNumber
        beamsOut.beamsList.append(beamObj)
    return beamsOut
#---------------------------------------------------------
# layout
def mxPrintToPageLayout(mxPrint, inputM21 = None):
    '''
    Given an mxPrint object, set object data for
    the print section of a layout.PageLayout object.

    Reads the 'new-page' flag and the 'page-number' attribute, then the
    first embedded PageLayout component (page size and margins), if any.
    When `inputM21` is None a new PageLayout is created and returned;
    otherwise the given object is updated in place and nothing is
    returned.
    '''
    if inputM21 is None:
        pageLayout = layout.PageLayout()
    else:
        pageLayout = inputM21
    # encoded as yes/no in musicxml; anything other than 'yes' means False
    pageLayout.isNew = (mxPrint.get('newPage') == 'yes')
    number = mxPrint.get('page-number')
    if number is not None and number != "":
        # may arrive as a string or already as a number
        pageLayout.pageNumber = int(number) if common.isStr(number) else number
    # use the first PageLayout component found, if any
    mxPageLayout = None
    for component in mxPrint:
        if isinstance(component, mxObjects.PageLayout):
            mxPageLayout = component
            break # find first and break
    if mxPageLayout is not None:
        mxPageLayoutToPageLayout(mxPageLayout, inputM21 = pageLayout)
    if inputM21 is None:
        return pageLayout
def mxPageLayoutToPageLayout(mxPageLayout, inputM21 = None):
    '''
    get a PageLayout object from an mxPageLayout

    Called out from mxPrintToPageLayout because it
    is also used in the <defaults> tag.

    When `inputM21` is None a new PageLayout is created and returned;
    otherwise the given object is updated in place and nothing is
    returned.
    '''
    if inputM21 is None:
        pageLayout = layout.PageLayout()
    else:
        pageLayout = inputM21
    # page size; values may be floating point
    for dimensionName in ('pageHeight', 'pageWidth'):
        value = mxPageLayout.get(dimensionName)
        if value is not None:
            setattr(pageLayout, dimensionName, float(value))
    # scan all components; the last PageMargins found wins
    mxPageMargins = None
    for component in mxPageLayout:
        if isinstance(component, mxObjects.PageMargins):
            mxPageMargins = component
    if mxPageMargins is not None:
        # margin values may be floating point
        for marginName in ('leftMargin', 'rightMargin', 'topMargin', 'bottomMargin'):
            value = mxPageMargins.get(marginName)
            if value is not None:
                setattr(pageLayout, marginName, float(value))
    if inputM21 is None:
        return pageLayout
def mxPrintToSystemLayout(mxPrint, inputM21 = None):
    '''
    Given an mxPrint object, set object data on a layout.SystemLayout.

    Reads the 'new-system' flag and, if present, the first embedded
    SystemLayout component (margins and distances).  When `inputM21` is
    None a new SystemLayout is created and returned; otherwise the given
    object is updated in place and nothing is returned.
    '''
    if inputM21 is None:
        systemLayout = layout.SystemLayout()
    else:
        systemLayout = inputM21
    # encoded as yes/no in musicxml; any other value (including a missing
    # attribute) leaves isNew untouched
    newSystem = mxPrint.get('newSystem')
    if newSystem == 'yes':
        systemLayout.isNew = True
    elif newSystem == 'no':
        systemLayout.isNew = False
    # use the first SystemLayout component found, if any
    mxSystemLayout = None
    for component in mxPrint:
        if isinstance(component, mxObjects.SystemLayout):
            mxSystemLayout = component
            break # find first and break
    if mxSystemLayout is not None:
        mxSystemLayoutToSystemLayout(mxSystemLayout, inputM21 = systemLayout)
    if inputM21 is None:
        return systemLayout
def mxSystemLayoutToSystemLayout(mxSystemLayout, inputM21 = None):
    '''
    Get a SystemLayout object from an mxSystemLayout.

    Called out from mxPrintToSystemLayout because it
    is also used in the <defaults> tag.
    '''
    if inputM21 is None:
        systemLayout = layout.SystemLayout()
    else:
        systemLayout = inputM21
    # use the first SystemMargins child, if any
    mxSystemMargins = None
    for x in mxSystemLayout:
        if isinstance(x, mxObjects.SystemMargins):
            mxSystemMargins = x
            break
    if mxSystemMargins is not None:
        # may be floating point values
        data = mxSystemMargins.get('leftMargin')
        if data is not None:
            systemLayout.leftMargin = float(data)
        data = mxSystemMargins.get('rightMargin')
        if data is not None:
            systemLayout.rightMargin = float(data)
        # BUG FIX: top and bottom margins were previously assigned to
        # systemLayout.rightMargin by a copy-paste error, clobbering the
        # right margin.  (MusicXML <system-margins> normally carries only
        # left/right values, so these are usually None, but when present
        # they must go to the correct attributes.)
        data = mxSystemMargins.get('topMargin')
        if data is not None:
            systemLayout.topMargin = float(data)
        data = mxSystemMargins.get('bottomMargin')
        if data is not None:
            systemLayout.bottomMargin = float(data)
    if mxSystemLayout.systemDistance is not None:
        systemLayout.distance = float(mxSystemLayout.systemDistance)
    if mxSystemLayout.topSystemDistance is not None:
        systemLayout.topDistance = float(mxSystemLayout.topSystemDistance)
    if inputM21 is None:
        return systemLayout
def mxPrintToStaffLayoutList(mxPrint, inputM21 = None):
    '''
    Given an mxPrint object, return a list of StaffLayout objects (may be empty)

    >>> mxPrint = musicxml.mxObjects.Print()
    # this is a red-herring... does nothing here...
    >>> mxPrint.set('new-system', 'yes')
    >>> mxStaffLayout = musicxml.mxObjects.StaffLayout()
    >>> mxStaffLayout.staffDistance = 55
    >>> mxStaffLayout.set('number', 1)
    >>> mxPrint.append(mxStaffLayout)
    >>> slList = musicxml.fromMxObjects.mxPrintToStaffLayoutList(mxPrint)
    >>> sl = slList[0]
    >>> sl.distance
    55.0
    >>> sl.staffNumber
    1
    '''
    # convert every StaffLayout child of the print element
    return [mxStaffLayoutToStaffLayout(child)
            for child in mxPrint
            if isinstance(child, mxObjects.StaffLayout)]
def mxStaffLayoutToStaffLayout(mxStaffLayout, inputM21 = None):
    '''
    Get a StaffLayout object from an mxStaffLayout.

    Called out from mxPrintToStaffLayoutList because it
    is also used in the <defaults> tag.
    '''
    if inputM21 is None:
        staffLayout = layout.StaffLayout()
    else:
        staffLayout = inputM21
    if mxStaffLayout.staffDistance is not None:
        staffLayout.distance = float(mxStaffLayout.staffDistance)
    try:
        data = mxStaffLayout.get('number')
        if data is not None:
            staffLayout.staffNumber = int(data)
    except xmlnode.XMLNodeException:
        # 'number' is not an attribute on every mxStaffLayout variant
        pass
    if inputM21 is None:
        return staffLayout
#-----------------------------------------------------------------
# metadata
def mxScoreToMetadata(mxScore, inputM21 = None):
    '''
    Use an mxScore, to fill in parameters of a
    :class:`~music21.metadata.Metadata` object.

    if `inputM21` is None, a new `Metadata` object
    is created and returned at the end.

    Otherwise, the parameters of this Metadata object
    are changed and nothing is returned.
    '''
    if inputM21 is not None:
        md = inputM21
    else:
        md = metadata.Metadata()
    mxMovementNumber = mxScore.get('movementNumber')
    if mxMovementNumber is not None:
        md.movementNumber = mxMovementNumber
    # xml calls this title not name
    mxName = mxScore.get('movementTitle')
    if mxName is not None:
        md.movementName = mxName
    mxWork = mxScore.get('workObj')
    if mxWork is not None: # may be set to none
        md.title = mxWork.get('workTitle')
        #environLocal.printDebug(['mxScoreToMetadata, got title', md.title])
        md.number = mxWork.get('workNumber')
        md.opusNumber = mxWork.get('opus')
    mxIdentification = mxScore.get('identificationObj')
    if mxIdentification is not None:
        for mxCreator in mxIdentification.get('creatorList'):
            # do an mx conversion for mxCreator to Contributor
            c = mxCreatorToContributor(mxCreator)
            md._contributors.append(c)
    # not yet supported; an encoding is also found in identification obj
    # mxEncoding = mxScore.get('encodingObj')
    if inputM21 is None:
        return md
def mxCreatorToContributor(mxCreator, inputM21 = None):
    '''
    Given an mxCreator, fill the necessary parameters of a Contributor.

    >>> mxCreator = musicxml.mxObjects.Creator()
    >>> mxCreator.set('type', 'composer')
    >>> mxCreator.set('charData', 'Beethoven, <NAME>')

    >>> c = musicxml.fromMxObjects.mxCreatorToContributor(mxCreator)
    >>> c
    <music21.metadata.primitives.Contributor object at 0x...>
    >>> c.role
    'composer'
    >>> c.name
    '<NAME>'
    '''
    if inputM21 is None:
        c = metadata.Contributor()
    else:
        c = inputM21
    mxCreatorType = mxCreator.get('type')
    # only accept roles that music21 knows about; anything else is
    # silently ignored (roles are not constrained in musicxml)
    if (mxCreatorType is not None
            and mxCreatorType in metadata.Contributor.roleNames):
        c.role = mxCreatorType
    #environLocal.printDebug(['mxCreatorToContributor:', 'received unknown Contributor role: %s' % mxCreatorType])
    # remove any whitespace found
    c.name = mxCreator.get('charData').strip()
    if inputM21 is None:
        return c
#-------------------------------------------------------------------------------
class Test(unittest.TestCase):
    '''
    Round-trip and import tests for MusicXML -> music21 conversion.

    Most tests parse fixtures from music21.musicxml.testPrimitive or the
    bundled corpus and assert counts/values of the resulting music21
    objects; they require a full music21 installation with corpus access.
    '''
    def runTest(self):
        pass
    def testBasic(self):
        pass
    def pitchOut(self, listIn):
        '''
        make it so that the tests that look for the old-style pitch.Pitch
        representation still work.
        '''
        out = "["
        for p in listIn:
            out += str(p) + ', '
        out = out[0:len(out)-2]
        out += "]"
        return out
    def testBarRepeatConversion(self):
        from music21 import corpus
        #a = converter.parse(testPrimitive.simpleRepeat45a)
        # this is a good example with repeats
        s = corpus.parse('k80/movement3')
        for p in s.parts:
            post = p.flat.getElementsByClass('Repeat')
            self.assertEqual(len(post), 6)
        #a = corpus.parse('opus41no1/movement3')
        #s.show()
    def testVoices(self):
        from music21 import converter
        from music21.musicxml import testPrimitive
        s = converter.parse(testPrimitive.voiceDouble)
        m1 = s.parts[0].getElementsByClass('Measure')[0]
        self.assertEqual(m1.hasVoices(), True)
        self.assertEqual([v.id for v in m1.voices], [u'1', u'2'])
        self.assertEqual([e.offset for e in m1.voices[0]], [0.0, 1.0, 2.0, 3.0])
        self.assertEqual([e.offset for e in m1.voices['1']], [0.0, 1.0, 2.0, 3.0])
        self.assertEqual([e.offset for e in m1.voices[1]], [0.0, 2.0, 2.5, 3.0, 3.5])
        self.assertEqual([e.offset for e in m1.voices['2']], [0.0, 2.0, 2.5, 3.0, 3.5])
        #s.show()
    def testSlurInputA(self):
        from music21 import converter
        from music21.musicxml import testPrimitive
        s = converter.parse(testPrimitive.spannersSlurs33c)
        # have 10 spanners
        self.assertEqual(len(s.flat.getElementsByClass('Spanner')), 5)
        # can get the same from a getAll search
        self.assertEqual(len(s.getAllContextsByClass('Spanner')), 5)
        # try to get all spanners from the first note
        self.assertEqual(len(s.flat.notesAndRests[0].getAllContextsByClass('Spanner')), 5)
        #s.show('t')
        #s.show()
    def testMultipleStavesPerPartA(self):
        from music21 import converter
        from music21.musicxml import testPrimitive
        from music21.musicxml import xmlHandler
        mxDoc = xmlHandler.Document()
        mxDoc.read(testPrimitive.pianoStaff43a)
        # parts are stored in component list
        p1 = mxDoc.score.componentList[0]
        self.assertEqual(p1.getStavesCount(), 2)
        s = converter.parse(testPrimitive.pianoStaff43a)
        self.assertEqual(len(s.parts), 2)
        #s.show()
        self.assertEqual(len(s.parts[0].flat.getElementsByClass('Note')), 1)
        self.assertEqual(len(s.parts[1].flat.getElementsByClass('Note')), 1)
        self.assertEqual(isinstance(s.parts[0], stream.PartStaff), True)
        self.assertEqual(isinstance(s.parts[1], stream.PartStaff), True)
    def testMultipleStavesPerPartB(self):
        from music21 import converter
        from music21.musicxml import testFiles
        s = converter.parse(testFiles.moussorgskyPromenade) # @UndefinedVariable
        self.assertEqual(len(s.parts), 2)
        self.assertEqual(len(s.parts[0].flat.getElementsByClass('Note')), 19)
        # only chords in the second part
        self.assertEqual(len(s.parts[1].flat.getElementsByClass('Note')), 0)
        self.assertEqual(len(s.parts[0].flat.getElementsByClass('Chord')), 11)
        self.assertEqual(len(s.parts[1].flat.getElementsByClass('Chord')), 11)
        #s.show()
    def testMultipleStavesPerPartC(self):
        from music21 import corpus
        s = corpus.parse('schoenberg/opus19/movement2')
        self.assertEqual(len(s.parts), 2)
        s = corpus.parse('schoenberg/opus19/movement6')
        self.assertEqual(len(s.parts), 2)
        #s.show()
    def testSpannersA(self):
        from music21 import converter
        from music21.musicxml import testPrimitive
        s = converter.parse(testPrimitive.spanners33a)
        # this number will change as more are being imported
        self.assertEqual(len(s.flat.spanners) >= 2, True)
        #environLocal.printDebug(['pre s.measures(2,3)', 's', s])
        ex = s.measures(2, 3) # this needs to get all spanners too
        # all spanners are referenced over; even ones that may not be relevant
        self.assertEqual(len(ex.flat.spanners), 14)
        #ex.show()
        # slurs are on measures 2, 3
        # crescendos are on measures 4, 5
        # wavy lines on measures 6, 7
        # brackets etc. on measures 10-14
        # glissando on measure 16
    def testTextExpressionsA(self):
        from music21 import converter
        from music21.musicxml import testPrimitive
        s = converter.parse(testPrimitive.textExpressions)
        #s.show()
        self.assertEqual(len(s.flat.getElementsByClass('TextExpression')), 3)
        p1 = s.parts[0]
        m1 = p1.getElementsByClass('Measure')[0]
        self.assertEqual(len(m1.getElementsByClass('TextExpression')), 0)
        # all in measure 2
        m2 = p1.getElementsByClass('Measure')[1]
        self.assertEqual(len(m2.getElementsByClass('TextExpression')), 3)
        teStream = m2.getElementsByClass('TextExpression')
        self.assertEqual([te.offset for te in teStream], [1.0, 1.5, 4.0])
        #s.show()
    def testTextExpressionsC(self):
        from music21 import corpus
        s = corpus.parse('bwv66.6')
        p = s.parts[0]
        for m in p.getElementsByClass('Measure'):
            for n in m.flat.notes:
                if n.pitch.name in ['B']:
                    msg = '%s\n%s' % (n.pitch.nameWithOctave, n.duration.quarterLength)
                    te = expressions.TextExpression(msg)
                    te.size = 14
                    te.style = 'bold'
                    te.justify = 'center'
                    te.enclosure = 'rectangle'
                    te.positionVertical = -80
                    m.insert(n.offset, te)
        #p.show()
    def testTextExpressionsD(self):
        from music21 import corpus
        # test placing text expression in arbitrary locations
        s = corpus.parse('bwv66.6')
        p = s.parts[-1] # get bass
        for m in p.getElementsByClass('Measure')[1:]:
            for pos in [1.5, 2.5]:
                te = expressions.TextExpression(pos)
                te.style = 'bold'
                te.justify = 'center'
                te.enclosure = 'rectangle'
                m.insert(pos, te)
        #p.show()
    def testTextExpressionsE(self):
        import random
        s = stream.Stream()
        for i in range(6):
            m = stream.Measure(number=i + 1)
            m.append(layout.SystemLayout(isNew=True))
            m.append(note.Rest(type='whole'))
            s.append(m)
        for m in s.getElementsByClass('Measure'):
            offsets = [x * .25 for x in range(16)]
            random.shuffle(offsets)
            offsets = offsets[:4]
            for o in offsets:
                te = expressions.TextExpression(o)
                te.style = 'bold'
                te.justify = 'center'
                te.enclosure = 'rectangle'
                m.insert(o, te)
        #s.show()
    def testImportRepeatExpressionsA(self):
        # test importing from musicxml
        from music21.musicxml import testPrimitive
        from music21 import converter
        # has one segno
        s = converter.parse(testPrimitive.repeatExpressionsA)
        self.assertEqual(len(s.flat.getElementsByClass(repeat.Segno)), 1)
        self.assertEqual(len(s.flat.getElementsByClass(repeat.Fine)), 1)
        self.assertEqual(len(s.flat.getElementsByClass(repeat.DalSegnoAlFine)), 1)
        # has two codas
        s = converter.parse(testPrimitive.repeatExpressionsB)
        self.assertEqual(len(s.flat.getElementsByClass(repeat.Coda)), 2)
        # has one d.c.al coda
        self.assertEqual(len(s.flat.getElementsByClass(repeat.DaCapoAlCoda)), 1)
    def testImportRepeatBracketA(self):
        from music21 import corpus
        # has repeats in it; start with single measure
        s = corpus.parse('opus74no1', 3)
        # there are 2 for each part, totaling 8
        self.assertEqual(len(s.flat.getElementsByClass('RepeatBracket')), 8)
        # can get for each part as spanners are stored in Part now
        # TODO: need to test getting repeat brackets after measure extraction
        #s.parts[0].show() # 72 through 77
        sSub = s.parts[0].measures(72, 77)
        # 2 repeat brackets are gathered b/c they are stored at the Part by
        # default
        rbSpanners = sSub.getElementsByClass('RepeatBracket')
        self.assertEqual(len(rbSpanners), 2)
    def testImportVoicesA(self):
        # testing problematic voice imports
        from music21.musicxml import testPrimitive
        from music21 import converter
        # this 2 part segments was importing multiple voices within
        # a measure, even though there was no data in the second voice
        s = converter.parse(testPrimitive.mixedVoices1a)
        self.assertEqual(len(s.parts), 2)
        # there are voices, but they have been removed
        self.assertEqual(len(s.parts[0].getElementsByClass(
            'Measure')[0].voices), 0)
        #s.parts[0].show('t')
        #self.assertEqual(len(s.parts[0].voices), 2)
        s = converter.parse(testPrimitive.mixedVoices1b)
        self.assertEqual(len(s.parts), 2)
        self.assertEqual(len(s.parts[0].getElementsByClass(
            'Measure')[0].voices), 0)
        #s.parts[0].show('t')
        # this case, there were 4, but there should be 2
        s = converter.parse(testPrimitive.mixedVoices2)
        self.assertEqual(len(s.parts), 2)
        self.assertEqual(len(s.parts[0].getElementsByClass(
            'Measure')[0].voices), 2)
        self.assertEqual(len(s.parts[1].getElementsByClass(
            'Measure')[0].voices), 2)
        #s.parts[0].show('t')
        # s = converter.parse(testPrimitive.mixedVoices1b)
        # s = converter.parse(testPrimitive.mixedVoices2)
    def testImportMetronomeMarksA(self):
        from music21.musicxml import testPrimitive
        from music21 import converter
        # has metronome marks defined, not with sound tag
        s = converter.parse(testPrimitive.metronomeMarks31c)
        # get all tempo indications
        mms = s.flat.getElementsByClass('TempoIndication')
        self.assertEqual(len(mms) > 3, True)
    def testImportMetronomeMarksB(self):
        pass
        # TODO: look for files that only have sound tags and create MetronomeMarks
        # need to look for bundling of Words text expressions with tempo
        # has only sound tempo=x tag
        #s = converter.parse(testPrimitive.articulations01)
        #s.show()
    def testImportGraceNotesA(self):
        # test importing from musicxml
        from music21.musicxml import testPrimitive
        from music21 import converter
        unused_s = converter.parse(testPrimitive.graceNotes24a)
        #s.show()
    def testChordalStemDirImport(self):
        #NB: Finale apparently will not display a pitch that is a member of a chord without a stem
        #unless all chord members are without stems.
        from music21.musicxml import m21ToString
        from music21 import converter
        n1 = note.Note('f3')
        n1.notehead = 'diamond'
        n1.stemDirection = 'down'
        n2 = note.Note('c4')
        n2.stemDirection = 'noStem'
        c = chord.Chord([n1, n2])
        c.quarterLength = 2
        xml = m21ToString.fromMusic21Object(c)
        #print xml
        #c.show()
        inputStream = converter.parse(xml)
        chordResult = inputStream.flat.notes[0]
        # for n in chordResult:
        # print n.stemDirection
        self.assertEqual(chordResult.getStemDirection(chordResult.pitches[0]), 'down')
        self.assertEqual(chordResult.getStemDirection(chordResult.pitches[1]), 'noStem')
    def testStaffGroupsA(self):
        from music21.musicxml import testPrimitive
        from music21 import converter
        s = converter.parse(testPrimitive.staffGroupsNested41d)
        self.assertEqual(len(s.getElementsByClass('StaffGroup')), 2)
        #raw = s.musicxml
        sg1 = s.getElementsByClass('StaffGroup')[0]
        self.assertEqual(sg1.symbol, 'brace')
        self.assertEqual(sg1.barTogether, True)
        sg2 = s.getElementsByClass('StaffGroup')[1]
        self.assertEqual(sg2.symbol, 'line')
        self.assertEqual(sg2.barTogether, True)
    def testInstrumentTranspositionA(self):
        from music21.musicxml import testPrimitive
        from music21 import converter
        s = converter.parse(testPrimitive.transposingInstruments72a)
        i1 = s.parts[0].flat.getElementsByClass('Instrument')[0]
        i2 = s.parts[1].flat.getElementsByClass('Instrument')[0]
        i3 = s.parts[2].flat.getElementsByClass('Instrument')[0]
        self.assertEqual(str(i1.transposition), '<music21.interval.Interval M-2>')
        self.assertEqual(str(i2.transposition), '<music21.interval.Interval M-6>')
    def testInstrumentTranspositionB(self):
        from music21.musicxml import testPrimitive
        from music21 import converter
        s = converter.parse(testPrimitive.transposing01)
        iStream1 = s.parts[0].flat.getElementsByClass('Instrument')
        # three instruments; one initial, and then one for each transposition
        self.assertEqual(len(iStream1), 3)
        # should be 3
        iStream2 = s.parts[1].flat.getElementsByClass('Instrument')
        self.assertEqual(len(iStream2), 3)
        i2 = iStream2[0]
        iStream3 = s.parts[2].flat.getElementsByClass('Instrument')
        self.assertEqual(len(iStream3), 1)
        i3 = iStream3[0]
        self.assertEqual(str(iStream1[0].transposition), 'None')
        self.assertEqual(str(iStream1[1].transposition), '<music21.interval.Interval P-5>')
        self.assertEqual(str(iStream1[2].transposition), '<music21.interval.Interval P1>')
        self.assertEqual(str(iStream2[0].transposition), '<music21.interval.Interval M-2>')
        self.assertEqual(str(iStream2[1].transposition), '<music21.interval.Interval m3>')
        self.assertEqual(str(i3.transposition), '<music21.interval.Interval P-5>')
        self.assertEqual(self.pitchOut([p for p in s.parts[0].flat.pitches]), '[A4, A4, A4, A4, A4, A4, A4, A4, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, A4, A4, A4, A4]')
        self.assertEqual(self.pitchOut([p for p in s.parts[1].flat.pitches]), '[B4, B4, B4, B4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, B4, B4, B4, B4, B4, B4]')
        self.assertEqual(self.pitchOut([p for p in s.parts[2].flat.pitches]), '[E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5]')
        sSounding = s.toSoundingPitch(inPlace=False)
        self.assertEqual(self.pitchOut([p for p in sSounding.parts[0].flat.pitches]), '[A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4]')
        self.assertEqual(self.pitchOut([p for p in sSounding.parts[1].flat.pitches]), '[A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4]')
        self.assertEqual(self.pitchOut([p for p in sSounding.parts[2].flat.pitches]), '[A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4]')
        # chordification by default places notes at sounding pitch
        sChords = s.chordify()
        self.assertEqual(self.pitchOut([p for p in sChords.flat.pitches]), '[A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4]')
        #sChords.show()
    def testInstrumentTranspositionC(self):
        # generate all transpositions on output
        from music21 import converter
        from music21.musicxml import testPrimitive
        s = converter.parse(testPrimitive.transposing01)
        self.assertEqual(len(s.flat.getElementsByClass('Instrument')), 7)
        #s.show()
    def testHarmonyA(self):
        from music21 import corpus
        s = corpus.parse('leadSheet/berlinAlexandersRagtime.xml')
        self.assertEqual(len(s.flat.getElementsByClass('ChordSymbol')), 19)
        match = [h.chordKind for h in s.flat.getElementsByClass('ChordSymbol')]
        self.assertEqual(match, [u'major', u'dominant', u'major', u'major', u'major', u'major', u'dominant', u'major', u'dominant', u'major', u'dominant', u'major', u'dominant', u'major', u'dominant', u'major', u'dominant', u'major', u'major'])
        match = [str(h.root()) for h in s.flat.getElementsByClass('ChordSymbol')]
        self.assertEqual(match, ['F3', 'C3', 'F3', 'B-2', 'F3', 'C3', 'G2', 'C3', 'C3', 'F3', 'C3', 'F3', 'F2', 'B-2', 'F2', 'F3', 'C3', 'F3', 'C3'])
        match = set([str(h.figure) for h in s.flat.getElementsByClass('ChordSymbol')])
        self.assertEqual(match, set(['F','F7','B-','C7','G7','C']))
        s = corpus.parse('monteverdi/madrigal.3.12.xml')
        self.assertEqual(len(s.flat.getElementsByClass('ChordSymbol')), 10)
        s = corpus.parse('leadSheet/fosterBrownHair.xml')
        self.assertEqual(len(s.flat.getElementsByClass('ChordSymbol')), 40)
        #s.show()
    def testOrnamentandTechnical(self):
        from music21 import corpus
        s = corpus.parse('opus133')
        ex = s.parts[0]
        countTrill = 0
        for n in ex.flat.notes:
            for e in n.expressions:
                if 'Trill' in e.classes:
                    countTrill += 1
        self.assertEqual(countTrill, 54)
        # TODO: Get a better test... the single harmonic in the viola part, m. 482 is probably a mistake!
        countTechnical = 0
        for n in s.parts[2].flat.notes:
            for a in n.articulations:
                if 'TechnicalIndication' in a.classes:
                    countTechnical += 1
        self.assertEqual(countTechnical, 1)
    def testOrnamentC(self):
        from music21 import converter
        from music21.musicxml import testPrimitive
        # has many ornaments
        s = converter.parse(testPrimitive.notations32a)
        #s.flat.show('t')
        self.assertEqual(len(s.flat.getElementsByClass('Tremolo')), 1)
        count = 0
        for n in s.flat.notes:
            for e in n.expressions:
                if 'Turn' in e.classes:
                    count += 1
        self.assertEqual(count, 4) # include inverted turn
        count = 0
        for n in s.flat.notes:
            for e in n.expressions:
                if 'InvertedTurn' in e.classes:
                    count += 1
        self.assertEqual(count, 1)
        count = 0
        for n in s.flat.notes:
            for e in n.expressions:
                if 'Shake' in e.classes:
                    count += 1
        self.assertEqual(count, 1)
        count = 0
        for n in s.flat.notes:
            for e in n.expressions:
                if 'Schleifer' in e.classes:
                    count += 1
        self.assertEqual(count, 1)
    def testTextBoxA(self):
        from music21 import converter
        from music21.musicxml import testPrimitive
        s = converter.parse(testPrimitive.textBoxes01)
        tbs = s.flat.getElementsByClass('TextBox')
        self.assertEqual(len(tbs), 5)
        msg = []
        for tb in tbs:
            msg.append(tb.content)
        self.assertEqual(msg, [u'This is a text box!', u'pos 200/300 (lower left)', u'pos 1000/300 (lower right)', u'pos 200/1500 (upper left)', u'pos 1000/1500 (upper right)'])
    def testImportSlursA(self):
        from music21 import corpus
        # this is a good test as this encoding uses staffs, not parts
        # to encode both parts; this requires special spanner handling
        s = corpus.parse('mozart/k545/movement1_exposition')
        sf = s.flat
        slurs = sf.getElementsByClass(spanner.Slur)
        # TODO: this value should be 2, but due to staff encoding we
        # have orphaned spanners that are not cleaned up
        self.assertEqual(len(slurs), 4)
        n1, n2 = s.parts[0].flat.notes[3], s.parts[0].flat.notes[5]
        #environLocal.printDebug(['n1', n1, 'id(n1)', id(n1), slurs[0].getSpannedElementIds(), slurs[0].getSpannedElementIds()])
        self.assertEqual(id(n1) == slurs[0].getSpannedElementIds()[0], True)
        self.assertEqual(id(n2) == slurs[0].getSpannedElementIds()[1], True)
        #environLocal.printDebug(['n2', n2, 'id(n2)', id(n2), slurs[0].getSpannedElementIds()])
    def testImportWedgeA(self):
        from music21 import converter
        from music21.musicxml import testPrimitive
        s = converter.parse(testPrimitive.spanners33a)
        self.assertEqual(len(s.flat.getElementsByClass('Crescendo')), 1)
        self.assertEqual(len(s.flat.getElementsByClass('Diminuendo')), 1)
    def testImportWedgeB(self):
        from music21 import converter
        from music21.musicxml import testPrimitive
        # this produces a single component cresc
        s = converter.parse(testPrimitive.directions31a)
        self.assertEqual(len(s.flat.getElementsByClass('Crescendo')), 2)
    def testBracketImportB(self):
        from music21 import converter
        from music21.musicxml import testPrimitive
        s = converter.parse(testPrimitive.spanners33a)
        #s.show()
        self.assertEqual(len(s.flat.getElementsByClass('Line')), 6)
    def testTrillExtensionImportA(self):
        from music21 import converter
        from music21.musicxml import testPrimitive
        s = converter.parse(testPrimitive.notations32a)
        #s.show()
        self.assertEqual(len(s.flat.getElementsByClass('TrillExtension')), 2)
    def testGlissandoImportA(self):
        from music21 import converter
        from music21.musicxml import testPrimitive
        s = converter.parse(testPrimitive.spanners33a)
        #s.show()
        self.assertEqual(len(s.flat.getElementsByClass('Glissando')), 1)
    def testImportDashes(self):
        # dashes are imported as Lines (as are brackets)
        from music21 import converter
        from music21.musicxml import testPrimitive
        s = converter.parse(testPrimitive.spanners33a)
        self.assertEqual(len(s.flat.getElementsByClass('Line')), 6)
    def testImportGraceA(self):
        from music21 import converter
        from music21.musicxml import testPrimitive
        s = converter.parse(testPrimitive.graceNotes24a)
        #s.show()
        match = [str(p) for p in s.pitches]
        #print match
        self.assertEqual(match, ['D5', 'C5', 'E5', 'D5', 'C5', 'D5', 'C5', 'D5', 'C5', 'D5', 'C5', 'E5', 'D5', 'C5', 'D5', 'C5', 'D5', 'C5', 'E5', 'E5', 'F4', 'C5', 'D#5', 'C5', 'D-5', 'A-4', 'C5', 'C5'])
    def testBarException(self):
        mxBarline = mxObjects.Barline()
        mxBarline.set('barStyle', 'light-heavy')
        # Raising the BarException
        self.assertRaises(bar.BarException, mxToRepeat, mxBarline)
        mxRepeat = mxObjects.Repeat()
        mxRepeat.set('direction', 'backward')
        mxBarline.set('repeatObj', mxRepeat)
        # all fine now, no exceptions here
        mxToRepeat(mxBarline)
        # Raising the BarException
        mxBarline.set('barStyle', 'wunderbar')
        self.assertRaises(bar.BarException, mxToRepeat, mxBarline)
#-------------------------------------------------------------------------------
# define presented order in documentation
# presentation order of functions in the generated documentation
_DOC_ORDER = [mxScoreToScore]
if __name__ == "__main__":
    # sys.arg test options will be used in mainTest()
    import music21
    music21.mainTest(Test)
#------------------------------------------------------------------------------
# eof
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: musicxml/fromMxObjects.py
# Purpose: Translate from MusicXML mxObjects to music21 objects
#
# Authors: <NAME>
# <NAME>
#
# Copyright: Copyright © 2010-2013 <NAME> and the music21 Project
# License: LGPL or BSD, see license.txt
#-------------------------------------------------------------------------------
'''
Low-level conversion routines from MusicXML to music21.
This module supposes that the musicxml document has already been parsed by xml.sax (by
base.Document.read() ) and is stored as a collection of mxObjects -- equivalent parsing
methods could be created and fed into `mxScoreToScore` to make this work.
'''
import copy
import pprint
import traceback
import unittest
from music21.musicxml import mxObjects
from music21 import common
from music21 import defaults
from music21 import exceptions21
from music21 import xmlnode
# modules that import this include converter.py.
# thus, cannot import these here
from music21 import articulations
from music21 import bar
from music21 import beam
from music21 import chord
from music21 import clef
from music21 import duration
from music21 import dynamics
from music21 import expressions
from music21 import harmony # for chord symbols
from music21 import instrument
from music21 import interval # for transposing instruments
from music21 import key
from music21 import layout
from music21 import metadata
from music21 import note
from music21 import meter
from music21 import pitch
from music21 import repeat
from music21 import spanner
from music21 import stream
from music21 import tempo
from music21 import text # for text boxes
from music21 import tie
from music21 import environment
# module identifier passed to the environment for debug/warning output
_MOD = "musicxml.fromMxObjects"
environLocal = environment.Environment(_MOD)
#-------------------------------------------------------------------------------
class FromMxObjectsException(exceptions21.Music21Exception):
    '''Exception raised on errors translating mxObjects into music21 objects.'''
    pass
class XMLBarException(FromMxObjectsException):
    '''Exception raised on errors translating MusicXML barline data.'''
    pass
# def mod6IdLocal(spannerObj):
# '''
# returns the spanner idLocal as a number from 1-6 since
# only 6 spanners of each type can be active at a time in musicxml
#
#
# >>> s = stream.Score()
# >>> for i in range(10):
# ... sp = spanner.Glissando()
# ... sp.idLocal = i + 1
# ... s.insert(0, sp)
# >>> for sp in s.getElementsByClass('Spanner'):
# ... print sp.idLocal, musicxml.fromMxObjects.mod6IdLocal(sp)
# 1 1
# 2 2
# 3 3
# 4 4
# 5 5
# 6 6
# 7 1
# 8 2
# 9 3
# 10 4
# '''
# spanId = spannerObj.idLocal
# if spanId is None:
# return 1
# mod6Id = spanId % 6
# if mod6Id == 0:
# mod6Id = 6
# return mod6Id
def configureStaffGroupFromMxPartGroup(staffGroup, mxPartGroup):
    '''
    Configure an already instantiated spanner.StaffGroup in place
    with the values carried by an mxPartGroup.
    '''
    # (staffGroup attribute, mxPartGroup key) pairs copied one-to-one
    attrPairs = (('name', 'groupName'),
                 ('abbreviation', 'groupAbbreviation'),
                 ('symbol', 'groupSymbol'),
                 ('barTogether', 'groupBarline'))
    for m21Attr, mxKey in attrPairs:
        setattr(staffGroup, m21Attr, mxPartGroup.get(mxKey))
    staffGroup.completeStatus = True
def mxCreditToTextBox(mxCredit):
    '''Convert a MusicXML credit to a music21 TextBox

    >>> c = musicxml.mxObjects.Credit()
    >>> c.append(musicxml.mxObjects.CreditWords('testing'))
    >>> c.set('page', 2)
    >>> tb = musicxml.fromMxObjects.mxCreditToTextBox(c)
    >>> tb.page
    2
    >>> tb.content
    'testing'
    '''
    tb = text.TextBox()
    tb.page = mxCredit.get('page')
    # gather the character data of every credit-words child
    content = [creditWords.charData for creditWords in mxCredit]
    if not content: # no text defined
        raise FromMxObjectsException('no credit words defined for a credit tag')
    tb.content = '\n'.join(content) # join with \n
    # take formatting from the first, no matter if multiple are defined
    first = mxCredit.componentList[0]
    tb.positionVertical = first.get('default-y')
    tb.positionHorizontal = first.get('default-x')
    tb.justify = first.get('justify')
    tb.style = first.get('font-style')
    tb.weight = first.get('font-weight')
    tb.size = first.get('font-size')
    tb.alignVertical = first.get('valign')
    tb.alignHorizontal = first.get('halign')
    return tb
def mxTransposeToInterval(mxTranspose):
    '''Convert a MusicXML Transpose object to a music21 Interval object.

    >>> t = musicxml.mxObjects.Transpose()
    >>> t.diatonic = -1
    >>> t.chromatic = -2
    >>> musicxml.fromMxObjects.mxTransposeToInterval(t)
    <music21.interval.Interval M-2>

    >>> t = musicxml.mxObjects.Transpose()
    >>> t.diatonic = -5
    >>> t.chromatic = -9
    >>> musicxml.fromMxObjects.mxTransposeToInterval(t)
    <music21.interval.Interval M-6>

    >>> t = musicxml.mxObjects.Transpose()
    >>> t.diatonic = 3 # a type of 4th
    >>> t.chromatic = 6
    >>> musicxml.fromMxObjects.mxTransposeToInterval(t)
    <music21.interval.Interval A4>
    '''
    ds = None
    if mxTranspose.diatonic is not None:
        ds = int(mxTranspose.diatonic)
    cs = None
    if mxTranspose.chromatic is not None:
        cs = int(mxTranspose.chromatic)
    oc = 0
    if mxTranspose.octaveChange is not None:
        oc = int(mxTranspose.octaveChange) * 12
    # NOTE: presently not dealing with double
    # doubled one octave down from what is currently written
    # (as is the case for mixed cello / bass parts in orchestral literature)
    #environLocal.printDebug(['ds', ds, 'cs', cs, 'oc', oc])
    if ds is not None and ds != 0 and cs is not None and cs != 0:
        # diatonic step can be used as a generic specifier here if
        # shifted 1 away from zero
        if ds < 0:
            post = interval.intervalFromGenericAndChromatic(ds - 1, cs + oc)
        else:
            post = interval.intervalFromGenericAndChromatic(ds + 1, cs + oc)
    else: # assume we have chromatic; may not be correct spelling
        if cs is None:
            # robustness fix: a missing <chromatic> element previously
            # raised TypeError (None + int); fall back to the octave
            # change alone
            cs = 0
        post = interval.Interval(cs + oc)
    return post
def mxToTempoIndication(mxMetronome, mxWords=None):
    '''
    Given an mxMetronome, convert to either a TempoIndication subclass,
    either a tempo.MetronomeMark or tempo.MetricModulation.

    >>> m = musicxml.mxObjects.Metronome()
    >>> bu = musicxml.mxObjects.BeatUnit('half')
    >>> pm = musicxml.mxObjects.PerMinute(125)
    >>> m.append(bu)
    >>> m.append(pm)
    >>> musicxml.fromMxObjects.mxToTempoIndication(m)
    <music21.tempo.MetronomeMark Half=125.0>
    '''
    # get lists of durations and texts
    durations = []
    numbers = []
    dActive = None
    for mxObj in mxMetronome.componentList:
        if isinstance(mxObj, mxObjects.BeatUnit):
            durationType = musicXMLTypeToType(mxObj.charData)
            dActive = duration.Duration(type=durationType)
            durations.append(dActive)
        if isinstance(mxObj, mxObjects.BeatUnitDot):
            if dActive is None:
                raise FromMxObjectsException('encountered metronome components out of order')
            dActive.dots += 1 # add one dot each time these are encountered
        # should come last
        if isinstance(mxObj, mxObjects.PerMinute):
            #environLocal.printDebug(['found PerMinute', mxObj])
            # store as a number, skipping empty text
            if mxObj.charData != '':
                numbers.append(float(mxObj.charData))
    if mxMetronome.isMetricModulation():
        mm = tempo.MetricModulation()
        #environLocal.printDebug(['found metric modulaton:', 'durations', durations])
        if len(durations) < 2:
            # typo fix: error message previously read "metric moduation"
            raise FromMxObjectsException(
                'found incompletely specified musicxml metric modulation: '
                'fewer than two durations defined')
        # all we have are referents, no values are defined in musicxml
        # will need to update context after adding to Stream
        mm.oldReferent = durations[0]
        mm.newReferent = durations[1]
    else:
        #environLocal.printDebug(['found metronome mark:', 'numbers', numbers])
        mm = tempo.MetronomeMark()
        if len(numbers) > 0:
            mm.number = numbers[0]
        if len(durations) > 0:
            mm.referent = durations[0]
        # TODO: set text if defined in words
        if mxWords is not None:
            pass
    # idiom fix: direct equality instead of membership in a one-item list;
    # None compares unequal to 'yes' so the separate None check is redundant
    if mxMetronome.get('parentheses') == 'yes':
        mm.parentheses = True
    return mm
def mxToRepeat(mxBarline, inputM21=None):
    '''
    Given an mxBarline (not an mxRepeat object) with repeatObj as a parameter, 
    file the necessary parameters and return a bar.Repeat() object

    If `inputM21` is None a new :class:`~music21.bar.Repeat` is created and
    returned; otherwise the passed-in object is filled in and nothing is
    returned.

    Raises :class:`~music21.bar.BarException` if the barline defines no
    repeat or an unknown repeat direction.

    >>> mxRepeat = musicxml.mxObjects.Repeat()
    >>> mxRepeat.set('direction', 'backward')
    >>> mxRepeat.get('times') == None
    True
    >>> mxBarline = musicxml.mxObjects.Barline()
    >>> mxBarline.set('barStyle', 'light-heavy')
    >>> mxBarline.set('repeatObj', mxRepeat)
    >>> b = musicxml.fromMxObjects.mxToRepeat(mxBarline)
    >>> b
    <music21.bar.Repeat direction=end>

    Test that the music21 style for a backwards repeat is called "final"
    (because it resembles a final barline) but that the musicxml style
    is called light-heavy.

    >>> b.style
    'final'
    >>> b.direction
    'end'
    >>> mxBarline2 = musicxml.toMxObjects.repeatToMx(b)
    >>> mxBarline2.get('barStyle')
    'light-heavy'
    '''
    if inputM21 is None:
        r = bar.Repeat()
    else:
        r = inputM21
    # the MusicXML bar style (e.g. 'light-heavy') is translated by the
    # Repeat object's style setter into the music21 name (e.g. 'final')
    r.style = mxBarline.get('barStyle')
    location = mxBarline.get('location')
    if location is not None:
        r.location = location

    mxRepeat = mxBarline.get('repeatObj')
    if mxRepeat is None:
        raise bar.BarException('attempting to create a Repeat from an MusicXML bar that does not ' +
                               'define a repeat')

    # MusicXML 'forward'/'backward' map to music21 'start'/'end'
    mxDirection = mxRepeat.get('direction')
    #environLocal.printDebug(['mxRepeat', mxRepeat, mxRepeat._attr])
    if mxDirection.lower() == 'forward':
        r.direction = 'start'
    elif mxDirection.lower() == 'backward':
        r.direction = 'end'
    else:
        raise bar.BarException('cannot handle mx direction format:', mxDirection)

    if mxRepeat.get('times') is not None:
        # coerce the optional repeat count into an int
        r.times = int(mxRepeat.get('times'))
    if inputM21 is None:
        return r
def mxToBarline(mxBarline, inputM21 = None):
    '''Given an mxBarline, fill the necessary parameters

    >>> mxBarline = musicxml.mxObjects.Barline()
    >>> mxBarline.set('barStyle', 'light-light')
    >>> mxBarline.set('location', 'right')
    >>> b = musicxml.fromMxObjects.mxToBarline(mxBarline)
    >>> b.style  # different in music21 than musicxml
    'double'
    >>> b.location
    'right'
    '''
    # fill an existing Barline if given one; otherwise make a new one
    b = bar.Barline() if inputM21 is None else inputM21
    # the style setter translates the MusicXML name into the music21 one
    b.style = mxBarline.get('barStyle')
    barLocation = mxBarline.get('location')
    if barLocation is not None:
        b.location = barLocation
    # only return a value when we created the object ourselves
    if inputM21 is None:
        return b
#-------------------------------------------------------------------------------
def mxGraceToGrace(noteOrChord, mxGrace=None):
    '''
    Given a completely formed, non-grace Note or Chord, create and 
    return a m21 grace version of the same.

    If mxGrace is None, no change is made and the same object is returned.
    '''
    if mxGrace is None:
        return noteOrChord

    graceObj = noteOrChord.getGrace()
    # an absent slash attribute is treated the same as slash='yes'
    graceObj.duration.slash = mxGrace.get('slash') in ('yes', None)
    graceObj.duration.stealTimePrevious = mxGrace.get('steal-time-previous')
    graceObj.duration.stealTimeFollowing = mxGrace.get('steal-time-following')
    return graceObj
#-------------------------------------------------------------------------------
# Pitch and pitch components
def mxToAccidental(mxAccidental, inputM21Object = None):
    '''
    Translate a MusicXML accidental name into a music21
    :class:`~music21.pitch.Accidental`.

    If `inputM21Object` is None a new Accidental is created; otherwise the
    passed-in object is updated. The (possibly new) Accidental is returned
    in either case.

    >>> a = musicxml.mxObjects.Accidental()
    >>> a.set('content', 'half-flat')
    >>> a.get('content')
    'half-flat'
    >>> b = pitch.Accidental()
    >>> bReference = musicxml.fromMxObjects.mxToAccidental(a, b)
    >>> b is bReference
    True
    >>> b.name
    'half-flat'
    >>> b.alter
    -0.5
    '''
    if inputM21Object is None:
        acc = pitch.Accidental()
    else:
        acc = inputM21Object

    # MusicXML accidental names that differ from the music21 names;
    # any name not listed here is the same in both systems
    nameMap = {
        "quarter-sharp": "half-sharp",
        "three-quarters-sharp": "one-and-a-half-sharp",
        "quarter-flat": "half-flat",
        "three-quarters-flat": "one-and-a-half-flat",
        "flat-flat": "double-flat",
        "sharp-sharp": "double-sharp",
    }
    mxName = mxAccidental.get('charData')
    name = nameMap.get(mxName, mxName)
    # need to use set here to get all attributes up to date
    acc.set(name)
    return acc
def mxToPitch(mxNote, inputM21=None):
    '''
    Given a MusicXML Note object, set this Pitch object to its values.

    If `inputM21` is None a new :class:`~music21.pitch.Pitch` is created;
    the configured Pitch is returned in either case.

    >>> b = musicxml.mxObjects.Pitch()
    >>> b.set('octave', 3)
    >>> b.set('step', 'E')
    >>> b.set('alter', -1)
    >>> c = musicxml.mxObjects.Note()
    >>> c.set('pitch', b)
    >>> a = pitch.Pitch('g#4')
    >>> a = musicxml.fromMxObjects.mxToPitch(c)
    >>> print(a)
    E-3
    '''
    if inputM21 == None:
        p = pitch.Pitch()
    else:
        p = inputM21
    # assume this is an object
    mxPitch = mxNote.get('pitchObj')
    mxAccidental = mxNote.get('accidentalObj')
    p.step = mxPitch.get('step')
    # sometimes we have an accidental defined but no alter value, due to 
    # a natural; need to look at mxAccidental directly
    mxAccidentalCharData = None
    if mxAccidental != None:
        mxAccidentalCharData = mxAccidental.get('charData')
        #environLocal.printDebug(['found mxAccidental charData', mxAccidentalCharData])
    acc = mxPitch.get('alter')
    # None is used in musicxml but not in music21
    if acc != None or mxAccidentalCharData != None:
        if mxAccidental is not None: # the source had wanted to show alter
            try:
                accObj = mxToAccidental(mxAccidental)
                # used to just use acc value
                # self.accidental = Accidental(float(acc))
                # better to use accObj if possible
                p.accidental = accObj
                # accidental was written explicitly, so display it
                p.accidental.displayStatus = True
            except pitch.AccidentalException:
                # MuseScore 0.9.6 generates Accidentals with empty objects
                pass
        else:
            # here we generate an accidental object from the alter value
            # but in the source, there was not a defined accidental
            try:
                p.accidental = pitch.Accidental(float(acc))
            except pitch.AccidentalException:
                raise FromMxObjectsException('incorrect accidental %s for pitch %s' % (str(acc), p))
            # no explicit accidental in the source, so do not display it
            p.accidental.displayStatus = False
    p.octave = int(mxPitch.get('octave'))
    return p
#-------------------------------------------------------------------------------
# Ties
def mxToTie(mxNote, inputM21=None):
    '''
    Translate a MusicXML :class:`~music21.musicxml.mxObjects.Note` (sic!) to a music21 
    :class:`~music21.tie.Tie` object according to its <tieList> parameter.

    Only called if the mxObjects.Note has a tieList that is not blank, so as not to 
    create additional ties.

    If `inputM21` is None a new :class:`~music21.tie.Tie` is created and
    returned; otherwise the passed-in object is filled in and nothing is
    returned.
    '''
    if inputM21 is None:
        t = tie.Tie()
    else:
        t = inputM21

    mxTieList = mxNote.get('tieList')
    if len(mxTieList) > 0:
        # gather the 'type' attribute of every <tie> found on this note
        typesFound = [mxTie.get('type') for mxTie in mxTieList]
        if len(typesFound) == 1:
            # trivial case: a single start or stop
            t.type = typesFound[0]
        elif typesFound == ['stop', 'start']:
            # a note that ends one tie and begins another is a 'continue'
            t.type = 'continue'
        else:
            environLocal.printDebug(['found unexpected arrangement of multiple tie types when ' +
                                     'importing from musicxml:', typesFound])

    if inputM21 is None:
        return t
#-------------------------------------------------------------------------------
# Lyrics
def mxToLyric(mxLyric, inputM21=None):
    '''
    Translate a MusicXML :class:`~music21.musicxml.mxObjects.Lyric` object to a 
    music21 :class:`~music21.note.Lyric` object.

    If inputM21 is a :class:`~music21.note.Lyric` object, then the values of the 
    mxLyric are transfered there and nothing returned.

    Otherwise, a new `Lyric` object is created and returned.

    >>> mxLyric = musicxml.mxObjects.Lyric()
    >>> mxLyric.set('text', 'word')
    >>> mxLyric.set('number', 4)
    >>> mxLyric.set('syllabic', 'single')
    >>> lyricObj = note.Lyric()
    >>> musicxml.fromMxObjects.mxToLyric(mxLyric, lyricObj)
    >>> lyricObj
    <music21.note.Lyric number=4 syllabic=single text="word">

    Non-numeric MusicXML lyric "number"s are converted to identifiers:

    >>> mxLyric.set('number', 'part2verse1')
    >>> l2 = musicxml.fromMxObjects.mxToLyric(mxLyric)
    >>> l2
    <music21.note.Lyric number=0 identifier="part2verse1" syllabic=single text="word">
    '''
    l = note.Lyric() if inputM21 is None else inputM21

    l.text = mxLyric.get('text')
    rawNumber = mxLyric.get('number')
    if common.isNum(rawNumber):
        l.number = rawNumber
    else:
        # A non-numeric MusicXML "number" becomes an identifier. Number 0
        # signals the caller that a real number must be assigned later,
        # based on this lyric's position among the other lyrics.
        l.number = 0
        l.identifier = rawNumber
    l.syllabic = mxLyric.get('syllabic')

    if inputM21 is None:
        return l
#-------------------------------------------------------------------------------
# Durations
def musicXMLTypeToType(value):
    '''
    Utility function to convert a MusicXML duration type to an music21 duration type.
    
    Changes 'long' to 'longa' and deals with a Guitar Pro 5.2 bug in MusicXML 
    export, that exports a 32nd note with the type '32th'.

    >>> musicxml.fromMxObjects.musicXMLTypeToType('long')
    'longa'
    >>> musicxml.fromMxObjects.musicXMLTypeToType('32th')
    '32nd'
    >>> musicxml.fromMxObjects.musicXMLTypeToType('quarter')
    'quarter'
    >>> musicxml.fromMxObjects.musicXMLTypeToType(None)
    Traceback (most recent call last):
    FromMxObjectsException...
    '''
    # most type names are identical in both systems
    if value in duration.typeToDuration:
        return value
    # MusicXML uses 'long' where music21 uses 'longa'; '32th' is the
    # Guitar Pro 5.2 misspelling of '32nd'
    remapped = {'long': 'longa', '32th': '32nd'}
    if value in remapped:
        return remapped[value]
    raise FromMxObjectsException('found unknown MusicXML type: %s' % value)
def mxToDuration(mxNote, inputM21=None):
    '''
    Translate a `MusicXML` :class:`~music21.musicxml.mxObjects.Note` object 
    to a music21 :class:`~music21.duration.Duration` object.

    The mxNote must carry an 'external' measure/divisions reference so the
    raw duration count can be converted into quarter lengths; raises
    FromMxObjectsException otherwise.

    ::

        >>> a = musicxml.mxObjects.Note()
        >>> a.setDefaults()
        >>> m = musicxml.mxObjects.Measure()
        >>> m.setDefaults()
        >>> a.external['measure'] = m # assign measure for divisions ref
        >>> a.external['divisions'] = m.external['divisions']
        >>> c = duration.Duration()
        >>> musicxml.fromMxObjects.mxToDuration(a, c)
        <music21.duration.Duration 1.0>
        >>> c.quarterLength
        1.0

    '''
    if inputM21 == None:
        d = duration.Duration()
    else:
        d = inputM21
    if mxNote.external['measure'] == None:
        raise FromMxObjectsException(
        "cannot determine MusicXML duration without a reference to a measure (%s)" % mxNote)

    mxDivisions = mxNote.external['divisions']
    if mxNote.duration is not None:
        if mxNote.get('type') is not None:
            # a 'cooked' duration can be built from the declared type
            durationType = musicXMLTypeToType(mxNote.get('type'))
            forceRaw = False
        else: # some rests do not define type, and only define duration
            durationType = None # no type to get, must use raw
            forceRaw = True
        mxDotList = mxNote.get('dotList')
        # divide mxNote duration count by divisions to get qL
        qLen = float(mxNote.duration) / float(mxDivisions)
        # mxNotations = mxNote.get('notationsObj')
        mxTimeModification = mxNote.get('timeModificationObj')

        if mxTimeModification is not None:
            # a time-modification implies a tuplet on this duration
            tup = mxToTuplet(mxNote)
            # get all necessary config from mxNote
            #environLocal.printDebug(['created Tuplet', tup])
            # need to see if there is more than one component
            #self.components[0]._tuplets.append(tup)
        else:
            tup = None
        # two ways to create durations, raw and cooked
        if forceRaw:
            # raw path: derive components directly from the quarter length
            #environLocal.printDebug(['forced to use raw duration', durRaw])
            durRaw = duration.Duration() # raw just uses qLen
            # the qLen set here may not be computable, but is not immediately
            # computed until setting components
            durRaw.quarterLength = qLen
            try:
                d.components = durRaw.components
            except duration.DurationException:
                # fall back to a quarter note when qLen is not expressible
                environLocal.warn(['mxToDuration', 'supplying quarterLength of 1 as type is not ' + 
                        'defined and raw quarterlength (%s) is not a computable duration' % qLen])
                environLocal.printDebug(['mxToDuration', 'raw qLen', qLen, durationType, 
                                        'mxNote.duration:', mxNote.duration, 
                                        'last mxDivisions:', mxDivisions])
                durRaw.quarterLength = 1.
        else: # a cooked version builds up from pieces
            durUnit = duration.DurationUnit()
            durUnit.type = durationType
            durUnit.dots = len(mxDotList)
            if not tup == None:
                durUnit.appendTuplet(tup)
            durCooked = duration.Duration(components=[durUnit])
            # sanity check: the single unit and the whole Duration must agree
            if durUnit.quarterLength != durCooked.quarterLength:
                environLocal.printDebug(['error in stored MusicXML representaiton and ' + 
                                         'duration value', durCooked])
            # old way just used qLen
            #self.quarterLength = qLen
            d.components = durCooked.components
    # if mxNote.duration is None, this is a grace note, and duration
    # is based entirely on type
    if mxNote.duration is None:
        durUnit = duration.DurationUnit()
        durUnit.type = musicXMLTypeToType(mxNote.get('type'))
        durUnit.dots = len(mxNote.get('dotList'))
        d.components = [durUnit]
        #environLocal.printDebug(['got mx duration of None', d])

    return d
def mxToOffset(mxDirection, mxDivisions):
    '''
    Translate a MusicXML :class:`~music21.musicxml.mxObjects.Direction`
    with an offset value to an offset in music21.
    '''
    if mxDivisions is None:
        raise FromMxObjectsException(
        "cannot determine MusicXML duration without a reference to a measure (%s)" % mxDirection)
    rawOffset = mxDirection.offset
    # a missing offset means the direction falls exactly on its anchor
    if rawOffset is None:
        return 0.0
    # divisions-per-quarter converts the raw count into quarter lengths
    return float(rawOffset) / float(mxDivisions)
def mxToTuplet(mxNote, inputM21Object = None):
    '''
    Given an mxNote, based on mxTimeModification 
    and mxTuplet objects, return a Tuplet object
    (or alter the input object and then return it)

    Raises :class:`~music21.duration.TupletException` if the passed-in
    Tuplet is frozen (already attached to a Duration).
    '''
    if inputM21Object is None:
        t = duration.Tuplet()
    else:
        t = inputM21Object
    if t.frozen is True:
        raise duration.TupletException("A frozen tuplet (or one attached to a duration) " +
                                       "is immutable")

    mxTimeModification = mxNote.get('timeModificationObj')
    #environLocal.printDebug(['got mxTimeModification', mxTimeModification])
    # e.g. 3 actual notes in the time of 2 normal notes
    t.numberNotesActual = int(mxTimeModification.get('actual-notes'))
    t.numberNotesNormal = int(mxTimeModification.get('normal-notes'))
    mxNormalType = mxTimeModification.get('normal-type')
    # TODO: implement dot
    # mxNormalDot = mxTimeModification.get('normal-dot')

    if mxNormalType != None:
        # this value does not seem to frequently be supplied by mxl
        # encodings, unless it is different from the main duration
        # this sets both actual and noraml types to the same type
        t.setDurationType(musicXMLTypeToType(
                            mxTimeModification.get('normal-type')))
    else: # set to type of duration
        t.setDurationType(musicXMLTypeToType(mxNote.get('type')))

    mxNotations = mxNote.get('notationsObj')
    #environLocal.printDebug(['got mxNotations', mxNotations])

    # bracket/placement/type come from the <tuplet> notation, if present
    if mxNotations != None and len(mxNotations.getTuplets()) > 0:
        mxTuplet = mxNotations.getTuplets()[0] # a list, but only use first
        #environLocal.printDebug(['got mxTuplet', mxTuplet])
        t.type = mxTuplet.get('type') 
        t.bracket = mxObjects.yesNoToBoolean(mxTuplet.get('bracket'))
        #environLocal.printDebug(['got bracket', self.bracket])
        t.placement = mxTuplet.get('placement') 

    return t
#-------------------------------------------------------------------------------
# Meters
def mxToTimeSignature(mxTimeList, inputM21=None):
    '''
    Given an mxTimeList, load this object 

    if inputM21 is None, create a new TimeSignature
    and return it.

    >>> mxTime = musicxml.mxObjects.Time()
    >>> mxTime.setDefaults()
    >>> mxAttributes = musicxml.mxObjects.Attributes()
    >>> mxAttributes.timeList.append(mxTime)
    >>> ts = meter.TimeSignature()
    >>> musicxml.fromMxObjects.mxToTimeSignature(mxAttributes.timeList, ts)
    >>> ts.numerator
    4
    '''
    ts = meter.TimeSignature() if inputM21 is None else inputM21

    # mxTimeList may be a bare Time object or a list with one entry per
    # staff; only the first is used
    if common.isListLike(mxTimeList):
        mxTime = mxTimeList[0]
    else:
        mxTime = mxTimeList

    numerators = []
    denominators = []
    for component in mxTime.componentList:
        if isinstance(component, mxObjects.Beats):
            numerators.append(component.charData) # may be 3+2
        if isinstance(component, mxObjects.BeatType):
            denominators.append(component.charData)

    # pair each numerator with its denominator and join with '+' to form a
    # composite meter string, e.g. '3/8+2/8'
    fractions = ['%s/%s' % (numerators[i], denominators[i])
                 for i in range(len(numerators))]
    #environLocal.printDebug(['loading meter string:', '+'.join(fractions)])
    ts.load('+'.join(fractions))

    if inputM21 is None:
        return ts
#--------------------------------------------------------
# Key/KeySignatures
def mxKeyListToKeySignature(mxKeyList, inputM21 = None):
    '''
    Given a mxKey object or keyList, return a music21.key.KeySignature
    object and return it, or if inputM21 is None, change its
    attributes and return nothing.

    >>> mxk = musicxml.mxObjects.Key()
    >>> mxk.set('fifths', 5)
    >>> ks = key.KeySignature()
    >>> musicxml.fromMxObjects.mxKeyListToKeySignature(mxk, ks)
    >>> ks.sharps
    5 

    Or just get a new KeySignature object from scratch:

    >>> mxk.set('fifths', -2)
    >>> ks2 = musicxml.fromMxObjects.mxKeyListToKeySignature(mxk)
    >>> ks2
    <music21.key.KeySignature of 2 flats>
    '''
    ks = key.KeySignature() if inputM21 is None else inputM21

    # a list means one Key per staff; only the first is used
    if common.isListLike(mxKeyList):
        mxKey = mxKeyList[0]
    else:
        mxKey = mxKeyList

    fifths = mxKey.get('fifths')
    # a missing fifths value means no sharps or flats
    ks.sharps = int(fifths) if fifths is not None else 0
    mode = mxKey.get('mode')
    if mode is not None:
        ks.mode = mode

    if inputM21 is None:
        return ks
#--------------------------------------------------------
# clefs
def mxClefToClef(mxClefList, inputM21 = None): 
    '''
    Given a MusicXML Clef object, return a music21 
    Clef object

    >>> a = musicxml.mxObjects.Clef()   
    >>> a.set('sign', 'G')
    >>> a.set('line', 2)
    >>> b = clef.Clef()
    >>> b
    <music21.clef.Clef>
    >>> 'TrebleClef' in b.classes
    False
    >>> musicxml.fromMxObjects.mxClefToClef(a, b)
    >>> b.sign
    'G'
    >>> 'TrebleClef' in b.classes
    True
    >>> b
    <music21.clef.TrebleClef>

    Create a new clef from thin air:

    >>> a = musicxml.mxObjects.Clef()   
    >>> a.set('sign', 'TAB')
    >>> c = musicxml.fromMxObjects.mxClefToClef(a)
    >>> c
    <music21.clef.TabClef>
    '''
    # a list holds one Clef per staff; only the first is considered
    mxClef = mxClefList[0] if common.isListLike(mxClefList) else mxClefList

    sign = mxClef.get('sign')
    if sign in ('TAB', 'percussion', 'none'):
        # these clef types carry no line or octave-change information
        clefObj = clef.clefFromString(sign)
    else:
        line = mxClef.get('line')
        mxOctaveChange = mxClef.get('clefOctaveChange')
        octaveChange = int(mxOctaveChange) if mxOctaveChange is not None else 0
        clefObj = clef.clefFromString(sign + str(line), octaveChange)

    if inputM21 is None:
        return clefObj
    # re-class the passed-in object in place, so existing references to it
    # become the proper Clef subclass
    inputM21._classes = None
    inputM21.__class__ = clefObj.__class__
    inputM21.sign = clefObj.sign
    inputM21.line = clefObj.line
    inputM21.octaveChange = clefObj.octaveChange
#-------------------------------------------------------------------------------
# Dynamics
def mxToDynamicList(mxDirection):
    '''
    Given an mxDirection, load instance

    Returns a list of :class:`~music21.dynamics.Dynamic` objects, one per
    dynamic mark contained in the direction. Raises DynamicException if no
    Dynamics sub-object is found.

    >>> mxDirection = musicxml.mxObjects.Direction()
    >>> mxDirectionType = musicxml.mxObjects.DirectionType()
    >>> mxDynamicMark = musicxml.mxObjects.DynamicMark('ff')
    >>> mxDynamics = musicxml.mxObjects.Dynamics()
    >>> mxDynamics.set('default-y', -20)
    >>> mxDynamics.append(mxDynamicMark)
    >>> mxDirectionType.append(mxDynamics)
    >>> mxDirection.append(mxDirectionType)
    >>> a = dynamics.Dynamic()
    >>> a = musicxml.fromMxObjects.mxToDynamicList(mxDirection)[0]
    >>> a.value
    'ff'
    >>> a.englishName
    'very loud'
    >>> a._positionDefaultY
    -20
    '''
    # can probably replace this with mxDirection.getDynamicMark()
    # need to test
    # find the Dynamics container nested inside a DirectionType
    mxDynamics = None
    for mxObj in mxDirection:
        if isinstance(mxObj, mxObjects.DirectionType):
            for mxObjSub in mxObj:
                if isinstance(mxObjSub, mxObjects.Dynamics):
                    mxDynamics = mxObjSub
    if mxDynamics == None:
        raise dynamics.DynamicException('when importing a Dynamics object from MusicXML, ' + 
                                        'did not find a DynamicMark')
#     if len(mxDynamics) > 1:
#         raise dynamics.DynamicException('when importing a Dynamics object from MusicXML, ' 
#                                  'found more than one DynamicMark contained, namely %s' % 
#                                  str(mxDynamics))

    post = []
    for sub in mxDynamics.componentList:
        d = dynamics.Dynamic()
        # placement is found in outermost object
        if mxDirection.get('placement') is not None:
            d._positionPlacement = mxDirection.get('placement') 

        # the tag is the dynamic mark value
        mxDynamicMark = sub.get('tag')
        d.value = mxDynamicMark
        # copy positioning attributes from the Dynamics container
        for dst, src in [('_positionDefaultX', 'default-x'), 
                         ('_positionDefaultY', 'default-y'), 
                         ('_positionRelativeX', 'relative-x'),
                         ('_positionRelativeY', 'relative-y')]:
            if mxDynamics.get(src) is not None:
                setattr(d, dst, mxDynamics.get(src))
        post.append(d)
    return post
def mxToTextExpression(mxDirection):
    '''
    Given an mxDirection, create one or more TextExpressions
    '''
    expressionList = []
    for mxWords in mxDirection.getWords():
        #environLocal.printDebug(['mxToTextExpression()', mxWords, mxWords.charData])
        # the words' character data becomes the expression content
        te = expressions.TextExpression(mxWords.charData)
        te.justify = mxWords.get('justify')
        te.size = mxWords.get('font-size')
        te.letterSpacing = mxWords.get('letter-spacing')
        te.enclosure = mxWords.get('enclosure')
        te.positionVertical = mxWords.get('default-y')

        # font-style and font-weight combine into a single music21 style;
        # 'normal' is treated the same as unspecified
        style = mxWords.get('font-style')
        if style == 'normal':
            style = None
        weight = mxWords.get('font-weight')
        if weight == 'normal':
            weight = None

        if style == 'italic' and weight == 'bold':
            te.style = 'bolditalic'
        elif style == 'italic' and weight is None:
            te.style = 'italic'
        elif weight == 'bold' and style is None:
            te.style = 'bold'

        expressionList.append(te)
    return expressionList
def mxToCoda(mxCoda):
    '''
    Translate a MusicXML :class:`~music21.musicxml.mxObjects.Coda` object 
    to a music21 :class:`~music21.repeat.Coda` object.
    '''
    codaMark = repeat.Coda()
    # carry over the MusicXML default positioning attributes
    codaMark._positionDefaultX = mxCoda.get('default-x')
    codaMark._positionDefaultY = mxCoda.get('default-y')
    return codaMark
def mxToSegno(mxCoda):
    '''
    Translate a MusicXML :class:`~music21.musicxml.mxObjects.Segno` object 
    to a music21 :class:`~music21.repeat.Segno` object.

    (The parameter is named `mxCoda` for historical reasons; it is expected
    to be a Segno-like object supporting `get('default-x')`/`get('default-y')`.)
    '''
    rm = repeat.Segno()
    # carry over the MusicXML default positioning attributes
    rm._positionDefaultX = mxCoda.get('default-x')
    rm._positionDefaultY = mxCoda.get('default-y')
    return rm
def mxToRepeatExpression(mxDirection):
    '''
    Given an mxDirection that may define a coda, segno, or other repeat 
    expression statement, realize the appropriate music21 object.

    Currently a no-op placeholder; see the note below.
    '''
    pass
    # note: this may not be needed, as mx text expressions are converted to repeat objects in measure processing
#-------------------------------------------------------------------------------
# Harmony
def mxToChordSymbol(mxHarmony):
    '''
    Convert a musicxml.mxObjects.Harmony() object to a harmony.ChordSymbol object:
    
    ::

        >>> mxHarmony = musicxml.mxObjects.Harmony()
        >>> mxKind = musicxml.mxObjects.Kind()
        >>> mxKind.charData = 'major-seventh'
        >>> mxHarmony.kindObj = mxKind
        >>> mxRoot = musicxml.mxObjects.Root()
        >>> mxRoot.set('root-step', 'D')
        >>> mxRoot.set('root-alter', '-1')
        >>> mxHarmony.rootObj = mxRoot
        >>> cs = musicxml.fromMxObjects.mxToChordSymbol(mxHarmony)
        >>> cs
        <music21.harmony.ChordSymbol D-maj7>

    ::

        >>> cs.figure
        'D-maj7'

    ::

        >>> cs.pitches
        (<music21.pitch.Pitch D-3>, <music21.pitch.Pitch F3>, <music21.pitch.Pitch A-3>, <music21.pitch.Pitch C4>)

    ::

        >>> cs.root()
        <music21.pitch.Pitch D-3>
    
    TODO: this is very classically-oriented.  Make it more Jazz/Rock like.
    
    ::

        >>> mxKind.charData = 'major-sixth'
        >>> cs = musicxml.fromMxObjects.mxToChordSymbol(mxHarmony)
        >>> cs
        <music21.harmony.ChordSymbol D-6>

    ::

        >>> cs.figure
        'D-6'

    ::

        >>> cs.pitches
        (<music21.pitch.Pitch D-3>, <music21.pitch.Pitch F3>, <music21.pitch.Pitch A-3>, <music21.pitch.Pitch B-3>)

    ::

        >>> cs.root()
        <music21.pitch.Pitch D-3>  
    '''
    #environLocal.printDebug(['mxToChordSymbol():', mxHarmony])
    cs = harmony.ChordSymbol()

    mxKind = mxHarmony.get('kind')
    if mxKind is not None:
        cs.chordKind = mxKind.charData
        mxKindText = mxKind.get('text')
        if mxKindText is not None:
            cs.chordKindStr = mxKindText

    # r stays None when the harmony defines no <root>; all later uses must
    # guard on this (previously an absent root raised NameError)
    r = None
    mxRoot = mxHarmony.get('root')
    if mxRoot is not None:
        r = pitch.Pitch(mxRoot.get('rootStep'))
        if mxRoot.get('rootAlter') is not None:
            # can provide integer to create accidental on pitch
            r.accidental = pitch.Accidental(int(mxRoot.get('rootAlter')))
        # set Pitch object on Harmony
        cs.root(r)

    mxBass = mxHarmony.get('bass')
    if mxBass is not None:
        b = pitch.Pitch(mxBass.get('bassStep'))
        if mxBass.get('bassAlter') is not None:
            # can provide integer to create accidental on pitch
            b.accidental = pitch.Accidental(int(mxBass.get('bassAlter')))
        # set Pitch object on Harmony
        cs.bass(b)
    elif r is not None:
        cs.bass(r) # set the bass to the root if bass is undefined

    mxInversion = mxHarmony.get('inversion')
    if mxInversion is not None:
        cs.inversion(int(mxInversion), transposeOnSet=False) # must be an int

    mxFunction = mxHarmony.get('function')
    if mxFunction is not None:
        cs.romanNumeral = mxFunction # goes to roman property

    mxDegree = mxHarmony.get('degree')
    if mxDegree is not None: # a list of components
        ChordStepModifications = []
        hd = None
        for mxSub in mxDegree.componentList:
            # this is the assumed order of triples:
            # DegreeValue starts a new modification; DegreeAlter and
            # DegreeType fill in the one currently being built
            if isinstance(mxSub, mxObjects.DegreeValue):
                if hd is not None: # already set
                    ChordStepModifications.append(hd)
                    hd = None
                if hd is None:
                    hd = harmony.ChordStepModification()
                hd.degree = int(mxSub.charData)
            elif isinstance(mxSub, mxObjects.DegreeAlter):
                hd.interval = int(mxSub.charData)
            elif isinstance(mxSub, mxObjects.DegreeType):
                hd.modType = mxSub.charData
            else:
                raise FromMxObjectsException('found unexpected object in degree tag: %s' % mxSub)
        # must get last on loop exit
        if hd is not None:
            ChordStepModifications.append(hd)
        for hd in ChordStepModifications:
            cs.addChordStepModification(hd)

    cs._updatePitches()
    #environLocal.printDebug(['mxToChordSymbol(): ChordSymbol object', cs])
    # re-assert the notated root if pitch updating changed its spelling
    if r is not None and cs.root().name != r.name:
        cs.root(r)
    return cs
#-------------------------------------------------------------------------------
# Instruments
def mxToInstrument(mxScorePart, inputM21=None):
    '''
    Return a generic instrument.Instrument object from this mxScorePart
    '''
    # transposition values are not set here; that happens in mxToStreamPart
    i = instrument.Instrument() if inputM21 is None else inputM21

    def _normalize(raw):
        # strip and collapse newlines out of badly-formed name strings
        if raw is None:
            return None
        return raw.strip().replace('\n', ' ')

    i.partId = _normalize(mxScorePart.get('id'))
    i.partName = _normalize(mxScorePart.get('partName'))
    i.partAbbreviation = _normalize(mxScorePart.get('partAbbreviation'))

    if len(mxScorePart.scoreInstrumentList) > 0:
        # only the first score-instrument is considered
        mxScoreInstrument = mxScorePart.scoreInstrumentList[0]
        i.instrumentName = _normalize(mxScoreInstrument.get('instrumentName'))
        i.instrumentAbbreviation = _normalize(
            mxScoreInstrument.get('instrumentAbbreviation'))

    if len(mxScorePart.midiInstrumentList) > 0:
        # only the first midi-instrument is considered; MusicXML counts
        # programs and channels from 1, music21 from zero
        mxMIDIInstrument = mxScorePart.midiInstrumentList[0]
        mp = mxMIDIInstrument.get('midiProgram')
        if mp is not None:
            i.midiProgram = int(mp) - 1
        mc = mxMIDIInstrument.get('midiChannel')
        if mc is not None:
            i.midiChannel = int(mc) - 1

    if inputM21 is None:
        return i
#-------------------------------------------------------------------------------
# unified processors for Chords and Notes
def mxNotationsToSpanners(target, mxNotations, spannerBundle):
    '''
    General routines for gathering spanners from notes via mxNotations objects and placing them 
    in a spanner bundle.

    Spanners may be found in musicXML notations and directions objects.

    The passed-in spannerBundle will be edited in-place; existing spanners may be completed, or 
    new spanners may be added.

    The `target` object is a reference to the relevant music21 object this spanner is associated
    with.

    Four spanner families are handled, each with the same pattern: look up
    an open spanner of the matching class and idLocal in the bundle, create
    one if not found, attach `target`, and mark the spanner complete on a
    'stop' event.
    '''
    # ---- slurs ----
    mxSlurList = mxNotations.getSlurs()
    for mxObj in mxSlurList:
        # look at all spanners and see if we have an open, matching
        # slur to place this in
        idFound = mxObj.get('number')
        # returns a new spanner bundle with just the result of the search
        #environLocal.printDebug(['spanner bundle: getByCompleteStatus(False)', spannerBundle.getByCompleteStatus(False)])
        #sb = spannerBundle.getByIdLocal(idFound).getByCompleteStatus(False)
        sb = spannerBundle.getByClassIdLocalComplete('Slur', idFound, False)
        if len(sb) > 0: # if we already have a slur
            #environLocal.printDebug(['found a match in SpannerBundle'])
            su = sb[0] # get the first
        else: # create a new slur
            su = spanner.Slur()
            su.idLocal = idFound
            su.placement = mxObj.get('placement')
            spannerBundle.append(su)
        # add a reference of this note to this spanner
        su.addSpannedElements(target)
        #environLocal.printDebug(['adding n', n, id(n), 'su.getSpannedElements', su.getSpannedElements(), su.getSpannedElementIds()])
        if mxObj.get('type') == 'stop':
            su.completeStatus = True
            # only add after complete

    # ---- wavy lines (trill extensions) ----
    mxWavyLineList = mxNotations.getWavyLines()
    for mxObj in mxWavyLineList:
        #environLocal.printDebug(['waveyLines', mxObj])
        idFound = mxObj.get('number')
        sb = spannerBundle.getByClassIdLocalComplete('TrillExtension',
            idFound, False)
        if len(sb) > 0: # if we already have
            su = sb[0] # get the first
        else: # create a new spanner
            su = expressions.TrillExtension()
            su.idLocal = idFound
            su.placement = mxObj.get('placement')
            spannerBundle.append(su)
        # add a reference of this note to this spanner
        su.addSpannedElements(target)
        if mxObj.get('type') == 'stop':
            su.completeStatus = True
            # only add after complete

    # ---- tremolos ----
    mxTremoloList = mxNotations.getTremolos()
    for mxObj in mxTremoloList:
        environLocal.printDebug(['mxTremoloList', mxObj])
        idFound = mxObj.get('number')
        sb = spannerBundle.getByClassIdLocalComplete('Tremolo',
            idFound, False)
        if len(sb) > 0: # if we already have
            su = sb[0] # get the first
        else: # create a new spanner
            environLocal.printDebug(['creating Tremolo'])
            su = expressions.Tremolo()
            su.idLocal = idFound
            #su.placement = mxObj.get('placement')
            spannerBundle.append(su)
        # add a reference of this note to this spanner
        su.addSpannedElements(target)
        # can be stop or None; we can have empty single-element tremolo
        if mxObj.get('type') in ['stop', None]:
            su.completeStatus = True
            # only add after complete

    # ---- glissandi ----
    mxGlissandoList = mxNotations.getGlissandi()
    for mxObj in mxGlissandoList:
        idFound = mxObj.get('number')
        sb = spannerBundle.getByClassIdLocalComplete('Glissando',
            idFound, False)
        if len(sb) > 0: # if we already have
            su = sb[0] # get the first
        else: # create a new spanner
            su = spanner.Glissando()
            su.idLocal = idFound
            su.lineType = mxObj.get('line-type')
            spannerBundle.append(su)
        # add a reference of this note to this spanner
        su.addSpannedElements(target)
        if mxObj.get('type') == 'stop':
            su.completeStatus = True
            # only add after complete
def mxDirectionToSpanners(targetLast, mxDirection, spannerBundle):
    '''Some spanners, such as MusicXML octave-shift, are encoded as MusicXML directions.

    `targetLast` is the most recently processed note (or None); a 'stop'
    event attaches it to the spanner being closed. The spannerBundle is
    edited in place: 'start'/'crescendo'/'diminuendo' events open spanners
    pending element assignment; 'stop' events close the matching open
    spanner. Raises FromMxObjectsException on an unknown wedge/bracket type.
    '''
    # ---- wedges (crescendo / diminuendo hairpins) ----
    mxWedge = mxDirection.getWedge()
    if mxWedge is not None:
        mxType = mxWedge.get('type')
        idFound = mxWedge.get('number')
        #environLocal.printDebug(['mxDirectionToSpanners', 'found mxWedge', mxType, idFound])
        if mxType == 'crescendo':
            sp = dynamics.Crescendo()
            sp.idLocal = idFound
            spannerBundle.append(sp)
            # define this spanner as needing component assignment from
            # the next general note
            spannerBundle.setPendingSpannedElementAssignment(sp, 'GeneralNote')
        elif mxType == 'diminuendo':
            sp = dynamics.Diminuendo()
            sp.idLocal = idFound
            spannerBundle.append(sp)
            spannerBundle.setPendingSpannedElementAssignment(sp, 'GeneralNote')
        elif mxType == 'stop':
            # need to retrieve an existing spanner
            # try to get base class of both Crescendo and Decrescendo
            sp = spannerBundle.getByClassIdLocalComplete('DynamicWedge',
                    idFound, False)[0] # get first
            sp.completeStatus = True
            # will only have a target if this follows the note
            if targetLast is not None:
                sp.addSpannedElements(targetLast)
        else:
            raise FromMxObjectsException('unidentified mxType of mxWedge:', mxType)

    # ---- brackets (Line spanners) ----
    mxBracket = mxDirection.getBracket()
    if mxBracket is not None:
        mxType = mxBracket.get('type')
        idFound = mxBracket.get('number')
        #environLocal.printDebug(['mxDirectionToSpanners', 'found mxBracket', mxType, idFound])
        if mxType == 'start':
            sp = spanner.Line()
            sp.idLocal = idFound
            sp.startTick = mxBracket.get('line-end')
            sp.startHeight = mxBracket.get('end-length')
            sp.lineType = mxBracket.get('line-type')

            spannerBundle.append(sp)
            # define this spanner as needing component assignment from
            # the next general note
            spannerBundle.setPendingSpannedElementAssignment(sp, 'GeneralNote')
        elif mxType == 'stop':
            # need to retrieve an existing spanner
            # try to get base class of both Crescendo and Decrescendo
            sp = spannerBundle.getByClassIdLocalComplete('Line',
                    idFound, False)[0] # get first
            sp.completeStatus = True

            sp.endTick = mxBracket.get('line-end')
            sp.endHeight = mxBracket.get('end-length')
            sp.lineType = mxBracket.get('line-type')

            # will only have a target if this follows the note
            if targetLast is not None:
                sp.addSpannedElements(targetLast)
        else:
            raise FromMxObjectsException('unidentified mxType of mxBracket:', mxType)

    # ---- dashes (imported as dashed Line spanners) ----
    mxDashes = mxDirection.getDashes()
    # import mxDashes as m21 Line objects
    if mxDashes is not None:
        mxType = mxDashes.get('type')
        idFound = mxDashes.get('number')
        #environLocal.printDebug(['mxDirectionToSpanners', 'found mxDashes', mxType, idFound])
        if mxType == 'start':
            sp = spanner.Line()
            sp.idLocal = idFound
            sp.startTick = 'none'
            sp.lineType = 'dashed'
            spannerBundle.append(sp)
            # define this spanner as needing component assignment from
            # the next general note
            spannerBundle.setPendingSpannedElementAssignment(sp, 'GeneralNote')
        elif mxType == 'stop':
            # need to retrieve an existing spanner
            # try to get base class of both Crescendo and Decrescendo
            sp = spannerBundle.getByClassIdLocalComplete('Line',
                    idFound, False)[0] # get first
            sp.completeStatus = True
            sp.endTick = 'none'
            # will only have a target if this follows the note
            if targetLast is not None:
                sp.addSpannedElements(targetLast)
        else:
            raise FromMxObjectsException('unidentified mxType of mxBracket:', mxType)
#-------------------------------------------------------------------------------
def mxFermataToFermata(mxFermata, inputM21 = None):
    '''
    Convert an mxFermata object to a music21 expressions.Fermata
    object.

    When `inputM21` is None a new Fermata is created and returned;
    otherwise the given Fermata is configured in place and nothing is
    returned.


    >>> mxFermata = musicxml.mxObjects.Fermata()
    >>> mxFermata.set('type', 'inverted')
    >>> fermata = musicxml.fromMxObjects.mxFermataToFermata(mxFermata)
    >>> fermata.type
    'inverted'
    '''
    # configure the supplied object if given, else build a fresh one
    fermata = expressions.Fermata() if inputM21 is None else inputM21
    fermata.type = mxFermata.get('type')
    if inputM21 is None:
        return fermata
def mxTechnicalToArticulation(mxTechnicalMark, inputM21 = None):
    '''
    Convert an mxTechnicalMark to a music21.articulations.TechnicalIndication object or one
    of its subclasses.

    An unrecognized tag falls back to the generic
    :class:`~music21.articulations.TechnicalIndication` (after a debug
    message) rather than raising a KeyError.

    Example: Provided an musicxml.mxObjects.TechnicalMark object (not an mxTechnical object)
    configure the music21 object.

    Create both a musicxml.mxObjects.ArticulationMark object and a conflicting music21 object:

    >>> mxTechnicalMark = musicxml.mxObjects.TechnicalMark('up-bow')
    >>> mxTechnicalMark.set('placement', 'below')
    >>> a = articulations.DownBow()
    >>> a.placement = 'above'

    Now override the music21 object with the mxArticulationMark object's characteristics

    >>> musicxml.fromMxObjects.mxTechnicalToArticulation(mxTechnicalMark, inputM21 = a)
    >>> 'DownBow' in a.classes
    False
    >>> 'UpBow' in a.classes
    True
    >>> a.placement
    'below'
    '''
    # map MusicXML technical tags to music21 articulation classes
    mappingList = {'up-bow'          : articulations.UpBow,
                   'down-bow'        : articulations.DownBow,
                   'harmonic'        : articulations.Harmonic,
                   'open-string'     : articulations.OpenString,
                   'thumb-position'  : articulations.StringThumbPosition,
                   'fingering'       : articulations.StringFingering,
                   'pluck'           : articulations.FrettedPluck,
                   'double-tongue'   : articulations.DoubleTongue,
                   'triple-tongue'   : articulations.TripleTongue,
                   'stopped'         : articulations.Stopped,
                   'snap-pizzicato'  : articulations.SnapPizzicato,
                   'fret'            : articulations.FretIndication,
                   'string'          : articulations.StringIndication,
                   'hammer-on'       : articulations.HammerOn,
                   'pull-off'        : articulations.PullOff,
                   'bend'            : articulations.FretBend,
                   'tap'             : articulations.FretTap,
                   'heel'            : articulations.OrganHeel,
                   'toe'             : articulations.OrganToe,
                   'fingernails'     : articulations.HarpFingerNails,
                   'other-technical' : articulations.TechnicalIndication,
                   }

    mxName = mxTechnicalMark.tag
    if mxName not in mappingList:
        environLocal.printDebug("Cannot translate %s in %s." % (mxName, mxTechnicalMark))
        # bug fix: previously the lookup below still ran and raised a
        # KeyError; fall back to the generic technical indication instead
        artClass = articulations.TechnicalIndication
    else:
        artClass = mappingList[mxName]

    if inputM21 is None:
        art = artClass()
    else:
        art = inputM21
        # retarget the existing object's class, per the doctest contract
        art.__class__ = artClass

    # placement is optional on technical marks
    try:
        art.placement = mxTechnicalMark.get('placement')
    except xmlnode.XMLNodeException:
        pass

    if inputM21 is None:
        return art
def mxArticulationToArticulation(mxArticulationMark, inputM21 = None):
    '''
    Convert an mxArticulationMark to a music21.articulations.Articulation
    object or one of its subclasses.

    An unrecognized tag falls back to the generic
    :class:`~music21.articulations.Articulation` (after a debug message)
    rather than raising a KeyError.

    Example: Provided an musicxml.mxObjects.ArticulationMark object (not an mxArticulations object)
    configure the music21 object.

    Create both a musicxml.mxObjects.ArticulationMark object and a conflicting music21 object:

    >>> mxArticulationMark = musicxml.mxObjects.ArticulationMark('accent')
    >>> mxArticulationMark.set('placement', 'below')
    >>> a = articulations.Tenuto()
    >>> a.placement = 'above'

    Now override the music21 object with the mxArticulationMark object's characteristics

    >>> musicxml.fromMxObjects.mxArticulationToArticulation(mxArticulationMark, inputM21 = a)
    >>> 'Tenuto' in a.classes
    False
    >>> 'Accent' in a.classes
    True
    >>> a.placement
    'below'
    '''
    # map MusicXML articulation tags to music21 articulation classes
    mappingList = {'accent'          : articulations.Accent,
                   'strong-accent'   : articulations.StrongAccent,
                   'staccato'        : articulations.Staccato,
                   'staccatissimo'   : articulations.Staccatissimo,
                   'spiccato'        : articulations.Spiccato,
                   'tenuto'          : articulations.Tenuto,
                   'detached-legato' : articulations.DetachedLegato,
                   'scoop'           : articulations.Scoop,
                   'plop'            : articulations.Plop,
                   'doit'            : articulations.Doit,
                   'falloff'         : articulations.Falloff,
                   'breath-mark'     : articulations.BreathMark,
                   'caesura'         : articulations.Caesura,
                   'stress'          : articulations.Stress,
                   'unstress'        : articulations.Unstress,
                   'other-articulation': articulations.Articulation,
                   }

    mxName = mxArticulationMark.tag
    if mxName not in mappingList:
        environLocal.printDebug("Cannot translate %s in %s." % (mxName, mxArticulationMark))
        # bug fix: previously the lookup below still ran and raised a
        # KeyError; fall back to the generic articulation instead
        artClass = articulations.Articulation
    else:
        artClass = mappingList[mxName]

    if inputM21 is None:
        art = artClass()
    else:
        art = inputM21
        # retarget the existing object's class, per the doctest contract
        art.__class__ = artClass

    art.placement = mxArticulationMark.get('placement')

    if inputM21 is None:
        return art
def mxOrnamentToExpressionOrArticulation(mxOrnament):
    '''
    Convert mxOrnament into a music21 ornament.

    This only processes non-spanner ornaments.
    Many mxOrnaments are spanners: these are handled elsewhere.

    Returns None if cannot be converted or not defined.
    '''
    orn = None
    #environLocal.printDebug(['calling mxOrnamentToExpressionOrArticulation with', mxOrnament])
    if isinstance(mxOrnament, mxObjects.TrillMark):
        orn = expressions.Trill()
        orn.placement = mxOrnament.get('placement')
    # NOTE(review): the Inverted* classes are tested before their base
    # classes -- if mxObjects.InvertedMordent/InvertedTurn subclass
    # Mordent/Turn, testing the base first would mis-translate every
    # inverted ornament as its plain form (confirm against mxObjects).
    elif isinstance(mxOrnament, mxObjects.InvertedMordent):
        orn = expressions.InvertedMordent()
    elif isinstance(mxOrnament, mxObjects.Mordent):
        orn = expressions.Mordent()
    elif isinstance(mxOrnament, mxObjects.InvertedTurn):
        orn = expressions.InvertedTurn()
    elif isinstance(mxOrnament, mxObjects.Turn):
        orn = expressions.Turn()
    elif isinstance(mxOrnament, mxObjects.Shake):
        orn = expressions.Shake()
    elif isinstance(mxOrnament, mxObjects.Schleifer):
        orn = expressions.Schleifer()
    return orn # may be None
#-------------------------------------------------------------------------------
# Chords
def mxToChord(mxNoteList, inputM21=None, spannerBundle=None):
    '''
    Given a list of mxNotes, fill the necessary parameters of a
    :class:`~music21.chord.Chord`.

    If `inputM21` is None a new Chord is created; otherwise the given
    object is configured. The `spannerBundle`, when supplied, receives
    any pending spanned-element assignment for this chord.

    >>> a = musicxml.mxObjects.Note()
    >>> p = musicxml.mxObjects.Pitch()
    >>> p.set('step', 'A')
    >>> p.set('octave', 3)
    >>> a.setDefaults()
    >>> a.set('pitch', p)
    >>> b = musicxml.mxObjects.Note()
    >>> b.setDefaults()
    >>> b.set('chord', True)
    >>> m = musicxml.mxObjects.Measure()
    >>> m.setDefaults()
    >>> a.external['measure'] = m # assign measure for divisions ref
    >>> a.external['divisions'] = m.external['divisions']
    >>> b.external['measure'] = m # assign measure for divisions ref
    >>> b.external['divisions'] = m.external['divisions']
    >>> c = musicxml.fromMxObjects.mxToChord([a, b])
    >>> len(c.pitches)
    2
    >>> c.pitches[0]
    <music21.pitch.Pitch A3>

    >>> a = musicxml.mxObjects.Note()
    >>> a.setDefaults()
    >>> nh1 = musicxml.mxObjects.Notehead()
    >>> nh1.set('charData', 'diamond')
    >>> a.noteheadObj = nh1
    >>> b = musicxml.mxObjects.Note()
    >>> b.setDefaults()
    >>> b.set('chord', True)
    >>> m = musicxml.mxObjects.Measure()
    >>> m.setDefaults()
    >>> a.external['measure'] = m # assign measure for divisions ref
    >>> a.external['divisions'] = m.external['divisions']
    >>> b.external['measure'] = m # assign measure for divisions ref
    >>> b.external['divisions'] = m.external['divisions']
    >>> c = musicxml.fromMxObjects.mxToChord([a, b])
    >>> c.getNotehead(c.pitches[0])
    'diamond'
    '''
    if inputM21 == None:
        c = chord.Chord()
    else:
        c = inputM21
    if spannerBundle is None:
        #environLocal.printDebug(['mxToNote()', 'creating SpannerBundle'])
        spannerBundle = spanner.SpannerBundle()
    else: # if we are passed in as spanner bundle, look for any pending
        # component assignments
        spannerBundle.freePendingSpannedElementAssignment(c)

    # assume that first chord is the same duration for all parts
    mxToDuration(mxNoteList[0], c.duration)

    # assume that first note in list has a grace object (and all do)
    mxGrace = mxNoteList[0].get('graceObj')

    # collect per-pitch attributes in parallel lists
    pitches = []
    ties = [] # store equally spaced list; use None if not defined
    noteheads = [] # store notehead attributes that correspond with pitches
    stemDirs = [] # store stem direction attributes that correspond with pitches

    for mxNote in mxNoteList:
        # extract pitch objects
        p = mxToPitch(mxNote)
        pitches.append(p)
        # extract notehead objects; may be None
        nh = mxNote.get('noteheadObj')
        noteheads.append(nh)
        # extract stem directions
        stemDir = mxNote.get('stem')
        stemDirs.append(stemDir)

        if len(mxNote.tieList) > 0:
            tieObj = mxToTie(mxNote)
            #environLocal.printDebug(['found tie in chord', tieObj])
            ties.append(tieObj)
        else: # need place holder for each pitch
            ties.append(None)
    # set all at once
    c.pitches = pitches
    # set beams from first note of chord
    beamsObj = mxToBeams(mxNoteList[0].beamList)
    c.beams = beamsObj

    # set ties based on pitches
    for i, t in enumerate(ties):
        if t is not None:
            # provide pitch to assign tie to based on index number
            c.setTie(t, pitches[i])
    # set notehead based on pitches
    for index, obj in enumerate(noteheads):
        if obj is not None:
            c.setNotehead(obj.charData, c.pitches[index])
            # set color per pitch
            c.setColor(obj.get('color'), c.pitches[index])
    # set stem direction based upon pitches
    for i, sd in enumerate(stemDirs):
        if sd != 'unspecified':
            c.setStemDirection(sd, c.pitches[i])
    # convert to a grace chord when the first mxNote carried a grace tag
    if mxGrace is not None:
        c = c.getGrace()

    return c
#-------------------------------------------------------------------------------
# Notes
def mxToNote(mxNote, spannerBundle=None, inputM21=None):
    '''
    Translate a MusicXML :class:`~music21.musicxml.mxObjects.Note`
    to a :class:`~music21.note.Note`.

    The `spannerBundle` parameter can be a list or a Stream
    for storing and processing Spanner objects.

    When `inputM21` is not None that object is configured; otherwise a
    new Note is created. Returns a `note.Note` object either way.

    >>> mxNote = musicxml.mxObjects.Note()
    >>> mxNote.setDefaults()
    >>> mxMeasure = musicxml.mxObjects.Measure()
    >>> mxMeasure.setDefaults()
    >>> mxMeasure.append(mxNote)
    >>> mxNote.external['measure'] = mxMeasure # manually create ref
    >>> mxNote.external['divisions'] = mxMeasure.external['divisions']
    >>> n = musicxml.fromMxObjects.mxToNote(mxNote)
    >>> n
    <music21.note.Note C>
    '''
    n = note.Note() if inputM21 is None else inputM21

    # pitch information is taken from the entire mxNote
    mxToPitch(mxNote, n.pitch)

    n.beams = mxToBeams(mxNote.beamList)

    stemValue = mxNote.get('stem')
    if stemValue is not None:
        n.stemDirection = stemValue

    # transfer notehead shape and color from the mxNote's notehead
    # object, when one is present
    noteheadObj = mxNote.get('noteheadObj')
    if noteheadObj is not None:
        if noteheadObj.charData not in ['', None]:
            n.notehead = noteheadObj.charData
        headColor = noteheadObj.get('color')
        if headColor is not None:
            n.color = headColor

    # remaining processing is shared between notes and rests
    return mxNoteToGeneralNoteHelper(n, mxNote, spannerBundle)
def mxToRest(mxNote, inputM21=None, spannerBundle=None):
    '''Translate a MusicXML :class:`~music21.musicxml.mxObjects.Note` object to a :class:`~music21.note.Rest`.

    If an `inputM21` object reference is provided, this object will be configured; otherwise, a new :class:`~music21.note.Rest` object is created and returned.
    '''
    # build a fresh Rest unless one was supplied to configure
    r = note.Rest() if inputM21 is None else inputM21
    # all shared note/rest processing happens in the helper
    return mxNoteToGeneralNoteHelper(r, mxNote, spannerBundle)
def mxNoteToGeneralNoteHelper(n, mxNote, spannerBundle=None):
    '''
    Helper function for attributes common to notes and rests.

    `n` can be a note or rest; it is configured in place from `mxNote`
    (duration, color, x-position, ties, articulations, expressions,
    and spanners) and the possibly grace-converted object is returned.
    '''
    # doing this will create an instance, but will not be passed
    # out of this method, and thus is only for testing
    if spannerBundle is None:
        #environLocal.printDebug(['mxToNote()', 'creating SpannerBundle'])
        spannerBundle = spanner.SpannerBundle()
    else: # if we are passed in as spanner bundle, look for any pending
        # component assignments
        spannerBundle.freePendingSpannedElementAssignment(n)

    # print object == 'no' and grace notes may have a type but not
    # a duration. they may be filtered out at the level of Stream
    # processing
    if mxNote.get('printObject') == 'no':
        n.hideObjectOnPrint = True
        #environLocal.printDebug(['got mxNote with printObject == no'])

    mxGrace = mxNote.get('graceObj')
    if mxGrace is not None:
        #environLocal.printDebug(['mxGrace', mxGrace, mxNote, n.duration])
        # in some cases grace notes may not have an assigned duration type
        # this default type is set here, before assigning to n.duration
        if mxNote.type is None:
            #environLocal.printDebug(['mxToNote', 'mxNote that is a grace missing duration type'])
            mxNote.type = 'eighth'

    # the n.duration object here will be configured based on mxNote
    mxToDuration(mxNote, n.duration)

    # get color from Note first; if not, try to get from notehead
    if mxNote.get('color') is not None:
        n.color = mxNote.get('color')

    # get x-positioning if any...
    if mxNote.get('default-x') is not None:
        n.xPosition = mxNote.get('default-x')

    # can use mxNote.tieList instead
    mxTieList = mxNote.get('tieList')
    if len(mxTieList) > 0:
        tieObj = mxToTie(mxNote) # m21 tie object
        # provide entire Note
        # n.tie is defined in GeneralNote as None by default
        n.tie = tieObj

    # things found in notations object:
    # articulations, slurs
    mxNotations = mxNote.get('notationsObj')
    if mxNotations is not None:
        # get a list of mxArticulationMarks, not mxArticulations
        mxArticulationMarkList = mxNotations.getArticulations()
        for mxObj in mxArticulationMarkList:
            articulationObj = mxArticulationToArticulation(mxObj)
            n.articulations.append(articulationObj)

        # get any technical marks, a list of mxTechnicalMarks, not mxTechnical
        # they live with articulations
        mxTechnicalMarkList = mxNotations.getTechnical()
        for mxObj in mxTechnicalMarkList:
            technicalObj = mxTechnicalToArticulation(mxObj)
            n.articulations.append(technicalObj)

        # get any fermatas, store on expressions
        mxFermataList = mxNotations.getFermatas()
        for mxObj in mxFermataList:
            fermataObj = mxFermataToFermata(mxObj)
            n.expressions.append(fermataObj)

        mxOrnamentsList = mxNotations.getOrnaments()
#         if len(mxOrnamentsList) > 0:
#             environLocal.printDebug(['mxOrnamentsList:', mxOrnamentsList])
        for mxOrnamentsObj in mxOrnamentsList:
            # each ornaments container holds individual ornament elements
            for mxObj in mxOrnamentsObj:
                post = mxOrnamentToExpressionOrArticulation(mxObj)
                if post is not None:
                    n.expressions.append(post)
                    #environLocal.printDebug(['adding to expressions', post])

        # create spanners:
        mxNotationsToSpanners(n, mxNotations, spannerBundle)

    # translate if necessary, otherwise leaves unchanged
    n = mxGraceToGrace(n, mxGrace)
    return n
#------------------------------------------------------------------------------
# Defaults
def mxDefaultsToScoreLayout(mxDefaults, inputM21=None):
    '''
    Convert a :class:`~music21.musicxml.mxObjects.Defaults`
    object to a :class:`~music21.layout.ScoreLayout`
    object, configuring scaling and page/system/staff layouts.
    '''
    scoreLayout = layout.ScoreLayout() if inputM21 is None else inputM21

    # transfer scaling information when present
    scaling = mxDefaults.scalingObj
    if scaling is not None:
        scoreLayout.scalingMillimeters = scaling.millimeters
        scoreLayout.scalingTenths = scaling.tenths

    for mxLayout in mxDefaults.layoutList:
        tag = mxLayout.tag
        if tag == 'page-layout':
            scoreLayout.pageLayout = mxPageLayoutToPageLayout(mxLayout)
        elif tag == 'system-layout':
            scoreLayout.systemLayout = mxSystemLayoutToSystemLayout(mxLayout)
        elif tag == 'staff-layout':
            # the xsd allows multiple staff-layout elements; collect them all
            scoreLayout.staffLayoutList.append(mxStaffLayoutToStaffLayout(mxLayout))

    return scoreLayout
#-------------------------------------------------------------------------------
# Measures
def addToStaffReference(mxObjectOrNumber, music21Object, staffReference):
    '''
    Utility routine for importing musicXML objects: record a reference
    to `music21Object` in the `staffReference` dictionary, keyed by the
    staff value. Staff values may be None, 1, 2, etc.
    '''
    #environLocal.printDebug(['addToStaffReference(): called with:', music21Object])
    if common.isListLike(mxObjectOrNumber):
        if len(mxObjectOrNumber) == 0:
            # an empty list: nothing to key off of
            environLocal.printDebug(['got an mxObject as an empty list', mxObjectOrNumber])
            return
        # for a chord, key off of the first component
        mxObjectOrNumber = mxObjectOrNumber[0]

    if hasattr(mxObjectOrNumber, 'staff'):
        key = mxObjectOrNumber.staff
    else:
        # some objects store staff assignment simply as number
        try:
            key = mxObjectOrNumber.get('number')
        except xmlnode.XMLNodeException:
            return
        except AttributeError: # a normal number
            key = mxObjectOrNumber

    # append under the staff key, creating the list on first use
    staffReference.setdefault(key, []).append(music21Object)
def mxToMeasure(mxMeasure, spannerBundle=None, inputM21=None, lastMeasureInfo=None):
'''
Translate an mxMeasure (a MusicXML :class:`~music21.musicxml.mxObjects.Measure` object)
into a music21 :class:`~music21.stream.Measure`.
If an `inputM21` object reference is provided, this object will be
configured and returned; otherwise, a new :class:`~music21.stream.Measure` object is created.
The `spannerBundle` that is passed in is used to accumulate any created Spanners.
This Spanners are not inserted into the Stream here.
Returns a tuple of (music21.stream.Measure object, staffReference (a dictionary for partStaffs of
elements that only belong to a single staff), and a transposition)
'''
if inputM21 == None:
m = stream.Measure()
else:
m = inputM21
# staff assignments: can create a dictionary with components in each
# staff; this dictionary will then be used to copy this measure and
# split components between two parts of more than one staff is defined
staffReference = {}
# doing this will create an instance, but will not be passed
# out of this method, and thus is only for testing
if spannerBundle is None:
#environLocal.printDebug(['mxToMeasure()', 'creating SpannerBundle'])
spannerBundle = spanner.SpannerBundle()
if lastMeasureInfo is not None:
lastMNum, lastMSuffix = lastMeasureInfo
else:
lastMNum, lastMSuffix = (None, None)
mNumRaw = mxMeasure.get('number')
if mNumRaw is None:
mNum = None
mSuffix = None
else:
mNum, mSuffix = common.getNumFromStr(mNumRaw)
# assume that measure numbers are integers
if mNum not in [None, '']:
m.number = int(mNum)
if mSuffix not in [None, '']:
m.numberSuffix = mSuffix
# fix for Finale which calls unnumbered measures X1, X2, etc. which
# we convert to 1.X, 2.X, etc. without this...
if lastMNum is not None:
if m.numberSuffix == 'X' and m.number != lastMNum + 1:
newSuffix = m.numberSuffix + str(m.number)
if lastMSuffix is not None:
newSuffix = lastMSuffix + newSuffix
m.number = lastMNum
m.numberSuffix = newSuffix
data = mxMeasure.get('width')
if data != None: # may need to do a format/unit conversion?
m.layoutWidth = data
# not yet implemented
junk = mxMeasure.get('implicit')
mxAttributes = mxMeasure.get('attributesObj')
mxAttributesInternal = True
# if we do not have defined mxAttributes, must get from stored attributes
if mxAttributes is None:
# need to keep track of where mxattributes src is coming from
# if attributes are defined in this measure, mxAttributesInternal
# is true
mxAttributesInternal = False
# not all measures have attributes definitions; this
# gets the last-encountered measure attributes
mxAttributes = mxMeasure.external['attributes']
if mxAttributes is None:
raise FromMxObjectsException(
'no mxAttribues available for this measure')
#environLocal.printDebug(['mxAttriutes clefList', mxAttributes.clefList,
# mxAttributesInternal])
staffLayoutObjects = []
# getting first for each of these for now
if mxAttributesInternal:
if len(mxAttributes.timeList) != 0:
for mxSub in mxAttributes.timeList:
ts = mxToTimeSignature(mxSub)
addToStaffReference(mxSub, ts, staffReference)
m._insertCore(0, ts)
if len(mxAttributes.clefList) != 0:
for mxClef in mxAttributes.clefList:
cl = mxClefToClef(mxClef)
addToStaffReference(mxClef, cl, staffReference)
m._insertCore(0, cl)
if len(mxAttributes.keyList) != 0:
for mxSub in mxAttributes.keyList:
ks = mxKeyListToKeySignature(mxSub)
addToStaffReference(mxSub, ks, staffReference)
m._insertCore(0, ks)
if len(mxAttributes.staffDetailsList) != 0:
for mxStaffDetails in mxAttributes.staffDetailsList:
foundMatch = False
# perhaps we've already put a staffLayout into the measure?
if mxStaffDetails._attr['number'] is not None:
for stl in staffLayoutObjects:
if stl.staffNumber == int(mxStaffDetails._attr['number']):
try:
stl.staffSize = float(mxStaffDetails.staffSize)
except TypeError:
if mxStaffDetails.staffSize is None:
pass
else:
raise TypeError("Incorrect number for mxStaffDetails.staffSize: %s", mxStaffDetails.staffSize)
foundMatch = True
break
else:
for stl in staffLayoutObjects:
if stl.staffSize is None:
stl.staffSize = float(mxStaffDetails.staffSize)
foundMatch = True
if stl.staffLines is None:
stl.staffLines = int(mxStaffDetails.staffLines)
foundMatch = True
if foundMatch is False:
staffSize = None
try:
staffSize = float(mxStaffDetails.staffSize)
except TypeError:
staffSize = None
staffLines = None
try:
staffLines = int(mxStaffDetails.staffLines)
except TypeError:
staffLines = 5
if mxStaffDetails._attr['number'] is not None:
stl = layout.StaffLayout(staffSize = staffSize, staffLines = staffLines, staffNumber=int(mxStaffDetails._attr['number']))
else:
stl = layout.StaffLayout(staffSize = staffSize, staffLines = staffLines)
if 'print-object' in mxStaffDetails._attr:
staffPrinted = mxStaffDetails._attr['print-object']
if staffPrinted == 'no' or staffPrinted is False:
stl.hidden = True
elif staffPrinted == 'yes' or staffPrinted is True:
stl.hidden = False
#else:
# print mxStaffDetails._attr
addToStaffReference(mxStaffDetails, stl, staffReference)
m._insertCore(0, stl)
staffLayoutObjects.append(stl)
#staffLayoutsAlreadySetList.append(stl)
#print "Got an mxStaffDetails %r" % mxStaffDetails
# transposition may be defined for a Part in the Measure attributes
transposition = None
if mxAttributesInternal and mxAttributes.transposeObj is not None:
# get interval object
transposition = mxTransposeToInterval(mxAttributes.transposeObj)
#environLocal.printDebug(['mxToMeasure: got transposition', transposition])
if mxAttributes.divisions is not None:
divisions = mxAttributes.divisions
else:
divisions = mxMeasure.external['divisions']
if divisions is None:
environLocal.printDebug(['cannot get a division from mxObject', m, "mxMeasure.external['divisions']", mxMeasure.external['divisions']])
raise FromMxObjectsException('cannot get a division from mxObject')
if mxMeasure.getVoiceCount() > 1:
useVoices = True
# count from zero
for voiceId in mxMeasure.getVoiceIndices():
v = stream.Voice()
v.id = voiceId
m._insertCore(0, v)
else:
useVoices = False
# iterate through components found on components list
# set to zero for each measure
offsetMeasureNote = 0 # offset of note w/n measure
mxNoteList = [] # for accumulating notes in chords
mxLyricList = [] # for accumulating lyrics assigned to chords
nLast = None # store the last-create music21 note for Spanners
restAndNoteCount = {'rest': 0, 'note': 0}
chordVoice = None # Sibelius 7.1 only puts a <voice> tag on the
# first note of a chord, so we need to make sure
# that we keep track of the last voice...
for i in range(len(mxMeasure)):
# try to get the next object for chord comparisons
mxObj = mxMeasure[i]
if i < len(mxMeasure) - 1:
mxObjNext = mxMeasure[i + 1]
else:
mxObjNext = None
#environLocal.printDebug(['handling', mxObj])
# NOTE: tests have shown that using isinstance() here is much faster
# than checking the .tag attribute.
# check for backup and forward first
if isinstance(mxObj, mxObjects.Backup):
# resolve as quarterLength, subtract from measure offset
#environLocal.printDebug(['found musicxl backup:', mxObj.duration])
offsetMeasureNote -= float(mxObj.duration) / float(divisions)
continue
elif isinstance(mxObj, mxObjects.Forward):
# resolve as quarterLength, add to measure offset
#environLocal.printDebug(['found musicxl forward:', mxObj.duration, 'divisions', divisions])
offsetMeasureNote += float(mxObj.duration) / float(divisions)
continue
elif isinstance(mxObj, mxObjects.Print):
# mxPrint objects may be found in a Measure's components
# contain page or system layout information among others
mxPrint = mxObj
addPageLayout = False
addSystemLayout = False
addStaffLayout = False
try:
addPageLayout = mxPrint.get('new-page')
if addPageLayout is not None:
addPageLayout = True # false for No??
else:
addPageLayout = False
except xmlnode.XMLNodeException:
pass
if not addPageLayout:
try:
addPageLayout = mxPrint.get('page-number')
if addPageLayout is not None:
addPageLayout = True
else:
addPageLayout = False
except xmlnode.XMLNodeException:
addPageLayout = False
if not addPageLayout:
for layoutType in mxPrint.componentList:
if isinstance(layoutType, mxObjects.PageLayout):
addPageLayout = True
break
try:
addSystemLayout = mxPrint.get('new-system')
if addSystemLayout is not None:
addSystemLayout = True # false for No?
else:
addSystemLayout = False
except xmlnode.XMLNodeException:
pass
if not addSystemLayout:
for layoutType in mxPrint.componentList:
if isinstance(layoutType, mxObjects.SystemLayout):
addSystemLayout = True
break
for layoutType in mxPrint.componentList:
if isinstance(layoutType, mxObjects.StaffLayout):
addStaffLayout = True
break
#--- now we know what we need to add, add em
if addPageLayout:
pl = mxPrintToPageLayout(mxPrint)
# store at zero position
m._insertCore(0, pl)
if addSystemLayout or not addPageLayout:
sl = mxPrintToSystemLayout(mxPrint)
# store at zero position
m._insertCore(0, sl)
if addStaffLayout:
stlList = mxPrintToStaffLayoutList(mxPrint)
for stl in stlList:
foundPrevious = False
for stlSetFromAttributes in staffLayoutObjects:
if stlSetFromAttributes.staffNumber == stl.staffNumber or stlSetFromAttributes.staffNumber is None or stl.staffNumber is None:
foundPrevious = True
stlSetFromAttributes.distance = stl.distance
if stlSetFromAttributes.hidden is None:
stlSetFromAttributes.hidden = stl.hidden
break
if foundPrevious is False:
addToStaffReference(str(stl.staffNumber), stl, staffReference)
m._insertCore(0, stl)
# <sound> tags may be found in the Measure, used to define tempo
elif isinstance(mxObj, mxObjects.Sound):
pass
elif isinstance(mxObj, mxObjects.Barline):
# repeat is a tag found in the barline object
mxBarline = mxObj
mxRepeatObj = mxBarline.get('repeatObj')
if mxRepeatObj is not None:
barline = mxToRepeat(mxBarline)
else:
barline = mxToBarline(mxBarline)
# barline objects also store ending objects, that mark begin
# and end of repeat bracket designations
mxEndingObj = mxBarline.get('endingObj')
if mxEndingObj is not None:
#environLocal.printDebug(['found mxEndingObj', mxEndingObj, 'm', m])
# get all incomplete spanners of the appropriate class that are
# not complete
rbSpanners = spannerBundle.getByClass('RepeatBracket').getByCompleteStatus(False)
# if we have no complete bracket objects, must start a new one
if len(rbSpanners) == 0:
# create with this measure as the object
rb = spanner.RepeatBracket(m)
# there may just be an ending marker, and no start
# this implies just one measure
if mxEndingObj.get('type') in ['stop', 'discontinue']:
rb.completeStatus = True
rb.number = mxEndingObj.get('number')
# set number; '' or None is interpreted as 1
spannerBundle.append(rb)
# if we have any incomplete, this must be the end
else:
#environLocal.printDebug(['matching RepeatBracket spanner', 'len(rbSpanners)', len(rbSpanners)])
rb = rbSpanners[0] # get RepeatBracket
# try to add this measure; may be the same
rb.addSpannedElements(m)
# in general, any rb found should be the opening, and thus
# this is the closing; can check
if mxEndingObj.get('type') in ['stop', 'discontinue']:
rb.completeStatus = True
rb.number = mxEndingObj.get('number')
else:
environLocal.warn('found mxEnding object that is not stop message, even though there is still an open start message. -- ignoring it')
if barline.location == 'left':
#environLocal.printDebug(['setting left barline', barline])
m.leftBarline = barline
elif barline.location == 'right':
#environLocal.printDebug(['setting right barline', barline])
m.rightBarline = barline
else:
environLocal.printDebug(['not handling barline that is neither left nor right', barline, barline.location])
elif isinstance(mxObj, mxObjects.Note):
mxNote = mxObj
if isinstance(mxObjNext, mxObjects.Note):
mxNoteNext = mxObjNext
else:
mxNoteNext = None
if mxNote.get('print-object') == 'no':
#environLocal.printDebug(['got mxNote with printObject == no', 'measure number', m.number])
continue
# mxGrace = mxNote.get('graceObj')
# if mxGrace is not None: # graces have a type but not a duration
# #environLocal.printDebug(['got mxNote with an mxGrace', 'duration', mxNote.get('duration'), 'measure number',
# #m.number])
# continue
# the first note of a chord is not identified directly; only
# by looking at the next note can we tell if we have the first
# note of a chord
if mxNoteNext is not None and mxNoteNext.get('chord') is True:
if mxNote.get('chord') is False:
mxNote.set('chord', True) # set the first as a chord
if mxNote.voice is not None:
chordVoice = mxNote.voice
if mxNote.get('rest') in [None, False]: # it is a note
# if a chord, do not increment until chord is complete
if mxNote.get('chord') is True:
mxNoteList.append(mxNote)
offsetIncrement = 0
# store lyrics for latter processing
for mxLyric in mxNote.lyricList:
mxLyricList.append(mxLyric)
else:
restAndNoteCount['note'] += 1
try:
n = mxToNote(mxNote, spannerBundle=spannerBundle)
except FromMxObjectsException as strerror:
raise FromMxObjectsException('cannot translate note in measure %s: %s' % (mNumRaw, strerror))
addToStaffReference(mxNote, n, staffReference)
if useVoices:
useVoice = mxNote.voice
if useVoice is None:
useVoice = chordVoice
if useVoice is None:
environLocal.warn("Cannot translate a note with a missing voice tag when no previous voice tag was given. Assuming voice 1... Object is %r " % mxNote)
useVoice = 1
thisVoice = m.voices[useVoice]
if thisVoice is None:
environLocal.warn('Cannot find voice %d for Note %r; putting outside of voices...' % (mxNote.voice, mxNote))
m._insertCore(offsetMeasureNote, n)
else:
thisVoice._insertCore(offsetMeasureNote, n)
else:
m._insertCore(offsetMeasureNote, n)
offsetIncrement = n.quarterLength
currentLyricNumber = 1
for mxLyric in mxNote.lyricList:
lyricObj = mxToLyric(mxLyric)
if lyricObj.number == 0:
lyricObj.number = currentLyricNumber
n.lyrics.append(lyricObj)
currentLyricNumber += 1
nLast = n # update
# if mxNote.get('notationsObj') is not None:
# for mxObjSub in mxNote.get('notationsObj'):
# # deal with ornaments, trill, etc
# pass
else: # its a rest
restAndNoteCount['rest'] += 1
n = note.Rest()
mxToRest(mxNote, inputM21=n)
addToStaffReference(mxNote, n, staffReference)
#m.insert(offsetMeasureNote, n)
if useVoices:
vCurrent = m.voices[mxNote.voice]
if vCurrent is not None:
vCurrent._insertCore(offsetMeasureNote, n)
else:
# this can happen when a part defines multiple staves
# where one staff uses voices but the other staff does not
m._insertCore(offsetMeasureNote, n)
#print m, n, mxNote
else:
m._insertCore(offsetMeasureNote, n)
offsetIncrement = n.quarterLength
nLast = n # update
# if we we have notes in the note list and the next
# note either does not exist or is not a chord, we
# have a complete chord
if len(mxNoteList) > 0 and (mxNoteNext is None
or mxNoteNext.get('chord') is False):
c = mxToChord(mxNoteList, spannerBundle=spannerBundle)
# add any accumulated lyrics
currentLyricNumber = 1
for mxLyric in mxLyricList:
lyricObj = mxToLyric(mxLyric)
if lyricObj.number == 0:
lyricObj.number = currentLyricNumber
c.lyrics.append(lyricObj)
currentLyricNumber += 1
addToStaffReference(mxNoteList, c, staffReference)
if useVoices:
useVoice = mxNote.voice
if useVoice is None:
useVoice = chordVoice
if useVoice is None:
environLocal.warn("Cannot translate a note with a missing voice tag when no previous voice tag was given. Assuming voice 1... Object is %r " % mxNote)
useVoice = 1
thisVoice = m.voices[useVoice]
if thisVoice is None:
environLocal.warn('Cannot find voice %d for Note %r; putting outside of voices...' % (mxNote.voice, mxNote))
m._insertCore(offsetMeasureNote, c)
else:
thisVoice._insertCore(offsetMeasureNote, c)
else:
m._insertCore(offsetMeasureNote, c)
mxNoteList = [] # clear for next chord
mxLyricList = []
offsetIncrement = c.quarterLength
nLast = c # update
# only increment Chords after completion
offsetMeasureNote += offsetIncrement
# mxDirections can be dynamics, repeat expressions, text expressions
elif isinstance(mxObj, mxObjects.Direction):
offsetDirection = mxToOffset(mxObj, divisions)
if mxObj.getDynamicMark() is not None:
# in rare cases there may be more than one dynamic in the same
# direction, so we iterate
for d in mxToDynamicList(mxObj):
addToStaffReference(mxObj, d, staffReference)
#m.insert(offsetMeasureNote, d)
m._insertCore(offsetMeasureNote + offsetDirection, d)
mxDirectionToSpanners(nLast, mxObj, spannerBundle)
# TODO: multiple spanners
# if mxObj.getWedge() is not None:
# w = mxToWedge(mxObj)
# addToStaffReference(mxObj, w, staffReference)
# m._insertCore(offsetMeasureNote, w)
if mxObj.getSegno() is not None:
rm = mxToSegno(mxObj.getSegno())
addToStaffReference(mxObj, rm, staffReference)
m._insertCore(offsetMeasureNote, rm)
if mxObj.getCoda() is not None:
rm = mxToCoda(mxObj.getCoda())
addToStaffReference(mxObj, rm, staffReference)
m._insertCore(offsetMeasureNote, rm)
if mxObj.getMetronome() is not None:
#environLocal.printDebug(['got getMetronome', mxObj.getMetronome()])
mm = mxToTempoIndication(mxObj.getMetronome())
addToStaffReference(mxObj, mm, staffReference)
# need to look for metronome marks defined above
# and look for text defined below
m._insertCore(offsetMeasureNote, mm)
if mxObj.getWords() is not None:
# TODO: need to look for tempo words if we have a metro
#environLocal.printDebug(['found mxWords object', mxObj])
# convert into a list of TextExpression objects
# this may be a TextExpression, or a RepeatExpression
for te in mxToTextExpression(mxObj):
#environLocal.printDebug(['got TextExpression object', repr(te)])
# offset here is a combination of the current position
# (offsetMeasureNote) and and the direction's offset
re = te.getRepeatExpression()
if re is not None:
# the repeat expression stores a copy of the text
# expression within it; replace it here on insertion
addToStaffReference(mxObj, re, staffReference)
m._insertCore(offsetMeasureNote + offsetDirection, re)
else:
addToStaffReference(mxObj, te, staffReference)
m._insertCore(offsetMeasureNote + offsetDirection, te)
elif isinstance(mxObj, mxObjects.Harmony):
mxHarmony = mxObj
h = mxToChordSymbol(mxHarmony)
addToStaffReference(mxObj, h, staffReference)
m._insertCore(offsetMeasureNote, h)
elif isinstance(mxObj, mxObjects.Clef):
cl = mxClefToClef(mxObj)
addToStaffReference(mxObj, cl, staffReference)
m._insertCore(offsetMeasureNote, cl)
#environLocal.printDebug(['staffReference', staffReference])
# if we have voices and/or if we used backup/forward, we may have
# empty space in the stream
if useVoices:
for v in m.voices:
if len(v) > 0: # do not bother with empty voices
v.makeRests(inPlace=True)
v._elementsChanged()
m._elementsChanged()
if restAndNoteCount['rest'] == 1 and restAndNoteCount['note'] == 0:
# full measure rest with no notes...
if useVoices:
pass # should do this on a per voice basis...
m._fullMeasureRest = False
else:
m._fullMeasureRest = True
else:
m._fullMeasureRest = False
return m, staffReference, transposition
#-------------------------------------------------------------------------------
# Streams
def mxToStreamPart(mxScore, partId, spannerBundle=None, inputM21=None):
    '''
    Load a part into a new Stream or one provided by
    `inputM21` given an mxScore and a part name.

    The `spannerBundle` reference, when passed in,
    is used to accumulate Spanners. These are not inserted here.

    Though it is incorrect MusicXML, PDFtoMusic creates
    empty measures when it should create full
    measures of rests (possibly hidden). This routine
    fixes that bug. See http://musescore.org/en/node/15129

    mxScore: the parent mxObjects.Score to pull the part from.
    partId: the MusicXML part id (string) identifying the part.
    spannerBundle: a spanner.SpannerBundle used to collect in-progress
        spanners across measures; a new one is created if None.
    inputM21: a Score to insert the resulting Part into; a new Score
        is created if None.
    Returns the created stream.Part (or the last PartStaff when the
    part defines multiple staves -- see end of function).
    '''
    #environLocal.printDebug(['calling Stream.mxToStreamPart'])
    if inputM21 == None:
        # need a Score to load parts into
        s = stream.Score()
    else:
        s = inputM21
    if spannerBundle == None:
        spannerBundle = spanner.SpannerBundle()
    mxPart = mxScore.getPart(partId)
    # in some cases there may be more than one instrument defined
    # in each score part; this has not been tested
    mxInstrument = mxScore.getScorePart(partId)
    # create a new music21 instrument
    instrumentObj = instrument.Instrument()
    if mxInstrument is not None: # mxInstrument is a ScorePart
        # need an mxScorePart here
        mxToInstrument(mxInstrument, instrumentObj)
    # add part id as group
    instrumentObj.groups.append(partId)
    streamPart = stream.Part() # create a part instance for each part
    # always assume at sounding, unless transposition is defined in attributes
    streamPart.atSoundingPitch = True
    # set part id to stream best name
    if instrumentObj.bestName() is not None:
        streamPart.id = instrumentObj.bestName()
    streamPart._insertCore(0, instrumentObj) # add instrument at zero offset
    # one staffReference dict is collected per measure; used later to
    # separate multi-staff parts into PartStaff objects
    staffReferenceList = []
    # offset is in quarter note length
    oMeasure = 0.0
    lastTimeSignature = None
    lastTransposition = None # may change at measure boundaries
    lastMeasureWasShort = False # keep track of whether the last measure was short...
    lastMeasureNumber = 0
    lastMeasureSuffix = None
    for i, mxMeasure in enumerate(mxPart):
        # t here is transposition, if defined; otherwise it is None
        try:
            m, staffReference, t = mxToMeasure(mxMeasure,
                                               spannerBundle=spannerBundle,
                                               lastMeasureInfo=(lastMeasureNumber, lastMeasureSuffix))
        except Exception as e:
            import sys
            measureNumber = "unknown"
            try:
                measureNumber = mxMeasure.get('number')
            # NOTE(review): bare except deliberately swallows any failure to
            # read the measure number; we are already handling an error here
            except:
                pass
            # see http://stackoverflow.com/questions/6062576/adding-information-to-a-python-exception
            execInfoTuple = sys.exc_info()
            if hasattr(e, 'message'):
                emessage = e.message
            else:
                emessage = execInfoTuple[0].__name__ + " : " #+ execInfoTuple[1].__name__
            message = "In measure (" + measureNumber + "): " + emessage
            # re-raise the same exception type with measure context attached
            raise type(e)(type(e)(message), pprint.pformat(traceback.extract_tb(execInfoTuple[2])))
        if t is not None:
            if lastTransposition is None and i == 0: # if this is the first
                #environLocal.printDebug(['transposition', t])
                instrumentObj.transposition = t
            else: # if not the first measure, need to copy as well
                # for now, copy Instrument, change transposition,
                # could insert in part, or in measure
                newInst = copy.deepcopy(instrumentObj)
                newInst.transposition = t
                streamPart._insertCore(oMeasure, newInst)
            # if a transposition is defined in musicxml, we assume it is
            # at written pitch
            streamPart.atSoundingPitch = False
            # store last for comparison
            lastTransposition = t
        # there will be one for each measure
        staffReferenceList.append(staffReference)
        if m.number != lastMeasureNumber:
            # we do this check so that we do not compound suffixes, i.e.:
            # 23, 23.X1, 23.X1X2, 23.X1X2X3
            # and instead just do:
            # 23, 23.X1, 23.X2, etc.
            lastMeasureNumber = m.number
            lastMeasureSuffix = m.numberSuffix
        if m.timeSignature is not None:
            lastTimeSignature = m.timeSignature
        elif lastTimeSignature is None and m.timeSignature is None:
            # if no time sigature is defined, need to get a default
            ts = meter.TimeSignature()
            ts.load('%s/%s' % (defaults.meterNumerator,
                               defaults.meterDenominatorBeatType))
            lastTimeSignature = ts
        if m._fullMeasureRest is True:
            # a full-measure rest imported with a default 4.0ql duration is
            # stretched/shrunk to fill the actual bar length
            r1 = m.getElementsByClass('Rest')[0]
            if r1.duration.quarterLength == 4.0 and r1.duration.quarterLength != lastTimeSignature.barDuration.quarterLength:
                r1.duration.quarterLength = lastTimeSignature.barDuration.quarterLength
                m._elementsChanged()
        del(m._fullMeasureRest)
        # add measure to stream at current offset for this measure
        streamPart._insertCore(oMeasure, m)
        # note: we cannot assume that the time signature properly
        # describes the offsets w/n this bar. need to look at
        # offsets within measure; if the .highestTime value is greater
        # use this as the next offset
        mHighestTime = m.highestTime
        lastTimeSignatureQuarterLength = lastTimeSignature.barDuration.quarterLength
        if mHighestTime >= lastTimeSignatureQuarterLength :
            mOffsetShift = mHighestTime
        elif mHighestTime == 0.0 and len(m.flat.notesAndRests) == 0:
            ## this routine fixes a bug in PDFtoMusic and other MusicXML writers
            ## that omit empty rests in a Measure. It is a very quick test if
            ## the measure has any notes. Slower if it does not.
            r = note.Rest()
            r.duration.quarterLength = lastTimeSignatureQuarterLength
            m.insert(0.0, r)
            mOffsetShift = lastTimeSignatureQuarterLength
        else: # use time signature
            # for the first measure, this may be a pickup
            # must detect this when writing, as next measures offsets will be
            # incorrect
            if oMeasure == 0.0:
                # cannot get bar duration proportion if cannot get a ts
                if m.barDurationProportion() < 1.0:
                    m.padAsAnacrusis()
                    #environLocal.printDebug(['incompletely filled Measure found on musicxml import; interpreting as a anacrusis:', 'padingLeft:', m.paddingLeft])
                mOffsetShift = mHighestTime
            # assume that, even if measure is incomplete, the next bar should
            # start at the duration given by the time signature, not highestTime
            ### no...let's not do this...
            else:
                mOffsetShift = mHighestTime #lastTimeSignatureQuarterLength
                if lastMeasureWasShort is True:
                    if m.barDurationProportion() < 1.0:
                        m.padAsAnacrusis() # probably a pickup after a repeat or phrase boundary or something
                        lastMeasureWasShort = False
                else:
                    if mHighestTime < lastTimeSignatureQuarterLength:
                        lastMeasureWasShort = True
                    else:
                        lastMeasureWasShort = False
        oMeasure += mOffsetShift
    # if we have multiple staves defined, add more parts, and transfer elements
    # note: this presently has to look at _idLastDeepCopyOf to get matches
    # to find removed elements after copying; this is probably not the
    # best way to do this. # V2.1 -- is not/will not be doing this. in fact idLastDeepCopyOf is
    # going away...
    # for this part, if any elements are components in the spannerBundle,
    # then then we need to update the spannerBundle after the part is copied
    streamPartStaff = None
    if mxPart.getStavesCount() > 1:
        separateOutPartStaffs(mxPart, streamPart, spannerBundle, s, staffReferenceList, partId)
    else:
        streamPart.addGroupForElements(partId) # set group for components
        streamPart.groups.append(partId) # set group for stream itself
        # TODO: this does not work with voices; there, Spanners
        # will be copied into the Score
        # copy spanners that are complete into the part, as this is the
        # highest level container that needs them
        rm = []
        for sp in spannerBundle.getByCompleteStatus(True):
            streamPart._insertCore(0, sp)
            rm.append(sp)
        # remove from original spanner bundle
        for sp in rm:
            spannerBundle.remove(sp)
        # s is the score; adding the aprt to the score
        streamPart._elementsChanged()
        s._insertCore(0, streamPart)
    s._elementsChanged()
    # when adding parts to this Score
    # this assumes all start at the same place
    # even if there is only one part, it will be placed in a Stream
    # NOTE(review): streamPartStaff is never reassigned above, so the
    # streamPart branch is always taken -- looks like vestigial code;
    # confirm before removing
    if streamPartStaff is not None:
        return streamPartStaff
    else:
        return streamPart
def separateOutPartStaffs(mxPart, streamPart, spannerBundle, s, staffReferenceList, partId):
    '''
    given an mxPart and other necessary information, insert into the score (s) multiple
    PartStaff objects separating the information for one part from the other

    mxPart: the mxObjects part defining more than one staff.
    streamPart: the fully-populated stream.Part to be split.
    spannerBundle: spanner.SpannerBundle; complete spanners are moved into
        streamPart before copying so the deepcopy keeps them consistent.
    s: the Score that receives one stream.PartStaff per staff.
    staffReferenceList: one dict per measure mapping staff number -> elements,
        as collected by mxToMeasure; used to decide what to delete from each copy.
    partId: the MusicXML part id; used to build per-staff ids like 'P1-Staff2'.
    Returns None; works by side effect on `s`.
    '''
    # transfer all spanners to the streamPart such that they get
    # updated in copying, then remove them
    rm = []
    for sp in spannerBundle.getByCompleteStatus(True):
        streamPart._insertCore(0, sp)
        rm.append(sp)
    # remove from original spanner bundle
    for sp in rm:
        spannerBundle.remove(sp)
    # get staves will return a number, between 1 and count
    #for staffCount in range(mxPart.getStavesCount()):
    for staffNumber in _getUniqueStaffKeys(staffReferenceList):
        partStaffId = '%s-Staff%s' % (partId, staffNumber)
        #environLocal.printDebug(['partIdStaff', partIdStaff, 'copying streamPart'])
        # this deepcopy is necessary, as we will remove components
        # in each staff that do not belong
        # TODO: Do n-1 deepcopies, instead of n, since the last PartStaff can just remove from the original Part
        streamPartStaff = copy.deepcopy(streamPart)
        # assign this as a PartStaff, a subclass of Part
        streamPartStaff.__class__ = stream.PartStaff
        streamPartStaff.id = partStaffId
        # remove all elements that are not part of this staff
        mStream = streamPartStaff.getElementsByClass('Measure')
        for i, staffReference in enumerate(staffReferenceList):
            staffExclude = _getStaffExclude(staffReference, staffNumber)
            if len(staffExclude) > 0:
                m = mStream[i]
                for eRemove in staffExclude:
                    # the copies are matched to the originals through their
                    # deepcopy derivation records
                    for eMeasure in m:
                        if eMeasure.derivation.origin is eRemove and eMeasure.derivation.method == '__deepcopy__':
                            m.remove(eMeasure)
                            break
                    for v in m.voices:
                        v.remove(eRemove)
                        for eVoice in v.elements:
                            if eVoice.derivation.origin is eRemove and eVoice.derivation.method == '__deepcopy__':
                                v.remove(eVoice)
                # after adjusting voices see if voices can be reduced or
                # removed
                #environLocal.printDebug(['calling flattenUnnecessaryVoices: voices before:', len(m.voices)])
                m.flattenUnnecessaryVoices(force=False, inPlace=True)
                #environLocal.printDebug(['calling flattenUnnecessaryVoices: voices after:', len(m.voices)])
        # TODO: copying spanners may have created orphaned
        # spanners that no longer have valid connections
        # in this part; should be deleted
        streamPartStaff.addGroupForElements(partStaffId)
        streamPartStaff.groups.append(partStaffId)
        streamPartStaff._elementsChanged()
        s._insertCore(0, streamPartStaff)
def _getUniqueStaffKeys(staffReferenceList):
'''
Given a list of staffReference dictionaries,
collect and return a list of all unique keys except None
'''
post = []
for staffReference in staffReferenceList:
for key in staffReference:
if key is not None and key not in post:
post.append(key)
post.sort()
# if len(post) > 0:
# print post
return post
def _getStaffExclude(staffReference, targetKey):
'''
Given a staff reference dictionary, remove and combine in a list all elements that
are not part of the given key. Thus, return a list of all entries to remove.
It keeps those elements under staff key None (common to all) and
those under given key. This then is the list of all elements that should be deleted.
'''
post = []
for key in staffReference:
if key is None or int(key) == int(targetKey):
continue
post += staffReference[key]
return post
def mxScoreToScore(mxScore, spannerBundle=None, inputM21=None):
    '''
    Translate an mxScore into a music21 Score object
    or puts it into the
    given inputM21 object (which does not necessarily
    have to be a :class:`~music21.stream.Score`
    object. It can be any :class:`~music21.stream.Stream`
    object)

    All spannerBundles accumulated at all lower levels
    are inserted here.

    mxScore: the mxObjects.Score to translate.
    spannerBundle: optional spanner.SpannerBundle to accumulate spanners;
        a new one is created if None.
    inputM21: optional Stream to populate; a new Score is created if None.
    Returns the populated Score/Stream.
    Raises FromMxObjectsException if a part-group references an unknown part id.
    '''
    # TODO: may not want to wait to this leve to insert spanners; may want to
    # insert in lower positions if it makes sense
    if inputM21 == None:
        s = stream.Score()
    else:
        s = inputM21
    if spannerBundle == None:
        spannerBundle = spanner.SpannerBundle()
    mxPartIds = mxScore.getPartIdsFromPartListObj()
    #print mxPartIds
    #mxPartIdDictionary = mxScore.partIdToNameDict()
    m21PartIdDictionary = {}
    # values are part names
    #partNameIds = mxPartIdDictionary.keys()
    #partNameIds.sort()
    #for partId in partNameIds: # part names are part ids
    for pNum, partId in enumerate(mxPartIds): # part names are part ids
        # NOTE: setting partId not partId: might change
        # return the part; however, it is still already attached to the Score
        try:
            part = mxToStreamPart(mxScore, partId=partId,
                                  spannerBundle=spannerBundle, inputM21=s)
        except Exception as e:
            import sys
            # see http://stackoverflow.com/questions/6062576/adding-information-to-a-python-exception
            execInfoTuple = sys.exc_info()
            if hasattr(e, 'message'):
                emessage = e.message
            else:
                emessage = str(execInfoTuple[1])
            message = "For part number " + str(pNum + 1) + ", with Id (" + partId + "): " + emessage
            # re-raise the same exception type, annotated with part context
            raise type(e)(type(e)(message), pprint.pformat(traceback.extract_tb(execInfoTuple[2])))
        # update dictionary to store music21 part
        m21PartIdDictionary[partId] = part
        #print("%r %s %r" % (m21PartIdDictionary, partId, part))
    # get part/staff groups
    #environLocal.printDebug(['partgroups:', mxScore.getPartGroupData()])
    partGroupData = mxScore.getPartGroupData()
    for partGroup in partGroupData: # a list of dictionaries
        # create music21 spanner StaffGroup
        sg = layout.StaffGroup()
        for partId in partGroup['scorePartIds']:
            # get music21 part from partIdDictionary
            try:
                sg.addSpannedElements(m21PartIdDictionary[partId])
            except KeyError as ke:
                raise FromMxObjectsException("Cannot find part in m21PartIdDictionary: %s \n Full Dict:\n %r " % (ke, m21PartIdDictionary))
        # use configuration routine to transfer/set attributes;
        # sets complete status as well
        configureStaffGroupFromMxPartGroup(sg, partGroup['partGroup'])
        spannerBundle.append(sg) # will be added to the Score
    # add metadata object; this is placed after all other parts now
    # these means that both Parts and other objects live on Stream.
    md = mxScoreToMetadata(mxScore)
    s._insertCore(0, md)
    if mxScore.defaultsObj is not None:
        scoreLayout = mxDefaultsToScoreLayout(mxScore.defaultsObj)
        s._insertCore(0, scoreLayout)
    # store credits on Score stream
    for mxCredit in mxScore.creditList:
        co = mxCreditToTextBox(mxCredit)
        s._insertCore(0, co) # insert position does not matter
    ## get supports information
    mxIdentification = mxScore.identificationObj
    if mxIdentification is not None:
        mxEncoding = mxIdentification.encodingObj
        if mxEncoding is not None:
            # <supports> flags tell us whether system/page breaks in the file
            # are authorial and should be respected
            for mxSupports in mxEncoding.supportsList:
                if (mxSupports.get('attribute') == 'new-system' and
                        mxSupports.get('value') == 'yes'):
                    s.definesExplicitSystemBreaks = True
                    for p in s.parts:
                        p.definesExplicitSystemBreaks = True
                elif (mxSupports.get('attribute') == 'new-page' and
                        mxSupports.get('value') == 'yes'):
                    s.definesExplicitPageBreaks = True
                    for p in s.parts:
                        p.definesExplicitPageBreaks = True
    # only insert complete spanners; at each level possible, complete spanners
    # are inserted into either the Score or the Part
    # storing complete Part spanners in a Part permits extracting parts with spanners
    rm = []
    for sp in spannerBundle.getByCompleteStatus(True):
        s._insertCore(0, sp)
        rm.append(sp)
    for sp in rm:
        spannerBundle.remove(sp)
    s._elementsChanged()
    return s
#------------------------------------------------------------------------------
# beam and beams
def mxToBeam(mxBeam, inputM21 = None):
    '''
    given an mxBeam object return a :class:`~music21.beam.Beam` object

    >>> mxBeam = musicxml.mxObjects.Beam()
    >>> mxBeam.set('charData', 'begin')
    >>> a = musicxml.fromMxObjects.mxToBeam(mxBeam)
    >>> a.type
    'start'

    >>> mxBeam.set('charData', 'continue')
    >>> a = musicxml.fromMxObjects.mxToBeam(mxBeam)
    >>> a.type
    'continue'

    >>> mxBeam.set('charData', 'end')
    >>> a = musicxml.fromMxObjects.mxToBeam(mxBeam)
    >>> a.type
    'stop'

    >>> mxBeam.set('charData', 'forward hook')
    >>> a = musicxml.fromMxObjects.mxToBeam(mxBeam)
    >>> a.type
    'partial'
    >>> a.direction
    'right'

    >>> mxBeam.set('charData', 'backward hook')
    >>> a = musicxml.fromMxObjects.mxToBeam(mxBeam)
    >>> a.type
    'partial'
    >>> a.direction
    'left'

    >>> mxBeam.set('charData', 'crazy')
    >>> a = musicxml.fromMxObjects.mxToBeam(mxBeam)
    Traceback (most recent call last):
    FromMxObjectsException: unexpected beam type encountered (crazy)
    '''
    beamOut = beam.Beam() if inputM21 is None else inputM21
    # map MusicXML beam charData to (music21 type, direction or None)
    translation = {
        'begin': ('start', None),
        'continue': ('continue', None),
        'end': ('stop', None),
        'forward hook': ('partial', 'right'),
        'backward hook': ('partial', 'left'),
    }
    mxType = mxBeam.get('charData')
    entry = translation.get(mxType)
    if entry is None:
        raise FromMxObjectsException('unexpected beam type encountered (%s)' % mxType)
    beamType, beamDirection = entry
    beamOut.type = beamType
    if beamDirection is not None:
        beamOut.direction = beamDirection
    return beamOut
def mxToBeams(mxBeamList, inputM21 = None):
    '''given a list of mxBeam objects, sets the beamsList

    >>> a = beam.Beams()
    >>> a.fill(2, type='start')
    >>> mxBeamList = musicxml.toMxObjects.beamsToMx(a)
    >>> b = musicxml.fromMxObjects.mxToBeams(mxBeamList)
    >>> b
    <music21.beam.Beams <music21.beam.Beam 1/start>/<music21.beam.Beam 2/start>>
    '''
    beamsOut = beam.Beams() if inputM21 is None else inputM21
    # beam numbers in MusicXML are 1-based
    for beamNumber, mxBeam in enumerate(mxBeamList, start=1):
        beamObj = mxToBeam(mxBeam)
        beamObj.number = beamNumber
        beamsOut.beamsList.append(beamObj)
    return beamsOut
#---------------------------------------------------------
# layout
def mxPrintToPageLayout(mxPrint, inputM21 = None):
    '''
    Given an mxPrint object, set object data for
    the print section of a layout.PageLayout object

    >>> mxPrint = musicxml.mxObjects.Print()
    >>> mxPrint.set('new-page', 'yes')
    >>> mxPrint.set('page-number', 5)
    >>> mxPageLayout = musicxml.mxObjects.PageLayout()
    >>> mxPageLayout.pageHeight = 4000
    >>> mxPageMargins = musicxml.mxObjects.PageMargins()
    >>> mxPageMargins.set('leftMargin', 20)
    >>> mxPageMargins.set('rightMargin', 30.2)
    >>> mxPageLayout.append(mxPageMargins)
    >>> mxPrint.append(mxPageLayout)

    >>> pl = musicxml.fromMxObjects.mxPrintToPageLayout(mxPrint)
    >>> pl.isNew
    True
    >>> pl.rightMargin > 30.1 and pl.rightMargin < 30.3
    True
    >>> pl.leftMargin
    20.0
    >>> pl.pageNumber
    5

    Alternatively, pass a music21 object into this routine.

    >>> plAlt = layout.PageLayout()
    >>> musicxml.fromMxObjects.mxPrintToPageLayout(mxPrint, plAlt)
    >>> plAlt.pageNumber
    5
    >>> plAlt.pageHeight
    4000.0
    >>> plAlt.isNew
    True
    '''
    pageLayout = layout.PageLayout() if inputM21 is None else inputM21
    # new-page is encoded as yes/no in musicxml
    pageLayout.isNew = (mxPrint.get('newPage') == 'yes')
    pageNumber = mxPrint.get('page-number')
    if pageNumber is not None and pageNumber != "":
        # may arrive as either a string or a number
        if common.isStr(pageNumber):
            pageLayout.pageNumber = int(pageNumber)
        else:
            pageLayout.pageNumber = pageNumber
    # use the first embedded PageLayout, if any
    mxPageLayout = None
    for child in mxPrint:
        if isinstance(child, mxObjects.PageLayout):
            mxPageLayout = child
            break
    if mxPageLayout is not None:
        mxPageLayoutToPageLayout(mxPageLayout, inputM21=pageLayout)
    if inputM21 is None:
        return pageLayout
def mxPageLayoutToPageLayout(mxPageLayout, inputM21 = None):
    '''
    get a PageLayout object from an mxPageLayout

    Called out from mxPrintToPageLayout because it
    is also used in the <defaults> tag
    '''
    pageLayout = layout.PageLayout() if inputM21 is None else inputM21
    for dimension in ('pageHeight', 'pageWidth'):
        value = mxPageLayout.get(dimension)
        if value is not None:
            setattr(pageLayout, dimension, float(value))
    # take the last PageMargins child, matching the original scan order
    mxPageMargins = None
    for child in mxPageLayout:
        if isinstance(child, mxObjects.PageMargins):
            mxPageMargins = child
    if mxPageMargins is not None:
        for marginName in ('leftMargin', 'rightMargin', 'topMargin', 'bottomMargin'):
            value = mxPageMargins.get(marginName)
            if value is not None:
                # may be floating point values
                setattr(pageLayout, marginName, float(value))
    if inputM21 is None:
        return pageLayout
def mxPrintToSystemLayout(mxPrint, inputM21 = None):
    '''
    Given an mxPrint object, set object data

    >>> mxPrint = musicxml.mxObjects.Print()
    >>> mxPrint.set('new-system', 'yes')
    >>> mxSystemLayout = musicxml.mxObjects.SystemLayout()
    >>> mxSystemLayout.systemDistance = 55
    >>> mxSystemMargins = musicxml.mxObjects.SystemMargins()
    >>> mxSystemMargins.set('leftMargin', 20)
    >>> mxSystemMargins.set('rightMargin', 30.2)
    >>> mxSystemLayout.append(mxSystemMargins)
    >>> mxPrint.append(mxSystemLayout)

    >>> sl = musicxml.fromMxObjects.mxPrintToSystemLayout(mxPrint)
    >>> sl.isNew
    True
    >>> sl.rightMargin > 30.1 and sl.rightMargin <= 30.2
    True
    >>> sl.leftMargin
    20.0
    >>> sl.distance
    55.0
    '''
    systemLayout = layout.SystemLayout() if inputM21 is None else inputM21
    newSystem = mxPrint.get('newSystem')
    # encoded as yes/no in musicxml; leave isNew untouched when absent
    if newSystem == 'yes':
        systemLayout.isNew = True
    elif newSystem == 'no':
        systemLayout.isNew = False
    #mxSystemLayout = mxPrint.get('systemLayout')
    # use the first embedded SystemLayout, if any
    mxSystemLayout = None
    for child in mxPrint:
        if isinstance(child, mxObjects.SystemLayout):
            mxSystemLayout = child
            break
    if mxSystemLayout is not None:
        mxSystemLayoutToSystemLayout(mxSystemLayout, inputM21=systemLayout)
    if inputM21 is None:
        return systemLayout
def mxSystemLayoutToSystemLayout(mxSystemLayout, inputM21 = None):
    '''
    get a SystemLayout object from an mxSystemLayout

    Called out from mxPrintToSystemLayout because it
    is also used in the <defaults> tag

    mxSystemLayout: an mxObjects.SystemLayout, possibly containing a
        SystemMargins child.
    inputM21: optional layout.SystemLayout to populate; a new one is
        created (and returned) if None.
    '''
    if inputM21 is None:
        systemLayout = layout.SystemLayout()
    else:
        systemLayout = inputM21
    mxSystemMargins = None
    for x in mxSystemLayout:
        if isinstance(x, mxObjects.SystemMargins):
            mxSystemMargins = x
            break
    if mxSystemMargins is not None:
        data = mxSystemMargins.get('leftMargin')
        if data is not None:
            # may be floating point values
            systemLayout.leftMargin = float(data)
        data = mxSystemMargins.get('rightMargin')
        if data is not None:
            systemLayout.rightMargin = float(data)
        # BUG FIX: top-margin and bottom-margin previously overwrote
        # systemLayout.rightMargin (copy-paste error); store them on
        # their own attributes instead
        data = mxSystemMargins.get('topMargin')
        if data is not None:
            systemLayout.topMargin = float(data)
        data = mxSystemMargins.get('bottomMargin')
        if data is not None:
            systemLayout.bottomMargin = float(data)
    if mxSystemLayout.systemDistance is not None:
        systemLayout.distance = float(mxSystemLayout.systemDistance)
    if mxSystemLayout.topSystemDistance is not None:
        systemLayout.topDistance = float(mxSystemLayout.topSystemDistance)
    if inputM21 is None:
        return systemLayout
def mxPrintToStaffLayoutList(mxPrint, inputM21 = None):
    '''
    Given an mxPrint object, return a list of StaffLayout objects (may be empty)

    >>> mxPrint = musicxml.mxObjects.Print()
    >>> mxPrint.set('new-system', 'yes') # red-herring: has no effect here
    >>> mxStaffLayout = musicxml.mxObjects.StaffLayout()
    >>> mxStaffLayout.staffDistance = 55
    >>> mxStaffLayout.set('number', 1)
    >>> mxPrint.append(mxStaffLayout)

    >>> slList = musicxml.fromMxObjects.mxPrintToStaffLayoutList(mxPrint)
    >>> sl = slList[0]
    >>> sl.distance
    55.0
    >>> sl.staffNumber
    1
    '''
    # BUG FIX (doctest): the "red-herring" remark used to sit on its own
    # line between two `>>>` lines, which doctest parses as expected output
    # and therefore fails; it is now an inline comment.
    # `inputM21` is unused; it is kept for signature parity with the
    # sibling mxPrintTo... routines.
    return [mxStaffLayoutToStaffLayout(x) for x in mxPrint
            if isinstance(x, mxObjects.StaffLayout)]
def mxStaffLayoutToStaffLayout(mxStaffLayout, inputM21 = None):
    '''
    get a StaffLayout object from an mxStaffLayout

    Called out from mxPrintToStaffLayoutList because it
    is also used in the <defaults> tag
    '''
    staffLayout = layout.StaffLayout() if inputM21 is None else inputM21
    if mxStaffLayout.staffDistance is not None:
        staffLayout.distance = float(mxStaffLayout.staffDistance)
    try:
        staffNumber = mxStaffLayout.get('number')
    except xmlnode.XMLNodeException:
        # the 'number' attribute may not be defined on this node
        pass
    else:
        if staffNumber is not None:
            staffLayout.staffNumber = int(staffNumber)
    if inputM21 is None:
        return staffLayout
#-----------------------------------------------------------------
# metadata
def mxScoreToMetadata(mxScore, inputM21 = None):
    '''
    Use an mxScore, to fill in parameters of a
    :class:`~music21.metadata.Metadata` object.

    if `inputM21` is None, a new `Metadata` object
    is created and returned at the end.

    Otherwise, the parameters of this Metadata object
    are changed and nothing is returned.
    '''
    md = metadata.Metadata() if inputM21 is None else inputM21
    movementNumber = mxScore.get('movementNumber')
    if movementNumber is not None:
        md.movementNumber = movementNumber
    # xml calls this title not name
    movementTitle = mxScore.get('movementTitle')
    if movementTitle is not None:
        md.movementName = movementTitle
    mxWork = mxScore.get('workObj')
    if mxWork is not None: # may be set to none
        md.title = mxWork.get('workTitle')
        #environLocal.printDebug(['mxScoreToMetadata, got title', md.title])
        md.number = mxWork.get('workNumber')
        md.opusNumber = mxWork.get('opus')
    mxIdentification = mxScore.get('identificationObj')
    if mxIdentification is not None:
        for mxCreator in mxIdentification.get('creatorList'):
            # do an mx conversion for mxCreator to Contributor
            md._contributors.append(mxCreatorToContributor(mxCreator))
    # not yet supported; an encoding is also found in identification obj
    # mxEncoding = mxScore.get('encodingObj')
    if inputM21 is None:
        return md
def mxCreatorToContributor(mxCreator, inputM21 = None):
    '''
    Given an mxCreator, fill the necessary parameters of a Contributor.

    >>> mxCreator = musicxml.mxObjects.Creator()
    >>> mxCreator.set('type', 'composer')
    >>> mxCreator.set('charData', 'Beethoven, <NAME>')

    >>> c = musicxml.fromMxObjects.mxCreatorToContributor(mxCreator)
    >>> c
    <music21.metadata.primitives.Contributor object at 0x...>
    >>> c.role
    'composer'
    >>> c.name
    '<NAME>'
    '''
    c = metadata.Contributor() if inputM21 is None else inputM21
    creatorType = mxCreator.get('type')
    if creatorType is not None and creatorType in metadata.Contributor.roleNames:
        c.role = creatorType
    # otherwise leave the role unset: this type is not a role defined in music21
    #environLocal.printDebug(['mxCreatorToContributor:', 'received unknown Contributor role: %s' % creatorType])
    # remove any whitespace found
    c.name = mxCreator.get('charData').strip()
    if inputM21 is None:
        return c
#-------------------------------------------------------------------------------
class Test(unittest.TestCase):
    def runTest(self):
        # required hook for music21's custom test runner; no per-test setup
        pass
    def testBasic(self):
        # placeholder smoke test; intentionally empty
        pass
def pitchOut(self, listIn):
'''
make it so that the tests that look for the old-style pitch.Pitch
representation still work.
'''
out = "["
for p in listIn:
out += str(p) + ', '
out = out[0:len(out)-2]
out += "]"
return out
    def testBarRepeatConversion(self):
        # repeat barlines in Mozart K.80 movement 3 should import as Repeat
        # objects; six per part -- presumably three begin/end pairs
        # (TODO confirm against the corpus file)
        from music21 import corpus
        #a = converter.parse(testPrimitive.simpleRepeat45a)
        # this is a good example with repeats
        s = corpus.parse('k80/movement3')
        for p in s.parts:
            post = p.flat.getElementsByClass('Repeat')
            self.assertEqual(len(post), 6)
        #a = corpus.parse('opus41no1/movement3')
        #s.show()
    def testVoices(self):
        # multi-voice import: testPrimitive.voiceDouble defines two voices
        # in one measure; voice ids come from the MusicXML <voice> numbers
        from music21 import converter
        from music21.musicxml import testPrimitive
        s = converter.parse(testPrimitive.voiceDouble)
        m1 = s.parts[0].getElementsByClass('Measure')[0]
        self.assertEqual(m1.hasVoices(), True)
        self.assertEqual([v.id for v in m1.voices], [u'1', u'2'])
        # voices are addressable both by index and by id string, and
        # element offsets within each voice are preserved
        self.assertEqual([e.offset for e in m1.voices[0]], [0.0, 1.0, 2.0, 3.0])
        self.assertEqual([e.offset for e in m1.voices['1']], [0.0, 1.0, 2.0, 3.0])
        self.assertEqual([e.offset for e in m1.voices[1]], [0.0, 2.0, 2.5, 3.0, 3.5])
        self.assertEqual([e.offset for e in m1.voices['2']], [0.0, 2.0, 2.5, 3.0, 3.5])
        #s.show()
    def testSlurInputA(self):
        # slur spanners from testPrimitive.spannersSlurs33c should be
        # retrievable through flat access, context search, and from a note
        from music21 import converter
        from music21.musicxml import testPrimitive
        s = converter.parse(testPrimitive.spannersSlurs33c)
        # expect five spanners (the old comment claiming ten was wrong:
        # the assertions below check for 5)
        self.assertEqual(len(s.flat.getElementsByClass('Spanner')), 5)
        # can get the same from a getAll search
        self.assertEqual(len(s.getAllContextsByClass('Spanner')), 5)
        # try to get all spanners from the first note
        self.assertEqual(len(s.flat.notesAndRests[0].getAllContextsByClass('Spanner')), 5)
        #s.show('t')
        #s.show()
    def testMultipleStavesPerPartA(self):
        # a two-staff piano part should import as two PartStaff objects,
        # one note on each staff
        from music21 import converter
        from music21.musicxml import testPrimitive
        from music21.musicxml import xmlHandler
        mxDoc = xmlHandler.Document()
        mxDoc.read(testPrimitive.pianoStaff43a)
        # parts are stored in component list
        p1 = mxDoc.score.componentList[0]
        self.assertEqual(p1.getStavesCount(), 2)
        s = converter.parse(testPrimitive.pianoStaff43a)
        self.assertEqual(len(s.parts), 2)
        #s.show()
        self.assertEqual(len(s.parts[0].flat.getElementsByClass('Note')), 1)
        self.assertEqual(len(s.parts[1].flat.getElementsByClass('Note')), 1)
        self.assertEqual(isinstance(s.parts[0], stream.PartStaff), True)
        self.assertEqual(isinstance(s.parts[1], stream.PartStaff), True)
def testMultipleStavesPerPartB(self):
from music21 import converter
from music21.musicxml import testFiles
s = converter.parse(testFiles.moussorgskyPromenade) # @UndefinedVariable
self.assertEqual(len(s.parts), 2)
self.assertEqual(len(s.parts[0].flat.getElementsByClass('Note')), 19)
# only chords in the second part
self.assertEqual(len(s.parts[1].flat.getElementsByClass('Note')), 0)
self.assertEqual(len(s.parts[0].flat.getElementsByClass('Chord')), 11)
self.assertEqual(len(s.parts[1].flat.getElementsByClass('Chord')), 11)
#s.show()
def testMultipleStavesPerPartC(self):
from music21 import corpus
s = corpus.parse('schoenberg/opus19/movement2')
self.assertEqual(len(s.parts), 2)
s = corpus.parse('schoenberg/opus19/movement6')
self.assertEqual(len(s.parts), 2)
#s.show()
def testSpannersA(self):
from music21 import converter
from music21.musicxml import testPrimitive
s = converter.parse(testPrimitive.spanners33a)
# this number will change as more are being imported
self.assertEqual(len(s.flat.spanners) >= 2, True)
#environLocal.printDebug(['pre s.measures(2,3)', 's', s])
ex = s.measures(2, 3) # this needs to get all spanners too
# all spanners are referenced over; even ones that may not be relevant
self.assertEqual(len(ex.flat.spanners), 14)
#ex.show()
# slurs are on measures 2, 3
# crescendos are on measures 4, 5
# wavy lines on measures 6, 7
# brackets etc. on measures 10-14
# glissando on measure 16
def testTextExpressionsA(self):
from music21 import converter
from music21.musicxml import testPrimitive
s = converter.parse(testPrimitive.textExpressions)
#s.show()
self.assertEqual(len(s.flat.getElementsByClass('TextExpression')), 3)
p1 = s.parts[0]
m1 = p1.getElementsByClass('Measure')[0]
self.assertEqual(len(m1.getElementsByClass('TextExpression')), 0)
# all in measure 2
m2 = p1.getElementsByClass('Measure')[1]
self.assertEqual(len(m2.getElementsByClass('TextExpression')), 3)
teStream = m2.getElementsByClass('TextExpression')
self.assertEqual([te.offset for te in teStream], [1.0, 1.5, 4.0])
#s.show()
def testTextExpressionsC(self):
from music21 import corpus
s = corpus.parse('bwv66.6')
p = s.parts[0]
for m in p.getElementsByClass('Measure'):
for n in m.flat.notes:
if n.pitch.name in ['B']:
msg = '%s\n%s' % (n.pitch.nameWithOctave, n.duration.quarterLength)
te = expressions.TextExpression(msg)
te.size = 14
te.style = 'bold'
te.justify = 'center'
te.enclosure = 'rectangle'
te.positionVertical = -80
m.insert(n.offset, te)
#p.show()
def testTextExpressionsD(self):
from music21 import corpus
# test placing text expression in arbitrary locations
s = corpus.parse('bwv66.6')
p = s.parts[-1] # get bass
for m in p.getElementsByClass('Measure')[1:]:
for pos in [1.5, 2.5]:
te = expressions.TextExpression(pos)
te.style = 'bold'
te.justify = 'center'
te.enclosure = 'rectangle'
m.insert(pos, te)
#p.show()
def testTextExpressionsE(self):
import random
s = stream.Stream()
for i in range(6):
m = stream.Measure(number=i + 1)
m.append(layout.SystemLayout(isNew=True))
m.append(note.Rest(type='whole'))
s.append(m)
for m in s.getElementsByClass('Measure'):
offsets = [x * .25 for x in range(16)]
random.shuffle(offsets)
offsets = offsets[:4]
for o in offsets:
te = expressions.TextExpression(o)
te.style = 'bold'
te.justify = 'center'
te.enclosure = 'rectangle'
m.insert(o, te)
#s.show()
def testImportRepeatExpressionsA(self):
# test importing from musicxml
from music21.musicxml import testPrimitive
from music21 import converter
# has one segno
s = converter.parse(testPrimitive.repeatExpressionsA)
self.assertEqual(len(s.flat.getElementsByClass(repeat.Segno)), 1)
self.assertEqual(len(s.flat.getElementsByClass(repeat.Fine)), 1)
self.assertEqual(len(s.flat.getElementsByClass(repeat.DalSegnoAlFine)), 1)
# has two codas
s = converter.parse(testPrimitive.repeatExpressionsB)
self.assertEqual(len(s.flat.getElementsByClass(repeat.Coda)), 2)
# has one d.c.al coda
self.assertEqual(len(s.flat.getElementsByClass(repeat.DaCapoAlCoda)), 1)
def testImportRepeatBracketA(self):
from music21 import corpus
# has repeats in it; start with single emasure
s = corpus.parse('opus74no1', 3)
# there are 2 for each part, totaling 8
self.assertEqual(len(s.flat.getElementsByClass('RepeatBracket')), 8)
# can get for each part as spanners are stored in Part now
# TODO: need to test getting repeat brackets after measure extraction
#s.parts[0].show() # 72 through 77
sSub = s.parts[0].measures(72, 77)
# 2 repeat brackets are gathered b/c they are stored at the Part by
# default
rbSpanners = sSub.getElementsByClass('RepeatBracket')
self.assertEqual(len(rbSpanners), 2)
def testImportVoicesA(self):
# testing problematic voice imports
from music21.musicxml import testPrimitive
from music21 import converter
# this 2 part segments was importing multiple voices within
# a measure, even though there was no data in the second voice
s = converter.parse(testPrimitive.mixedVoices1a)
self.assertEqual(len(s.parts), 2)
# there are voices, but they have been removed
self.assertEqual(len(s.parts[0].getElementsByClass(
'Measure')[0].voices), 0)
#s.parts[0].show('t')
#self.assertEqual(len(s.parts[0].voices), 2)
s = converter.parse(testPrimitive.mixedVoices1b)
self.assertEqual(len(s.parts), 2)
self.assertEqual(len(s.parts[0].getElementsByClass(
'Measure')[0].voices), 0)
#s.parts[0].show('t')
# this case, there were 4, but there should be 2
s = converter.parse(testPrimitive.mixedVoices2)
self.assertEqual(len(s.parts), 2)
self.assertEqual(len(s.parts[0].getElementsByClass(
'Measure')[0].voices), 2)
self.assertEqual(len(s.parts[1].getElementsByClass(
'Measure')[0].voices), 2)
#s.parts[0].show('t')
# s = converter.parse(testPrimitive.mixedVoices1b)
# s = converter.parse(testPrimitive.mixedVoices2)
def testImportMetronomeMarksA(self):
from music21.musicxml import testPrimitive
from music21 import converter
# has metronome marks defined, not with sound tag
s = converter.parse(testPrimitive.metronomeMarks31c)
# get all tempo indications
mms = s.flat.getElementsByClass('TempoIndication')
self.assertEqual(len(mms) > 3, True)
def testImportMetronomeMarksB(self):
pass
# TODO: look for files that only have sound tags and create MetronomeMarks
# need to look for bundling of Words text expressions with tempo
# has only sound tempo=x tag
#s = converter.parse(testPrimitive.articulations01)
#s.show()
def testImportGraceNotesA(self):
# test importing from musicxml
from music21.musicxml import testPrimitive
from music21 import converter
unused_s = converter.parse(testPrimitive.graceNotes24a)
#s.show()
def testChordalStemDirImport(self):
#NB: Finale apparently will not display a pitch that is a member of a chord without a stem
#unless all chord members are without stems.
from music21.musicxml import m21ToString
from music21 import converter
n1 = note.Note('f3')
n1.notehead = 'diamond'
n1.stemDirection = 'down'
n2 = note.Note('c4')
n2.stemDirection = 'noStem'
c = chord.Chord([n1, n2])
c.quarterLength = 2
xml = m21ToString.fromMusic21Object(c)
#print xml
#c.show()
inputStream = converter.parse(xml)
chordResult = inputStream.flat.notes[0]
# for n in chordResult:
# print n.stemDirection
self.assertEqual(chordResult.getStemDirection(chordResult.pitches[0]), 'down')
self.assertEqual(chordResult.getStemDirection(chordResult.pitches[1]), 'noStem')
def testStaffGroupsA(self):
from music21.musicxml import testPrimitive
from music21 import converter
s = converter.parse(testPrimitive.staffGroupsNested41d)
self.assertEqual(len(s.getElementsByClass('StaffGroup')), 2)
#raw = s.musicxml
sg1 = s.getElementsByClass('StaffGroup')[0]
self.assertEqual(sg1.symbol, 'brace')
self.assertEqual(sg1.barTogether, True)
sg2 = s.getElementsByClass('StaffGroup')[1]
self.assertEqual(sg2.symbol, 'line')
self.assertEqual(sg2.barTogether, True)
def testInstrumentTranspositionA(self):
from music21.musicxml import testPrimitive
from music21 import converter
s = converter.parse(testPrimitive.transposingInstruments72a)
i1 = s.parts[0].flat.getElementsByClass('Instrument')[0]
i2 = s.parts[1].flat.getElementsByClass('Instrument')[0]
i3 = s.parts[2].flat.getElementsByClass('Instrument')[0]
self.assertEqual(str(i1.transposition), '<music21.interval.Interval M-2>')
self.assertEqual(str(i2.transposition), '<music21.interval.Interval M-6>')
def testInstrumentTranspositionB(self):
from music21.musicxml import testPrimitive
from music21 import converter
s = converter.parse(testPrimitive.transposing01)
iStream1 = s.parts[0].flat.getElementsByClass('Instrument')
# three instruments; one initial, and then one for each transposition
self.assertEqual(len(iStream1), 3)
# should be 3
iStream2 = s.parts[1].flat.getElementsByClass('Instrument')
self.assertEqual(len(iStream2), 3)
i2 = iStream2[0]
iStream3 = s.parts[2].flat.getElementsByClass('Instrument')
self.assertEqual(len(iStream3), 1)
i3 = iStream3[0]
self.assertEqual(str(iStream1[0].transposition), 'None')
self.assertEqual(str(iStream1[1].transposition), '<music21.interval.Interval P-5>')
self.assertEqual(str(iStream1[2].transposition), '<music21.interval.Interval P1>')
self.assertEqual(str(iStream2[0].transposition), '<music21.interval.Interval M-2>')
self.assertEqual(str(iStream2[1].transposition), '<music21.interval.Interval m3>')
self.assertEqual(str(i3.transposition), '<music21.interval.Interval P-5>')
self.assertEqual(self.pitchOut([p for p in s.parts[0].flat.pitches]), '[A4, A4, A4, A4, A4, A4, A4, A4, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, A4, A4, A4, A4]')
self.assertEqual(self.pitchOut([p for p in s.parts[1].flat.pitches]), '[B4, B4, B4, B4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, B4, B4, B4, B4, B4, B4]')
self.assertEqual(self.pitchOut([p for p in s.parts[2].flat.pitches]), '[E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5, E5]')
sSounding = s.toSoundingPitch(inPlace=False)
self.assertEqual(self.pitchOut([p for p in sSounding.parts[0].flat.pitches]), '[A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4]')
self.assertEqual(self.pitchOut([p for p in sSounding.parts[1].flat.pitches]), '[A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4]')
self.assertEqual(self.pitchOut([p for p in sSounding.parts[2].flat.pitches]), '[A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4]')
# chordification by default places notes at sounding pitch
sChords = s.chordify()
self.assertEqual(self.pitchOut([p for p in sChords.flat.pitches]), '[A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4, A4]')
#sChords.show()
def testInstrumentTranspositionC(self):
# generate all transpositions on output
from music21 import converter
from music21.musicxml import testPrimitive
s = converter.parse(testPrimitive.transposing01)
self.assertEqual(len(s.flat.getElementsByClass('Instrument')), 7)
#s.show()
def testHarmonyA(self):
from music21 import corpus
s = corpus.parse('leadSheet/berlinAlexandersRagtime.xml')
self.assertEqual(len(s.flat.getElementsByClass('ChordSymbol')), 19)
match = [h.chordKind for h in s.flat.getElementsByClass('ChordSymbol')]
self.assertEqual(match, [u'major', u'dominant', u'major', u'major', u'major', u'major', u'dominant', u'major', u'dominant', u'major', u'dominant', u'major', u'dominant', u'major', u'dominant', u'major', u'dominant', u'major', u'major'])
match = [str(h.root()) for h in s.flat.getElementsByClass('ChordSymbol')]
self.assertEqual(match, ['F3', 'C3', 'F3', 'B-2', 'F3', 'C3', 'G2', 'C3', 'C3', 'F3', 'C3', 'F3', 'F2', 'B-2', 'F2', 'F3', 'C3', 'F3', 'C3'])
match = set([str(h.figure) for h in s.flat.getElementsByClass('ChordSymbol')])
self.assertEqual(match, set(['F','F7','B-','C7','G7','C']))
s = corpus.parse('monteverdi/madrigal.3.12.xml')
self.assertEqual(len(s.flat.getElementsByClass('ChordSymbol')), 10)
s = corpus.parse('leadSheet/fosterBrownHair.xml')
self.assertEqual(len(s.flat.getElementsByClass('ChordSymbol')), 40)
#s.show()
def testOrnamentandTechnical(self):
from music21 import corpus
s = corpus.parse('opus133')
ex = s.parts[0]
countTrill = 0
for n in ex.flat.notes:
for e in n.expressions:
if 'Trill' in e.classes:
countTrill += 1
self.assertEqual(countTrill, 54)
# TODO: Get a better test... the single harmonic in the viola part, m. 482 is probably a mistake!
countTechnical = 0
for n in s.parts[2].flat.notes:
for a in n.articulations:
if 'TechnicalIndication' in a.classes:
countTechnical += 1
self.assertEqual(countTechnical, 1)
def testOrnamentC(self):
from music21 import converter
from music21.musicxml import testPrimitive
# has many ornaments
s = converter.parse(testPrimitive.notations32a)
#s.flat.show('t')
self.assertEqual(len(s.flat.getElementsByClass('Tremolo')), 1)
count = 0
for n in s.flat.notes:
for e in n.expressions:
if 'Turn' in e.classes:
count += 1
self.assertEqual(count, 4) # include inverted turn
count = 0
for n in s.flat.notes:
for e in n.expressions:
if 'InvertedTurn' in e.classes:
count += 1
self.assertEqual(count, 1)
count = 0
for n in s.flat.notes:
for e in n.expressions:
if 'Shake' in e.classes:
count += 1
self.assertEqual(count, 1)
count = 0
for n in s.flat.notes:
for e in n.expressions:
if 'Schleifer' in e.classes:
count += 1
self.assertEqual(count, 1)
def testTextBoxA(self):
from music21 import converter
from music21.musicxml import testPrimitive
s = converter.parse(testPrimitive.textBoxes01)
tbs = s.flat.getElementsByClass('TextBox')
self.assertEqual(len(tbs), 5)
msg = []
for tb in tbs:
msg.append(tb.content)
self.assertEqual(msg, [u'This is a text box!', u'pos 200/300 (lower left)', u'pos 1000/300 (lower right)', u'pos 200/1500 (upper left)', u'pos 1000/1500 (upper right)'])
def testImportSlursA(self):
from music21 import corpus
# this is a good test as this encoding uses staffs, not parts
# to encode both parts; this requires special spanner handling
s = corpus.parse('mozart/k545/movement1_exposition')
sf = s.flat
slurs = sf.getElementsByClass(spanner.Slur)
# TODO: this value should be 2, but due to staff encoding we
# have orphaned spanners that are not cleaned up
self.assertEqual(len(slurs), 4)
n1, n2 = s.parts[0].flat.notes[3], s.parts[0].flat.notes[5]
#environLocal.printDebug(['n1', n1, 'id(n1)', id(n1), slurs[0].getSpannedElementIds(), slurs[0].getSpannedElementIds()])
self.assertEqual(id(n1) == slurs[0].getSpannedElementIds()[0], True)
self.assertEqual(id(n2) == slurs[0].getSpannedElementIds()[1], True)
#environLocal.printDebug(['n2', n2, 'id(n2)', id(n2), slurs[0].getSpannedElementIds()])
def testImportWedgeA(self):
from music21 import converter
from music21.musicxml import testPrimitive
s = converter.parse(testPrimitive.spanners33a)
self.assertEqual(len(s.flat.getElementsByClass('Crescendo')), 1)
self.assertEqual(len(s.flat.getElementsByClass('Diminuendo')), 1)
def testImportWedgeB(self):
from music21 import converter
from music21.musicxml import testPrimitive
# this produces a single component cresc
s = converter.parse(testPrimitive.directions31a)
self.assertEqual(len(s.flat.getElementsByClass('Crescendo')), 2)
def testBracketImportB(self):
from music21 import converter
from music21.musicxml import testPrimitive
s = converter.parse(testPrimitive.spanners33a)
#s.show()
self.assertEqual(len(s.flat.getElementsByClass('Line')), 6)
def testTrillExtensionImportA(self):
from music21 import converter
from music21.musicxml import testPrimitive
s = converter.parse(testPrimitive.notations32a)
#s.show()
self.assertEqual(len(s.flat.getElementsByClass('TrillExtension')), 2)
def testGlissandoImportA(self):
from music21 import converter
from music21.musicxml import testPrimitive
s = converter.parse(testPrimitive.spanners33a)
#s.show()
self.assertEqual(len(s.flat.getElementsByClass('Glissando')), 1)
def testImportDashes(self):
# dashes are imported as Lines (as are brackets)
from music21 import converter
from music21.musicxml import testPrimitive
s = converter.parse(testPrimitive.spanners33a)
self.assertEqual(len(s.flat.getElementsByClass('Line')), 6)
def testImportGraceA(self):
from music21 import converter
from music21.musicxml import testPrimitive
s = converter.parse(testPrimitive.graceNotes24a)
#s.show()
match = [str(p) for p in s.pitches]
#print match
self.assertEqual(match, ['D5', 'C5', 'E5', 'D5', 'C5', 'D5', 'C5', 'D5', 'C5', 'D5', 'C5', 'E5', 'D5', 'C5', 'D5', 'C5', 'D5', 'C5', 'E5', 'E5', 'F4', 'C5', 'D#5', 'C5', 'D-5', 'A-4', 'C5', 'C5'])
def testBarException(self):
mxBarline = mxObjects.Barline()
mxBarline.set('barStyle', 'light-heavy')
#Rasing the BarException
self.assertRaises(bar.BarException, mxToRepeat, mxBarline)
mxRepeat = mxObjects.Repeat()
mxRepeat.set('direction', 'backward')
mxBarline.set('repeatObj', mxRepeat)
#all fine now, no exceptions here
mxToRepeat(mxBarline)
#Raising the BarException
mxBarline.set('barStyle', 'wunderbar')
self.assertRaises(bar.BarException, mxToRepeat, mxBarline)
#-------------------------------------------------------------------------------
# define presented order in documentation
# _DOC_ORDER lists the public objects, in order, that the documentation
# builder should present for this module
_DOC_ORDER = [mxScoreToScore]

if __name__ == "__main__":
    # sys.arg test options will be used in mainTest()
    # mainTest runs the Test class above under music21's test harness
    import music21
    music21.mainTest(Test)

#------------------------------------------------------------------------------
# eof
Convert a MusicXML credit to a music21 TextBox >>> c = musicxml.mxObjects.Credit() >>> c.append(musicxml.mxObjects.CreditWords('testing')) >>> c.set('page', 2) >>> tb = musicxml.fromMxObjects.mxCreditToTextBox(c) >>> tb.page 2 >>> tb.content 'testing' # can iterate # no text defined # join with \n # take formatting from the first, no matter if multiple are defined Convert a MusicXML Transpose object to a music21 Interval object. >>> t = musicxml.mxObjects.Transpose() >>> t.diatonic = -1 >>> t.chromatic = -2 >>> musicxml.fromMxObjects.mxTransposeToInterval(t) <music21.interval.Interval M-2> >>> t = musicxml.mxObjects.Transpose() >>> t.diatonic = -5 >>> t.chromatic = -9 >>> musicxml.fromMxObjects.mxTransposeToInterval(t) <music21.interval.Interval M-6> >>> t = musicxml.mxObjects.Transpose() >>> t.diatonic = 3 # a type of 4th >>> t.chromatic = 6 >>> musicxml.fromMxObjects.mxTransposeToInterval(t) <music21.interval.Interval A4> # NOTE: presently not dealing with double # doubled one octave down from what is currently written # (as is the case for mixed cello / bass parts in orchestral literature) #environLocal.printDebug(['ds', ds, 'cs', cs, 'oc', oc]) # diatonic step can be used as a generic specifier here if # shifted 1 away from zero # assume we have chromatic; may not be correct spelling Given an mxMetronome, convert to either a TempoIndication subclass, either a tempo.MetronomeMark or tempo.MetricModulation. 
>>> m = musicxml.mxObjects.Metronome() >>> bu = musicxml.mxObjects.BeatUnit('half') >>> pm = musicxml.mxObjects.PerMinute(125) >>> m.append(bu) >>> m.append(pm) >>> musicxml.fromMxObjects.mxToTempoIndication(m) <music21.tempo.MetronomeMark Half=125.0> # get lists of durations and texts # add one dot each time these are encountered # should come last #environLocal.printDebug(['found PerMinute', mxObj]) # store as a number #environLocal.printDebug(['found metric modulaton:', 'durations', durations]) # all we have are referents, no values are defined in musicxml # will need to update context after adding to Stream #environLocal.printDebug(['found metronome mark:', 'numbers', numbers]) # TODO: set text if defined in words Given an mxBarline (not an mxRepeat object) with repeatObj as a parameter, file the necessary parameters and return a bar.Repeat() object >>> mxRepeat = musicxml.mxObjects.Repeat() >>> mxRepeat.set('direction', 'backward') >>> mxRepeat.get('times') == None True >>> mxBarline = musicxml.mxObjects.Barline() >>> mxBarline.set('barStyle', 'light-heavy') >>> mxBarline.set('repeatObj', mxRepeat) >>> b = musicxml.fromMxObjects.mxToRepeat(mxBarline) >>> b <music21.bar.Repeat direction=end> Test that the music21 style for a backwards repeat is called "final" (because it resembles a final barline) but that the musicxml style is called light-heavy. 
>>> b.style 'final' >>> b.direction 'end' >>> mxBarline2 = musicxml.toMxObjects.repeatToMx(b) >>> mxBarline2.get('barStyle') 'light-heavy' #environLocal.printDebug(['mxRepeat', mxRepeat, mxRepeat._attr]) # make into a number Given an mxBarline, fill the necessary parameters >>> mxBarline = musicxml.mxObjects.Barline() >>> mxBarline.set('barStyle', 'light-light') >>> mxBarline.set('location', 'right') >>> b = musicxml.fromMxObjects.mxToBarline(mxBarline) >>> b.style # different in music21 than musicxml 'double' >>> b.location 'right' #------------------------------------------------------------------------------- Given a completely formed, non-grace Note or Chord, create and return a m21 grace version of the same. If mxGrace is None, no change is made and the same object is returned. #------------------------------------------------------------------------------- # Pitch and pitch components >>> a = musicxml.mxObjects.Accidental() >>> a.set('content', 'half-flat') >>> a.get('content') 'half-flat' >>> b = pitch.Accidental() >>> bReference = musicxml.fromMxObjects.mxToAccidental(a, b) >>> b is bReference True >>> b.name 'half-flat' >>> b.alter -0.5 # need to use set here to get all attributes up to date Given a MusicXML Note object, set this Pitch object to its values. 
>>> b = musicxml.mxObjects.Pitch() >>> b.set('octave', 3) >>> b.set('step', 'E') >>> b.set('alter', -1) >>> c = musicxml.mxObjects.Note() >>> c.set('pitch', b) >>> a = pitch.Pitch('g#4') >>> a = musicxml.fromMxObjects.mxToPitch(c) >>> print(a) E-3 # assume this is an object # sometimes we have an accidental defined but no alter value, due to # a natural; need to look at mxAccidental directly #environLocal.printDebug(['found mxAccidental charData', mxAccidentalCharData]) # None is used in musicxml but not in music21 # the source had wanted to show alter # used to to just use acc value # self.accidental = Accidental(float(acc)) # better to use accObj if possible # MuseScore 0.9.6 generates Accidentals with empty objects # here we generate an accidental object from the alter value # but in the source, there was not a defined accidental #------------------------------------------------------------------------------- # Ties Translate a MusicXML :class:`~music21.musicxml.mxObjects.Note` (sic!) to a music21 :class:`~music21.tie.Tie` object according to its <tieList> parameter. Only called if the mxObjects.Note has a tieList that is not blank, so as not to create additional ties. # get all types and see what we have for this note # trivial case: have only 1 #self.type = 'start' # from old note.py code # not sure this is necessary # mxNotations = mxNote.get('notations') # if mxNotations != None: # mxTiedList = mxNotations.getTieds() # should be sufficient to only get mxTieList #------------------------------------------------------------------------------- # Lyrics Translate a MusicXML :class:`~music21.musicxml.mxObjects.Lyric` object to a music21 :class:`~music21.note.Lyric` object. If inputM21 is a :class:`~music21.note.Lyric` object, then the values of the mxLyric are transfered there and nothing returned. Otherwise, a new `Lyric` object is created and returned. 
>>> mxLyric = musicxml.mxObjects.Lyric() >>> mxLyric.set('text', 'word') >>> mxLyric.set('number', 4) >>> mxLyric.set('syllabic', 'single') >>> lyricObj = note.Lyric() >>> musicxml.fromMxObjects.mxToLyric(mxLyric, lyricObj) >>> lyricObj <music21.note.Lyric number=4 syllabic=single text="word"> Non-numeric MusicXML lyric "number"s are converted to identifiers: >>> mxLyric.set('number', 'part2verse1') >>> l2 = musicxml.fromMxObjects.mxToLyric(mxLyric) >>> l2 <music21.note.Lyric number=0 identifier="part2verse1" syllabic=single text="word"> # This is new to account for identifiers #If musicXML lyric number is not a number, set it to 0. This tells the caller of #mxToLyric that a new number needs to be given based on the lyrics context amongst other lyrics. # Used to be l.number = mxLyric.get('number') #------------------------------------------------------------------------------- # Durations Utility function to convert a MusicXML duration type to an music21 duration type. Changes 'long' to 'longa' and deals with a Guitar Pro 5.2 bug in MusicXML export, that exports a 32nd note with the type '32th'. >>> musicxml.fromMxObjects.musicXMLTypeToType('long') 'longa' >>> musicxml.fromMxObjects.musicXMLTypeToType('32th') '32nd' >>> musicxml.fromMxObjects.musicXMLTypeToType('quarter') 'quarter' >>> musicxml.fromMxObjects.musicXMLTypeToType(None) Traceback (most recent call last): FromMxObjectsException... # MusicXML uses long instead of longa Translate a `MusicXML` :class:`~music21.musicxml.mxObjects.Note` object to a music21 :class:`~music21.duration.Duration` object. 
:: >>> a = musicxml.mxObjects.Note() >>> a.setDefaults() >>> m = musicxml.mxObjects.Measure() >>> m.setDefaults() >>> a.external['measure'] = m # assign measure for divisions ref >>> a.external['divisions'] = m.external['divisions'] >>> c = duration.Duration() >>> musicxml.fromMxObjects.mxToDuration(a, c) <music21.duration.Duration 1.0> >>> c.quarterLength 1.0 # some rests do not define type, and only define duration # no type to get, must use raw # divide mxNote duration count by divisions to get qL # mxNotations = mxNote.get('notationsObj') # get all necessary config from mxNote #environLocal.printDebug(['created Tuplet', tup]) # need to see if there is more than one component #self.components[0]._tuplets.append(tup) # two ways to create durations, raw and cooked #environLocal.printDebug(['forced to use raw duration', durRaw]) # raw just uses qLen # the qLen set here may not be computable, but is not immediately # computed until setting components # a cooked version builds up from pieces # old way just used qLen #self.quarterLength = qLen # if mxNote.duration is None, this is a grace note, and duration # is based entirely on type #environLocal.printDebug(['got mx duration of None', d]) Translate a MusicXML :class:`~music21.musicxml.mxObjects.Direction` with an offset value to an offset in music21. 
#environLocal.printDebug(['mxDirection.offset', mxDirection.offset, 'mxDivisions', mxDivisions]) Given an mxNote, based on mxTimeModification and mxTuplet objects, return a Tuplet object (or alter the input object and then return it) #environLocal.printDebug(['got mxTimeModification', mxTimeModification]) # TODO: implement dot # mxNormalDot = mxTimeModification.get('normal-dot') # this value does not seem to frequently be supplied by mxl # encodings, unless it is different from the main duration # this sets both actual and noraml types to the same type # set to type of duration #environLocal.printDebug(['got mxNotations', mxNotations]) # a list, but only use first #environLocal.printDebug(['got mxTuplet', mxTuplet]) #environLocal.printDebug(['got bracket', self.bracket]) #------------------------------------------------------------------------------- # Meters Given an mxTimeList, load this object if inputM21 is None, create a new TimeSignature and return it. >>> mxTime = musicxml.mxObjects.Time() >>> mxTime.setDefaults() >>> mxAttributes = musicxml.mxObjects.Attributes() >>> mxAttributes.timeList.append(mxTime) >>> ts = meter.TimeSignature() >>> musicxml.fromMxObjects.mxToTimeSignature(mxAttributes.timeList, ts) >>> ts.numerator 4 # if just one # there may be more than one if we have more staffs per part # may be 3+2 #n = mxTime.get('beats') #d = mxTime.get('beat-type') # convert into a string #environLocal.printDebug(['loading meter string:', '+'.join(msg)]) #-------------------------------------------------------- # Key/KeySignatures Given a mxKey object or keyList, return a music21.key.KeySignature object and return it, or if inputM21 is None, change its attributes and return nothing. 
>>> mxk = musicxml.mxObjects.Key() >>> mxk.set('fifths', 5) >>> ks = key.KeySignature() >>> musicxml.fromMxObjects.mxKeyListToKeySignature(mxk, ks) >>> ks.sharps 5 Or just get a new KeySignature object from scratch: >>> mxk.set('fifths', -2) >>> ks2 = musicxml.fromMxObjects.mxKeyListToKeySignature(mxk) >>> ks2 <music21.key.KeySignature of 2 flats> # there may be more than one if we have more staffs per part #-------------------------------------------------------- # clefs Given a MusicXML Clef object, return a music21 Clef object >>> a = musicxml.mxObjects.Clef() >>> a.set('sign', 'G') >>> a.set('line', 2) >>> b = clef.Clef() >>> b <music21.clef.Clef> >>> 'TrebleClef' in b.classes False >>> musicxml.fromMxObjects.mxClefToClef(a, b) >>> b.sign 'G' >>> 'TrebleClef' in b.classes True >>> b <music21.clef.TrebleClef> Create a new clef from thin air: >>> a = musicxml.mxObjects.Clef() >>> a.set('sign', 'TAB') >>> c = musicxml.fromMxObjects.mxClefToClef(a) >>> c <music21.clef.TabClef> # its not a list # just get first for now #------------------------------------------------------------------------------- # Dynamics Given an mxDirection, load instance >>> mxDirection = musicxml.mxObjects.Direction() >>> mxDirectionType = musicxml.mxObjects.DirectionType() >>> mxDynamicMark = musicxml.mxObjects.DynamicMark('ff') >>> mxDynamics = musicxml.mxObjects.Dynamics() >>> mxDynamics.set('default-y', -20) >>> mxDynamics.append(mxDynamicMark) >>> mxDirectionType.append(mxDynamics) >>> mxDirection.append(mxDirectionType) >>> a = dynamics.Dynamic() >>> a = musicxml.fromMxObjects.mxToDynamicList(mxDirection)[0] >>> a.value 'ff' >>> a.englishName 'very loud' >>> a._positionDefaultY -20 # can probably replace this with mxDirection.getDynamicMark() # need to test # if len(mxDynamics) > 1: # raise dynamics.DynamicException('when importing a Dynamics object from MusicXML, ' # 'found more than one DynamicMark contained, namely %s' % # str(mxDynamics)) # palcement is found in outermost object # 
the tag is the dynamic mark value Given an mxDirection, create one or more TextExpressions #environLocal.printDebug(['mxToTextExpression()', mxWords, mxWords.charData]) # content can be passed with creation argument # two parameters that are combined # one is None Translate a MusicXML :class:`~music21.musicxml.mxObjects.Coda` object to a music21 :class:`~music21.repeat.Coda` object. Translate a MusicXML :class:`~music21.musicxml.mxObjects.Coda` object to a music21 :class:`~music21.repeat.Coda` object. Given an mxDirection that may define a coda, segno, or other repeat expression statement, realize the appropriate music21 object. # note: this may not be needed, as mx text expressions are converted to repeat objects in measure processing #------------------------------------------------------------------------------- # Harmony Convert a musicxml.mxObjects.Harmony() object to a harmony.ChordSymbol object: :: >>> mxHarmony = musicxml.mxObjects.Harmony() >>> mxKind = musicxml.mxObjects.Kind() >>> mxKind.charData = 'major-seventh' >>> mxHarmony.kindObj = mxKind >>> mxRoot = musicxml.mxObjects.Root() >>> mxRoot.set('root-step', 'D') >>> mxRoot.set('root-alter', '-1') >>> mxHarmony.rootObj = mxRoot >>> cs = musicxml.fromMxObjects.mxToChordSymbol(mxHarmony) >>> cs <music21.harmony.ChordSymbol D-maj7> :: >>> cs.figure 'D-maj7' :: >>> cs.pitches (<music21.pitch.Pitch D-3>, <music21.pitch.Pitch F3>, <music21.pitch.Pitch A-3>, <music21.pitch.Pitch C4>) :: >>> cs.root() <music21.pitch.Pitch D-3> TODO: this is very classically-oriented. Make it more Jazz/Rock like. 
:: >>> mxKind.charData = 'major-sixth' >>> cs = musicxml.fromMxObjects.mxToChordSymbol(mxHarmony) >>> cs <music21.harmony.ChordSymbol D-6> :: >>> cs.figure 'D-6' :: >>> cs.pitches (<music21.pitch.Pitch D-3>, <music21.pitch.Pitch F3>, <music21.pitch.Pitch A-3>, <music21.pitch.Pitch B-3>) :: >>> cs.root() <music21.pitch.Pitch D-3> #environLocal.printDebug(['mxToChordSymbol():', mxHarmony]) # can provide integer to create accidental on pitch # set Pitch object on Harmony # can provide integer to create accidental on pitch # set Pitch object on Harmony #set the bass to the root if root is none # must be an int # goes to roman property # a list of components # this is the assumed order of triples # already set # must get last on loop exit #environLocal.printDebug(['mxToHarmony(): Harmony object', h]) #------------------------------------------------------------------------------- # Instruments Return a generic instrument.Instrument object from this mxScorePart # note: transposition values is not set in this operation, but in # mxToStreamPart # need to remove badly-formed strings # for now, just get first instrument # for now, just get first midi instrument # musicxml counts from 1, not zero #------------------------------------------------------------------------------- # unified processors for Chords and Notes General routines for gathering spanners from notes via mxNotations objects and placing them in a spanner bundle. Spanners may be found in musicXML notations and directions objects. The passed-in spannerBundle will be edited in-place; existing spanners may be completed, or new spanners may be added. The `target` object is a reference to the relevant music21 object this spanner is associated with. 
# look at all spanners and see if we have an open, matching # slur to place this in # returns a new spanner bundle with just the result of the search #environLocal.printDebug(['spanner bundle: getByCompleteStatus(False)', spannerBundle.getByCompleteStatus(False)]) #sb = spannerBundle.getByIdLocal(idFound).getByCompleteStatus(False) # if we already have a slur #environLocal.printDebug(['found a match in SpannerBundle']) # get the first # create a new slur # add a reference of this note to this spanner #environLocal.printDebug(['adding n', n, id(n), 'su.getSpannedElements', su.getSpannedElements(), su.getSpannedElementIds()]) # only add after complete #environLocal.printDebug(['waveyLines', mxObj]) # if we already have # get the first # create a new spanner # add a reference of this note to this spanner # only add after complete # if we already have # get the first # create a new spanner #su.placement = mxObj.get('placement') # add a reference of this note to this spanner # can be stop or None; we can have empty single-element tremolo # only add after complete # if we already have # get the first # create a new spanner # add a reference of this note to this spanner # only add after complete Some spanners, such as MusicXML octave-shift, are encoded as MusicXML directions. 
#environLocal.printDebug(['mxDirectionToSpanners', 'found mxWedge', mxType, idFound]) # define this spanner as needing component assignment from # the next general note # need to retrieve an existing spanner # try to get base class of both Crescendo and Decrescendo # get first # will only have a target if this follows the note #environLocal.printDebug(['mxDirectionToSpanners', 'found mxBracket', mxType, idFound]) # define this spanner as needing component assignment from # the next general note # need to retrieve an existing spanner # try to get base class of both Crescendo and Decrescendo # get first # will only have a target if this follows the note # import mxDashes as m21 Line objects #environLocal.printDebug(['mxDirectionToSpanners', 'found mxDashes', mxType, idFound]) # define this spanner as needing component assignment from # the next general note # need to retrieve an existing spanner # try to get base class of both Crescendo and Decrescendo # get first # will only have a target if this follows the note #------------------------------------------------------------------------------- Convert an mxFermata object to a music21 expressions.Fermata object. If inputM21 is None, creates a new Fermata object and returns it. Otherwise changes the current Fermata object and returns nothing. >>> mxFermata = musicxml.mxObjects.Fermata() >>> mxFermata.set('type', 'inverted') >>> fermata = musicxml.fromMxObjects.mxFermataToFermata(mxFermata) >>> fermata.type 'inverted' Convert an mxTechnicalMark to a music21.articulations.TechnicalIndication object or one of its subclasses. Example: Provided an musicxml.mxObjects.TechnicalMark object (not an mxTechnical object) configure the music21 object. 
Create both a musicxml.mxObjects.ArticulationMark object and a conflicting music21 object: >>> mxTechnicalMark = musicxml.mxObjects.TechnicalMark('up-bow') >>> mxTechnicalMark.set('placement', 'below') >>> a = articulations.DownBow() >>> a.placement = 'above' Now override the music21 object with the mxArticulationMark object's characteristics >>> musicxml.fromMxObjects.mxTechnicalToArticulation(mxTechnicalMark, inputM21 = a) >>> 'DownBow' in a.classes False >>> 'UpBow' in a.classes True >>> a.placement 'below' Convert an mxArticulationMark to a music21.articulations.Articulation object or one of its subclasses. Example: Provided an musicxml.mxObjects.ArticulationMark object (not an mxArticulations object) configure the music21 object. Create both a musicxml.mxObjects.ArticulationMark object and a conflicting music21 object: >>> mxArticulationMark = musicxml.mxObjects.ArticulationMark('accent') >>> mxArticulationMark.set('placement', 'below') >>> a = articulations.Tenuto() >>> a.placement = 'above' Now override the music21 object with the mxArticulationMark object's characteristics >>> musicxml.fromMxObjects.mxArticulationToArticulation(mxArticulationMark, inputM21 = a) >>> 'Tenuto' in a.classes False >>> 'Accent' in a.classes True >>> a.placement 'below' Convert mxOrnament into a music21 ornament. This only processes non-spanner ornaments. Many mxOrnaments are spanners: these are handled elsewhere. Returns None if cannot be converted or not defined. 
#environLocal.printDebug(['calling mxOrnamentToExpressionOrArticulation with', mxOrnament]) # may be None #------------------------------------------------------------------------------- # Chords Given an a list of mxNotes, fill the necessary parameters >>> a = musicxml.mxObjects.Note() >>> p = musicxml.mxObjects.Pitch() >>> p.set('step', 'A') >>> p.set('octave', 3) >>> a.setDefaults() >>> a.set('pitch', p) >>> b = musicxml.mxObjects.Note() >>> b.setDefaults() >>> b.set('chord', True) >>> m = musicxml.mxObjects.Measure() >>> m.setDefaults() >>> a.external['measure'] = m # assign measure for divisions ref >>> a.external['divisions'] = m.external['divisions'] >>> b.external['measure'] = m # assign measure for divisions ref >>> b.external['divisions'] = m.external['divisions'] >>> c = musicxml.fromMxObjects.mxToChord([a, b]) >>> len(c.pitches) 2 >>> c.pitches[0] <music21.pitch.Pitch A3> >>> a = musicxml.mxObjects.Note() >>> a.setDefaults() >>> nh1 = musicxml.mxObjects.Notehead() >>> nh1.set('charData', 'diamond') >>> a.noteheadObj = nh1 >>> b = musicxml.mxObjects.Note() >>> b.setDefaults() >>> b.set('chord', True) >>> m = musicxml.mxObjects.Measure() >>> m.setDefaults() >>> a.external['measure'] = m # assign measure for divisions ref >>> a.external['divisions'] = m.external['divisions'] >>> b.external['measure'] = m # assign measure for divisions ref >>> b.external['divisions'] = m.external['divisions'] >>> c = musicxml.fromMxObjects.mxToChord([a, b]) >>> c.getNotehead(c.pitches[0]) 'diamond' #environLocal.printDebug(['mxToNote()', 'creating SpannerBundle']) # if we are passed in as spanner bundle, look for any pending # component assignments # assume that first chord is the same duration for all parts # assume that first note in list has a grace object (and all do) # store equally spaced list; use None if not defined # store notehead attributes that correspond with pitches # store stem direction attributes that correspond with pitches # extract pitch pbjects #extract 
notehead objects; may be None #extract stem directions #environLocal.printDebug(['found tie in chord', tieObj]) # need place holder for each pitch # set all at once # set beams from first note of chord # set ties based on pitches # provide pitch to assign tie to based on index number #set notehead based on pitches # set color per pitch #set stem direction based upon pitches #------------------------------------------------------------------------------- # Notes Translate a MusicXML :class:`~music21.musicxml.mxObjects.Note` to a :class:`~music21.note.Note`. The `spannerBundle` parameter can be a list or a Stream for storing and processing Spanner objects. If inputM21 is not `None` then that object is used for translating. Otherwise a new Note is created. Returns a `note.Note` object. >>> mxNote = musicxml.mxObjects.Note() >>> mxNote.setDefaults() >>> mxMeasure = musicxml.mxObjects.Measure() >>> mxMeasure.setDefaults() >>> mxMeasure.append(mxNote) >>> mxNote.external['measure'] = mxMeasure # manually create ref >>> mxNote.external['divisions'] = mxMeasure.external['divisions'] >>> n = musicxml.fromMxObjects.mxToNote(mxNote) >>> n <music21.note.Note C> # required info will be taken from entire note # gets the notehead object from the mxNote and sets value of the music21 note # to the value of the notehead object # after this, use combined function for notes and rests... Translate a MusicXML :class:`~music21.musicxml.mxObjects.Note` object to a :class:`~music21.note.Rest`. If an `inputM21` object reference is provided, this object will be configured; otherwise, a new :class:`~music21.note.Rest` object is created and returned. helper function for things common to notes and rests. n can be a note or rest... 
# doing this will create an instance, but will not be passed # out of this method, and thus is only for testing #environLocal.printDebug(['mxToNote()', 'creating SpannerBundle']) # if we are passed in as spanner bundle, look for any pending # component assignments # print object == 'no' and grace notes may have a type but not # a duration. they may be filtered out at the level of Stream # processing #environLocal.printDebug(['got mxNote with printObject == no']) #environLocal.printDebug(['mxGrace', mxGrace, mxNote, n.duration]) # in some casses grace notes may not have an assigned duration type # this default type is set here, before assigning to n.duration #environLocal.printDebug(['mxToNote', 'mxNote that is a grace missing duration type']) # the n.duration object here will be configured based on mxNote # get color from Note first; if not, try to get from notehead # get x-positioning if any... # can use mxNote.tieList instead # m21 tie object # provide entire Note # n.tie is defined in GeneralNote as None by default # things found in notations object: # articulations, slurs # get a list of mxArticulationMarks, not mxArticulations # get any technical marks, a list of mxTechnicalMarks, not mxTechnical # they live with articulations # get any fermatas, store on expressions # if len(mxOrnamentsList) > 0: # environLocal.printDebug(['mxOrnamentsList:', mxOrnamentsList]) #environLocal.printDebug(['adding to epxressions', post]) # create spanners: # translate if necessary, otherwise leaves unchanged #------------------------------------------------------------------------------ # Defaults Convert a :class:`~music21.musicxml.mxObjects.Defaults` object to a :class:`~music21.layout.ScoreLayout` object # according to xsd can be more than one. meaning? 
#------------------------------------------------------------------------------- # Measures Utility routine for importing musicXML objects; here, we store a reference to the music21 object in a dictionary, where keys are the staff values. Staff values may be None, 1, 2, etc. #environLocal.printDebug(['addToStaffReference(): called with:', music21Object]) # if a chord, get the first components # if an empty list # add to staff reference # some objects store staff assignment simply as number # a normal number Translate an mxMeasure (a MusicXML :class:`~music21.musicxml.mxObjects.Measure` object) into a music21 :class:`~music21.stream.Measure`. If an `inputM21` object reference is provided, this object will be configured and returned; otherwise, a new :class:`~music21.stream.Measure` object is created. The `spannerBundle` that is passed in is used to accumulate any created Spanners. This Spanners are not inserted into the Stream here. Returns a tuple of (music21.stream.Measure object, staffReference (a dictionary for partStaffs of elements that only belong to a single staff), and a transposition) # staff assignments: can create a dictionary with components in each # staff; this dictionary will then be used to copy this measure and # split components between two parts of more than one staff is defined # doing this will create an instance, but will not be passed # out of this method, and thus is only for testing #environLocal.printDebug(['mxToMeasure()', 'creating SpannerBundle']) # assume that measure numbers are integers # fix for Finale which calls unnumbered measures X1, X2, etc. which # we convert to 1.X, 2.X, etc. without this... # may need to do a format/unit conversion? 
# not yet implemented # if we do not have defined mxAttributes, must get from stored attributes # need to keep track of where mxattributes src is coming from # if attributes are defined in this measure, mxAttributesInternal # is true # not all measures have attributes definitions; this # gets the last-encountered measure attributes #environLocal.printDebug(['mxAttriutes clefList', mxAttributes.clefList, # mxAttributesInternal]) # getting first for each of these for now # perhaps we've already put a staffLayout into the measure? #else: # print mxStaffDetails._attr #staffLayoutsAlreadySetList.append(stl) #print "Got an mxStaffDetails %r" % mxStaffDetails # transposition may be defined for a Part in the Measure attributes # get interval object #environLocal.printDebug(['mxToMeasure: got transposition', transposition]) # count from zero # iterate through components found on components list # set to zero for each measure # offset of note w/n measure # for accumulating notes in chords # for accumulating lyrics assigned to chords # store the last-create music21 note for Spanners # Sibelius 7.1 only puts a <voice> tag on the # first note of a chord, so we need to make sure # that we keep track of the last voice... # try to get the next object for chord comparisons #environLocal.printDebug(['handling', mxObj]) # NOTE: tests have shown that using isinstance() here is much faster # than checking the .tag attribute. # check for backup and forward first # resolve as quarterLength, subtract from measure offset #environLocal.printDebug(['found musicxl backup:', mxObj.duration]) # resolve as quarterLength, add to measure offset #environLocal.printDebug(['found musicxl forward:', mxObj.duration, 'divisions', divisions]) # mxPrint objects may be found in a Measure's components # contain page or system layout information among others # false for No?? # false for No? 
#--- now we know what we need to add, add em # store at zero position # store at zero position # <sound> tags may be found in the Measure, used to define tempo # repeat is a tag found in the barline object # barline objects also store ending objects, that mark begin # and end of repeat bracket designations #environLocal.printDebug(['found mxEndingObj', mxEndingObj, 'm', m]) # get all incomplete spanners of the appropriate class that are # not complete # if we have no complete bracket objects, must start a new one # create with this measure as the object # there may just be an ending marker, and no start # this implies just one measure # set number; '' or None is interpreted as 1 # if we have any incomplete, this must be the end #environLocal.printDebug(['matching RepeatBracket spanner', 'len(rbSpanners)', len(rbSpanners)]) # get RepeatBracket # try to add this measure; may be the same # in general, any rb found should be the opening, and thus # this is the closing; can check #environLocal.printDebug(['setting left barline', barline]) #environLocal.printDebug(['setting right barline', barline]) #environLocal.printDebug(['got mxNote with printObject == no', 'measure number', m.number]) # mxGrace = mxNote.get('graceObj') # if mxGrace is not None: # graces have a type but not a duration # #environLocal.printDebug(['got mxNote with an mxGrace', 'duration', mxNote.get('duration'), 'measure number', # #m.number]) # continue # the first note of a chord is not identified directly; only # by looking at the next note can we tell if we have the first # note of a chord # set the first as a chord # it is a note # if a chord, do not increment until chord is complete # store lyrics for latter processing # update # if mxNote.get('notationsObj') is not None: # for mxObjSub in mxNote.get('notationsObj'): # # deal with ornaments, trill, etc # pass # its a rest #m.insert(offsetMeasureNote, n) # this can happen when a part defines multiple staves # where one staff uses voices but the 
other staff does not #print m, n, mxNote # update # if we we have notes in the note list and the next # note either does not exist or is not a chord, we # have a complete chord # add any accumulated lyrics # clear for next chord # update # only increment Chords after completion # mxDirections can be dynamics, repeat expressions, text expressions # in rare cases there may be more than one dynamic in the same # direction, so we iterate #m.insert(offsetMeasureNote, d) # TODO: multiple spanners # if mxObj.getWedge() is not None: # w = mxToWedge(mxObj) # addToStaffReference(mxObj, w, staffReference) # m._insertCore(offsetMeasureNote, w) #environLocal.printDebug(['got getMetronome', mxObj.getMetronome()]) # need to look for metronome marks defined above # and look for text defined below # TODO: need to look for tempo words if we have a metro #environLocal.printDebug(['found mxWords object', mxObj]) # convert into a list of TextExpression objects # this may be a TextExpression, or a RepeatExpression #environLocal.printDebug(['got TextExpression object', repr(te)]) # offset here is a combination of the current position # (offsetMeasureNote) and and the direction's offset # the repeat expression stores a copy of the text # expression within it; replace it here on insertion #environLocal.printDebug(['staffReference', staffReference]) # if we have voices and/or if we used backup/forward, we may have # empty space in the stream # do not bother with empty voices # full measure rest with no notes... # should do this on a per voice basis... #------------------------------------------------------------------------------- # Streams Load a part into a new Stream or one provided by `inputM21` given an mxScore and a part name. The `spannerBundle` reference, when passed in, is used to accumulate Spanners. These are not inserted here. Though it is incorrect MusicXML, PDFtoMusic creates empty measures when it should create full measures of rests (possibly hidden). 
This routine fixes that bug. See http://musescore.org/en/node/15129 #environLocal.printDebug(['calling Stream.mxToStreamPart']) # need a Score to load parts into # in some cases there may be more than one instrument defined # in each score part; this has not been tested # create a new music21 instrument # mxInstrument is a ScorePart # need an mxScorePart here # add part id as group # create a part instance for each part # always assume at sounding, unless transposition is defined in attributes # set part id to stream best name # add instrument at zero offset # offset is in quarter note length # may change at measure boundaries # keep track of whether the last measure was short... # t here is transposition, if defined; otherwise it is None # see http://stackoverflow.com/questions/6062576/adding-information-to-a-python-exception #+ execInfoTuple[1].__name__ # if this is the first #environLocal.printDebug(['transposition', t]) # if not the first measure, need to copy as well # for now, copy Instrument, change transposition, # could insert in part, or in measure # if a transposition is defined in musicxml, we assume it is # at written pitch # store last for comparison # there will be one for each measure # we do this check so that we do not compound suffixes, i.e.: # 23, 23.X1, 23.X1X2, 23.X1X2X3 # and instead just do: # 23, 23.X1, 23.X2, etc. # if no time sigature is defined, need to get a default # add measure to stream at current offset for this measure # note: we cannot assume that the time signature properly # describes the offsets w/n this bar. need to look at # offsets within measure; if the .highestTime value is greater # use this as the next offset ## this routine fixes a bug in PDFtoMusic and other MusicXML writers ## that omit empty rests in a Measure. It is a very quick test if ## the measure has any notes. Slower if it does not. 
# use time signature # for the first measure, this may be a pickup # must detect this when writing, as next measures offsets will be # incorrect # cannot get bar duration proportion if cannot get a ts #environLocal.printDebug(['incompletely filled Measure found on musicxml import; interpreting as a anacrusis:', 'padingLeft:', m.paddingLeft]) # assume that, even if measure is incomplete, the next bar should # start at the duration given by the time signature, not highestTime ### no...let's not do this... #lastTimeSignatureQuarterLength # probably a pickup after a repeat or phrase boundary or something # if we have multiple staves defined, add more parts, and transfer elements # note: this presently has to look at _idLastDeepCopyOf to get matches # to find removed elements after copying; this is probably not the # best way to do this. # V2.1 -- is not/will not be doing this. in fact idLastDeepCopyOf is # going away... # for this part, if any elements are components in the spannerBundle, # then then we need to update the spannerBundle after the part is copied # set group for components # set group for stream itself # TODO: this does not work with voices; there, Spanners # will be copied into the Score # copy spanners that are complete into the part, as this is the # highest level container that needs them # remove from original spanner bundle # s is the score; adding the aprt to the score # when adding parts to this Score # this assumes all start at the same place # even if there is only one part, it will be placed in a Stream given an mxPart and other necessary information, insert into the score (s) multiple PartStaff objects separating the information for one part from the other # transfer all spanners to the streamPart such that they get # updated in copying, then remove them # remove from original spanner bundle # get staves will return a number, between 1 and count #for staffCount in range(mxPart.getStavesCount()): #environLocal.printDebug(['partIdStaff', 
partIdStaff, 'copying streamPart']) # this deepcopy is necessary, as we will remove components # in each staff that do not belong # TODO: Do n-1 deepcopies, instead of n, since the last PartStaff can just remove from the original Part # assign this as a PartStaff, a subclass of Part # remove all elements that are not part of this staff # after adjusting voices see if voices can be reduced or # removed #environLocal.printDebug(['calling flattenUnnecessaryVoices: voices before:', len(m.voices)]) #environLocal.printDebug(['calling flattenUnnecessaryVoices: voices after:', len(m.voices)]) # TODO: copying spanners may have created orphaned # spanners that no longer have valid connections # in this part; should be deleted Given a list of staffReference dictionaries, collect and return a list of all unique keys except None # if len(post) > 0: # print post Given a staff reference dictionary, remove and combine in a list all elements that are not part of the given key. Thus, return a list of all entries to remove. It keeps those elements under staff key None (common to all) and those under given key. This then is the list of all elements that should be deleted. Translate an mxScore into a music21 Score object or puts it into the given inputM21 object (which does not necessarily have to be a :class:`~music21.stream.Score` object. It can be any :class:`~music21.stream.Stream` object) All spannerBundles accumulated at all lower levels are inserted here. 
# TODO: may not want to wait to this leve to insert spanners; may want to # insert in lower positions if it makes sense #print mxPartIds #mxPartIdDictionary = mxScore.partIdToNameDict() # values are part names #partNameIds = mxPartIdDictionary.keys() #partNameIds.sort() #for partId in partNameIds: # part names are part ids # part names are part ids # NOTE: setting partId not partId: might change # return the part; however, it is still already attached to the Score # see http://stackoverflow.com/questions/6062576/adding-information-to-a-python-exception # update dictionary to store music21 part #print("%r %s %r" % (m21PartIdDictionary, partId, part)) # get part/staff groups #environLocal.printDebug(['partgroups:', mxScore.getPartGroupData()]) # a list of dictionaries # create music21 spanner StaffGroup # get music21 part from partIdDictionary # use configuration routine to transfer/set attributes; # sets complete status as well # will be added to the Score # add metadata object; this is placed after all other parts now # these means that both Parts and other objects live on Stream. 
# store credits on Score stream # insert position does not matter ## get supports information # only insert complete spanners; at each level possible, complete spanners # are inserted into either the Score or the Part # storing complete Part spanners in a Part permits extracting parts with spanners #------------------------------------------------------------------------------ # beam and beams given an mxBeam object return a :class:`~music21.beam.Beam` object >>> mxBeam = musicxml.mxObjects.Beam() >>> mxBeam.set('charData', 'begin') >>> a = musicxml.fromMxObjects.mxToBeam(mxBeam) >>> a.type 'start' >>> mxBeam.set('charData', 'continue') >>> a = musicxml.fromMxObjects.mxToBeam(mxBeam) >>> a.type 'continue' >>> mxBeam.set('charData', 'end') >>> a = musicxml.fromMxObjects.mxToBeam(mxBeam) >>> a.type 'stop' >>> mxBeam.set('charData', 'forward hook') >>> a = musicxml.fromMxObjects.mxToBeam(mxBeam) >>> a.type 'partial' >>> a.direction 'right' >>> mxBeam.set('charData', 'backward hook') >>> a = musicxml.fromMxObjects.mxToBeam(mxBeam) >>> a.type 'partial' >>> a.direction 'left' >>> mxBeam.set('charData', 'crazy') >>> a = musicxml.fromMxObjects.mxToBeam(mxBeam) Traceback (most recent call last): FromMxObjectsException: unexpected beam type encountered (crazy) given a list of mxBeam objects, sets the beamsList >>> a = beam.Beams() >>> a.fill(2, type='start') >>> mxBeamList = musicxml.toMxObjects.beamsToMx(a) >>> b = musicxml.fromMxObjects.mxToBeams(mxBeamList) >>> b <music21.beam.Beams <music21.beam.Beam 1/start>/<music21.beam.Beam 2/start>> #--------------------------------------------------------- # layout Given an mxPrint object, set object data for the print section of a layout.PageLayout object >>> mxPrint = musicxml.mxObjects.Print() >>> mxPrint.set('new-page', 'yes') >>> mxPrint.set('page-number', 5) >>> mxPageLayout = musicxml.mxObjects.PageLayout() >>> mxPageLayout.pageHeight = 4000 >>> mxPageMargins = musicxml.mxObjects.PageMargins() >>> 
mxPageMargins.set('leftMargin', 20) >>> mxPageMargins.set('rightMargin', 30.2) >>> mxPageLayout.append(mxPageMargins) >>> mxPrint.append(mxPageLayout) >>> pl = musicxml.fromMxObjects.mxPrintToPageLayout(mxPrint) >>> pl.isNew True >>> pl.rightMargin > 30.1 and pl.rightMargin < 30.3 True >>> pl.leftMargin 20.0 >>> pl.pageNumber 5 Alternatively, pass a music21 object into this routine. >>> plAlt = layout.PageLayout() >>> musicxml.fromMxObjects.mxPrintToPageLayout(mxPrint, plAlt) >>> plAlt.pageNumber 5 >>> plAlt.pageHeight 4000.0 >>> plAlt.isNew True # encoded as yes/no in musicxml # blank # find first and break get a PageLayout object from an mxPageLayout Called out from mxPrintToPageLayout because it is also used in the <defaults> tag # may be floating point values Given an mxPrint object, set object data >>> mxPrint = musicxml.mxObjects.Print() >>> mxPrint.set('new-system', 'yes') >>> mxSystemLayout = musicxml.mxObjects.SystemLayout() >>> mxSystemLayout.systemDistance = 55 >>> mxSystemMargins = musicxml.mxObjects.SystemMargins() >>> mxSystemMargins.set('leftMargin', 20) >>> mxSystemMargins.set('rightMargin', 30.2) >>> mxSystemLayout.append(mxSystemMargins) >>> mxPrint.append(mxSystemLayout) >>> sl = musicxml.fromMxObjects.mxPrintToSystemLayout(mxPrint) >>> sl.isNew True >>> sl.rightMargin > 30.1 and sl.rightMargin <= 30.2 True >>> sl.leftMargin 20.0 >>> sl.distance 55.0 # encoded as yes/no in musicxml #mxSystemLayout = mxPrint.get('systemLayout') # blank # find first and break get a SystemLayout object from an mxSystemLayout Called out from mxPrintToSystemLayout because it is also used in the <defaults> tag # may be floating point values Given an mxPrint object, return a list of StaffLayout objects (may be empty) >>> mxPrint = musicxml.mxObjects.Print() # this is a red-herring... does nothing here... 
>>> mxPrint.set('new-system', 'yes') >>> mxStaffLayout = musicxml.mxObjects.StaffLayout() >>> mxStaffLayout.staffDistance = 55 >>> mxStaffLayout.set('number', 1) >>> mxPrint.append(mxStaffLayout) >>> slList = musicxml.fromMxObjects.mxPrintToStaffLayoutList(mxPrint) >>> sl = slList[0] >>> sl.distance 55.0 >>> sl.staffNumber 1 get a StaffLayout object from an mxStaffLayout Called out from mxPrintToStaffLayoutList because it is also used in the <defaults> tag #----------------------------------------------------------------- # metadata Use an mxScore, to fill in parameters of a :class:`~music21.metadata.Metadata` object. if `inputM21` is None, a new `Metadata` object is created and returned at the end. Otherwise, the parameters of this Metadata object are changed and nothing is returned. # xml calls this title not name # may be set to none #environLocal.printDebug(['mxScoreToMetadata, got title', md.title]) # do an mx conversion for mxCreator to Contributor # not yet supported; an encoding is also found in identification obj # mxEncoding = mxScore.get('encodingObj') Given an mxCreator, fill the necessary parameters of a Contributor. >>> mxCreator = musicxml.mxObjects.Creator() >>> mxCreator.set('type', 'composer') >>> mxCreator.set('charData', 'Beethoven, <NAME>') >>> c = musicxml.fromMxObjects.mxCreatorToContributor(mxCreator) >>> c <music21.metadata.primitives.Contributor object at 0x...> >>> c.role 'composer' >>> c.name '<NAME>' # roles are not defined in musicxml #environLocal.printDebug(['mxCreatorToContributor:', 'received unknown Contributor role: %s' % mxCreatorType]) # remove any whitespace found #------------------------------------------------------------------------------- make it so that the tests that look for the old-style pitch.Pitch representation still work. 
#a = converter.parse(testPrimitive.simpleRepeat45a) # this is a good example with repeats #a = corpus.parse('opus41no1/movement3') #s.show() #s.show() # have 10 spanners # can get the same from a getAll search # try to get all spanners from the first note #s.show('t') #s.show() # parts are stored in component list #s.show() # @UndefinedVariable # only chords in the second part #s.show() #s.show() # this number will change as more are being imported #environLocal.printDebug(['pre s.measures(2,3)', 's', s]) # this needs to get all spanners too # all spanners are referenced over; even ones that may not be relevant #ex.show() # slurs are on measures 2, 3 # crescendos are on measures 4, 5 # wavy lines on measures 6, 7 # brackets etc. on measures 10-14 # glissando on measure 16 #s.show() # all in measure 2 #s.show() #p.show() # test placing text expression in arbitrary locations # get bass #p.show() #s.show() # test importing from musicxml # has one segno # has two codas # has one d.c.al coda # has repeats in it; start with single emasure # there are 2 for each part, totaling 8 # can get for each part as spanners are stored in Part now # TODO: need to test getting repeat brackets after measure extraction #s.parts[0].show() # 72 through 77 # 2 repeat brackets are gathered b/c they are stored at the Part by # default # testing problematic voice imports # this 2 part segments was importing multiple voices within # a measure, even though there was no data in the second voice # there are voices, but they have been removed #s.parts[0].show('t') #self.assertEqual(len(s.parts[0].voices), 2) #s.parts[0].show('t') # this case, there were 4, but there should be 2 #s.parts[0].show('t') # s = converter.parse(testPrimitive.mixedVoices1b) # s = converter.parse(testPrimitive.mixedVoices2) # has metronome marks defined, not with sound tag # get all tempo indications # TODO: look for files that only have sound tags and create MetronomeMarks # need to look for bundling of Words text 
expressions with tempo # has only sound tempo=x tag #s = converter.parse(testPrimitive.articulations01) #s.show() # test importing from musicxml #s.show() #NB: Finale apparently will not display a pitch that is a member of a chord without a stem #unless all chord members are without stems. #print xml #c.show() # for n in chordResult: # print n.stemDirection #raw = s.musicxml # three instruments; one initial, and then one for each transposition # should be 3 #4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, F#4, B4, B4, B4, B4, B4, B4]') # chordification by default places notes at sounding pitch #sChords.show() # generate all transpositions on output #s.show() #s.show() # TODO: Get a better test... the single harmonic in the viola part, m. 482 is probably a mistake! # has many ornaments #s.flat.show('t') # include inverted turn # this is a good test as this encoding uses staffs, not parts # to encode both parts; this requires special spanner handling # TODO: this value should be 2, but due to staff encoding we # have orphaned spanners that are not cleaned up #environLocal.printDebug(['n1', n1, 'id(n1)', id(n1), slurs[0].getSpannedElementIds(), slurs[0].getSpannedElementIds()]) #environLocal.printDebug(['n2', n2, 'id(n2)', id(n2), slurs[0].getSpannedElementIds()]) # this produces a single component cresc #s.show() #s.show() #s.show() # dashes are imported as Lines (as are brackets) #s.show() #print match #5', 'C5', 'D-5', 'A-4', 'C5', 'C5']) #Rasing the BarException #all fine now, no exceptions here #Raising the BarException #------------------------------------------------------------------------------- # define presented order in documentation # sys.arg test options will be used in mainTest() #------------------------------------------------------------------------------ # eof | 2.236394 | 2 |
.conky_hud/dbus-mon.py | sacooper/dotfiles | 0 | 6621447 | <gh_stars>0
#! /bin/env/python
from gi.repository import GLib
import subprocess
import dbus
import datetime
import threading
import time
from dbus.mainloop.glib import DBusGMainLoop
messages = []
messages_map = {}
counter = 1
cco = 1
def curTime():
    """Return the current wall-clock time as a POSIX timestamp (float seconds)."""
    return datetime.datetime.now().timestamp()
def timestampTodate(ts):
    """Format a POSIX timestamp as ' HH:MM ' (local time, padded with spaces)."""
    moment = datetime.datetime.fromtimestamp(ts)
    return ' {:02d}:{:02d} '.format(moment.hour, moment.minute)
def winFocus():
    """Poll the focused X11 window once per second; when the process named
    'whatsie' (presumably the WhatsApp desktop client -- the status file is
    /tmp/wa_notifications) gains focus, reset the notification counters and
    rewrite the status file with a zero total.

    Runs forever; started in a background thread from the __main__ block.
    """
    global cco
    global messages
    global messages_map
    while True:
        # Focused window id -> owning pid (both via xdotool); [:-1] drops the
        # trailing character (the newline xdotool prints).
        win_f = subprocess.Popen(['xdotool', 'getwindowfocus'], stdout=subprocess.PIPE).communicate()[0].decode('utf-8')[:-1]
        win_p = subprocess.Popen(['xdotool', 'getwindowpid', win_f], stdout=subprocess.PIPE).communicate()[0].decode('utf-8')[:-1]
        path = '/proc/' + str(win_p) + '/comm'
        # Process name read from /proc/<pid>/comm by spawning `cat`.
        # NOTE(review): a plain open(path).read() would avoid the subprocess.
        win_title = subprocess.Popen(['cat', path], stdout=subprocess.PIPE).communicate()[0].decode('utf-8')[:-1]
        # print(win_title)
        if win_title == 'whatsie':
            # User is looking at the chat window: clear all pending state.
            with open('/tmp/wa_notifications', 'w+') as f:
                print('TOTAL 0', file=f, flush=True)
            cco = 1
            messages = []
            messages_map = {}
        time.sleep(1)
def notifs(bus, message):
    """D-Bus message filter: tally Notify() calls per sender and rewrite
    /tmp/wa_notifications with the running totals (conky-formatted lines).

    ``bus`` is unused here but required by the dbus message-filter signature.
    """
    global messages
    global messages_map
    global cco
    if message.get_member() == 'Notify':
        # Argument index 3 of Notify is used as the sender label (presumably
        # the notification summary -- TODO confirm against the spec); the
        # [0:] slice is a full copy of that value.
        sender = message.get_args_list()[3][0:]
        if sender not in messages_map:
            # First notification from this sender: remember its index in
            # `messages`.  `counter` is the module-level initial value (1),
            # so every new entry starts its count at 1.
            messages_map.update({sender : len(messages)})
            messages.append({'sender' : sender, 'counter' : counter, 'date' : curTime()})
        else:
            # Repeat sender: bump its count and refresh the timestamp.
            m = messages[messages_map[sender]]
            m['counter'] += 1
            m['date'] = curTime()
        # Rewrite the whole status file on every notification; cco is a
        # running total of Notify events since the last focus reset.
        with open('/tmp/wa_notifications', 'w+') as f:
            print('TOTAL ' + str(cco), file=f, flush=True)
            for m in messages:
                # One line per sender: name (padded to 10), count, and the
                # ${color1}-wrapped HH:MM time for conky markup.
                print(
                    m['sender'][:10].ljust(10),\
                    m['counter'],\
                    '${color1}' + str(timestampTodate(m['date'])) + '${color}',\
                    file=f,\
                    flush=True
                    )
        cco += 1
if __name__ == '__main__':
    # Hook dbus-python into the GLib main loop, then eavesdrop on every
    # org.freedesktop.Notifications message on the session bus and feed it
    # to notifs().
    DBusGMainLoop(set_as_default=True)
    loop = GLib.MainLoop()
    bus = dbus.SessionBus()
    bus.add_match_string_non_blocking("eavesdrop=true, interface='org.freedesktop.Notifications'")
    bus.add_message_filter(notifs)
    # Background thread clears the counters whenever the chat window is focused.
    threading.Thread(target=winFocus).start()
    loop.run()
| #! /bin/env/python
from gi.repository import GLib
import subprocess
import dbus
import datetime
import threading
import time
from dbus.mainloop.glib import DBusGMainLoop
messages = []
messages_map = {}
counter = 1
cco = 1
def curTime():
ts = datetime.datetime.timestamp(datetime.datetime.now())
return ts
def timestampTodate(ts):
return datetime.datetime.fromtimestamp(ts).strftime(' %H:%M ')
def winFocus():
global cco
global messages
global messages_map
while True:
win_f = subprocess.Popen(['xdotool', 'getwindowfocus'], stdout=subprocess.PIPE).communicate()[0].decode('utf-8')[:-1]
win_p = subprocess.Popen(['xdotool', 'getwindowpid', win_f], stdout=subprocess.PIPE).communicate()[0].decode('utf-8')[:-1]
path = '/proc/' + str(win_p) + '/comm'
win_title = subprocess.Popen(['cat', path], stdout=subprocess.PIPE).communicate()[0].decode('utf-8')[:-1]
# print(win_title)
if win_title == 'whatsie':
with open('/tmp/wa_notifications', 'w+') as f:
print('TOTAL 0', file=f, flush=True)
cco = 1
messages = []
messages_map = {}
time.sleep(1)
def notifs(bus, message):
global messages
global messages_map
global cco
if message.get_member() == 'Notify':
sender = message.get_args_list()[3][0:]
if sender not in messages_map:
messages_map.update({sender : len(messages)})
messages.append({'sender' : sender, 'counter' : counter, 'date' : curTime()})
else:
m = messages[messages_map[sender]]
m['counter'] += 1
m['date'] = curTime()
with open('/tmp/wa_notifications', 'w+') as f:
print('TOTAL ' + str(cco), file=f, flush=True)
for m in messages:
print(
m['sender'][:10].ljust(10),\
m['counter'],\
'${color1}' + str(timestampTodate(m['date'])) + '${color}',\
file=f,\
flush=True
)
cco += 1
if __name__ == '__main__':
DBusGMainLoop(set_as_default=True)
loop = GLib.MainLoop()
bus = dbus.SessionBus()
bus.add_match_string_non_blocking("eavesdrop=true, interface='org.freedesktop.Notifications'")
bus.add_message_filter(notifs)
threading.Thread(target=winFocus).start()
loop.run() | en | 0.412424 | #! /bin/env/python # print(win_title) | 2.305195 | 2 |
teilab/__meta__.py | iwasakishuto/TeiLab-BasicLaboratoryWork-in-LifeScienceExperiments | 0 | 6621448 | <gh_stars>0
#coding: utf-8
# Package metadata constants, re-exported via __all__ for the rest of the
# package (version string, author/contact info, and documentation links).
__all__ = [
    "__copyright__", "__version__", "__license__",
    "__author__", "__author_address__", "__author_twitter__",
    "__documentation__", "__github__",
    "__principal_investigator__", "__labcolor__",
]
__copyright__ = "Copyright (C) 2021 <NAME>"
__version__ = "2021S" # Spring Semester
__license__ = "MIT"
__author__ = "iwasakishuto"
__author_address__ = "<EMAIL>"
__author_twitter__ = "https://twitter.com/cabernet_rock"
__documentation__ = "https://iwasakishuto.github.io/TeiLab-BasicLaboratoryWork-in-LifeScienceExperiments"
__github__ = "https://github.com/iwasakishuto/TeiLab-BasicLaboratoryWork-in-LifeScienceExperiments"
# ====== This is a joke, hahaha ======== #
__principal_investigator__ = "✨ 👩🔬 ✨" #
# __labcolor__ = "⚫" #
__labcolor__ = "⚪" #
# ====================================== #
| #coding: utf-8
__all__ = [
"__copyright__", "__version__", "__license__",
"__author__", "__author_address__", "__author_twitter__",
"__documentation__", "__github__",
"__principal_investigator__", "__labcolor__",
]
__copyright__ = "Copyright (C) 2021 <NAME>"
__version__ = "2021S" # Spring Semester
__license__ = "MIT"
__author__ = "iwasakishuto"
__author_address__ = "<EMAIL>"
__author_twitter__ = "https://twitter.com/cabernet_rock"
__documentation__ = "https://iwasakishuto.github.io/TeiLab-BasicLaboratoryWork-in-LifeScienceExperiments"
__github__ = "https://github.com/iwasakishuto/TeiLab-BasicLaboratoryWork-in-LifeScienceExperiments"
# ====== This is a joke, hahaha ======== #
__principal_investigator__ = "✨ 👩🔬 ✨" #
# __labcolor__ = "⚫" #
__labcolor__ = "⚪" #
# ====================================== # | en | 0.670829 | #coding: utf-8 # Spring Semester # ====== This is a joke, hahaha ======== # # # __labcolor__ = "⚫" # # # ====================================== # | 1.878402 | 2 |
chess/board.py | vanditkaria/chess | 20 | 6621449 | import chess
import table
def evaluate_board(board):
    """Return the material + positional score of *board*.

    Sums ``piece_value`` over every occupied square; empty squares
    contribute 0.  Positive totals favor White, negative favor Black
    (the sign comes from ``piece_value``).
    """
    total = 0
    for square in chess.SQUARES:
        # Look up the piece once instead of twice per square (the original
        # called board.piece_at() both in the condition and the value).
        piece = board.piece_at(square)
        if piece is not None:
            total += piece_value(piece, square)
    return total
def piece_value(piece, square):
    """Return the signed value of *piece* sitting on *square*.

    The value is the piece's base material worth plus a positional bonus
    from the matching piece-square table; White pieces are positive,
    Black pieces negative.  An unrecognized symbol contributes only the
    sign (+1 or -1), matching the original behavior.
    """
    symbol = piece.symbol()
    is_white = not symbol.islower()
    row, column = convert_square(square, is_white)
    sign = 1 if is_white else -1
    kind = symbol.lower()
    if kind == 'p':
        return sign * (1000 + table.PAWN[row][column])
    if kind == 'n':
        return sign * (3000 + table.KNIGHT[row][column])
    if kind == 'b':
        return sign * (3000 + table.BISHOP[row][column])
    if kind == 'r':
        return sign * (5000 + table.ROOK[row][column])
    if kind == 'q':
        return sign * (9000 + table.QUEEN[row][column])
    if kind == 'k':
        return sign * (1000000 + table.KING[row][column])
    return sign
def convert_square(square, is_white):
    """Map a 0-63 square index to a (row, column) pair in table coordinates.

    The piece-square tables are indexed from each side's own point of view,
    so White's rank is flipped (square 0 maps to row 7) while Black's is
    used as-is.  The column is the file index in both cases.
    """
    rank, column = divmod(square, 8)
    row = 7 - rank if is_white else rank
    return (row, column)
| import chess
import table
def evaluate_board(board):
return sum(
piece_value(board.piece_at(square), square)
if board.piece_at(square) is not None else 0
for square in chess.SQUARES)
def piece_value(piece, square):
symbol = piece.symbol()
is_white = not symbol.islower()
row = convert_square(square, is_white)[0]
column = convert_square(square, is_white)[1]
score = 1 if is_white else -1
if symbol.lower() == 'p':
score *= (1000 + table.PAWN[row][column])
elif symbol.lower() == 'n':
score *= (3000 + table.KNIGHT[row][column])
elif symbol.lower() == 'b':
score *= (3000 + table.BISHOP[row][column])
elif symbol.lower() == 'r':
score *= (5000 + table.ROOK[row][column])
elif symbol.lower() == 'q':
score *= (9000 + table.QUEEN[row][column])
elif symbol.lower() == 'k':
score *= (1000000 + table.KING[row][column])
return score
def convert_square(square, is_white):
row = 7 - (square // 8) if is_white else square // 8
column = square % 8
return (row, column)
| none | 1 | 3.513822 | 4 | |
src/simple_bot_handler.py | cltl/chatbot | 0 | 6621450 | import requests
class BotHandler:
    """Minimal wrapper around the Telegram Bot HTTP API."""

    def __init__(self, token):
        # Base URL for every API call; the bot token is embedded in the path.
        self.token = token
        self.api_url = "https://api.telegram.org/bot{}/".format(token)

    def get_messages(self, offset=None, timeout=200):
        """ Function to get all messages sent to the bot """
        response = requests.get(
            self.api_url + 'getUpdates',
            {'timeout': timeout, 'offset': offset},
        )
        return response.json()['result']

    def send_message_to(self, chat_id, text):
        """ Function to send a message from the bot to a specific user"""
        return requests.post(
            self.api_url + 'sendMessage',
            {'chat_id': chat_id, 'text': text},
        )

    def get_last_message_by(self, chat_id):
        """ Function to get the last message sent to the bot by a specific user"""
        from_user = [
            update for update in self.get_messages()
            if update['message']['chat']['id'] == chat_id
        ]
        # Newest update from that chat, or [] when the user never wrote.
        return from_user[-1] if from_user else []
| import requests
class BotHandler:
def __init__(self, token):
self.token = token
self.api_url = "https://api.telegram.org/bot{}/".format(token)
def get_messages(self, offset=None, timeout=200):
""" Function to get all messages sent to the bot """
method = 'getUpdates'
params = {'timeout': timeout, 'offset': offset}
resp = requests.get(self.api_url + method, params)
return resp.json()['result']
def send_message_to(self, chat_id, text):
""" Function to send a message from the bot to a specific user"""
params = {'chat_id': chat_id, 'text': text}
method = 'sendMessage'
resp = requests.post(self.api_url + method, params)
return resp
def get_last_message_by(self, chat_id):
""" Function to get the last message sent to the bot by a specific user"""
messages = self.get_messages()
messages_by_user = list(filter(lambda d: d['message']['chat']['id'] == chat_id, messages))
if messages_by_user:
last_message = messages_by_user[-1]
else:
last_message = []
return last_message
| en | 0.78359 | Function to get all messages sent to the bot Function to send a message from the bot to a specific user Function to get the last message sent to the bot by a specific user | 3.046944 | 3 |